Diffstat (limited to 'drivers/gpu/drm/amd/powerplay')
70 files changed, 5896 insertions, 6263 deletions
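The patch below drops the powerplay eventmgr (pem_handle_event() driven by AMD_PP_EVENT_* action chains) and routes everything through the hwmgr instead: task dispatch becomes hwmgr_handle_task() with AMD_PP_TASK_* ids, and the smumgr function table is reached via hwmgr->smumgr_funcs rather than a separate pp_smumgr object. For rough orientation only, here is a minimal before/after sketch of one dispatch call site, written against the identifiers visible in this diff; the wrapper name readjust_power_state() is made up for illustration, and the pp_check()/locking done by the real call sites is omitted.

/* Sketch only: assumes the powerplay-internal headers used by amd_powerplay.c. */
#include "hwmgr.h"
#include "pp_instance.h"

static int readjust_power_state(struct pp_instance *pp_handle)
{
	/*
	 * Old path (removed by this patch): build a pem_event_data and hand
	 * it to the eventmgr, which walked the matching action chain:
	 *
	 *   struct pem_event_data data = { {0} };
	 *   return pem_handle_event(pp_handle->eventmgr,
	 *                           AMD_PP_EVENT_READJUST_POWER_STATE, &data);
	 */

	/* New path: the hwmgr handles the task directly; no eventmgr object. */
	return hwmgr_handle_task(pp_handle,
				 AMD_PP_TASK_READJUST_POWER_STATE,
				 NULL, NULL);
}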
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile index 4e132b936e3d..68b417ac94dd 100644 --- a/drivers/gpu/drm/amd/powerplay/Makefile +++ b/drivers/gpu/drm/amd/powerplay/Makefile @@ -4,12 +4,11 @@ subdir-ccflags-y += \ -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_PATH)/include \ -I$(FULL_AMD_PATH)/powerplay/smumgr\ - -I$(FULL_AMD_PATH)/powerplay/hwmgr \ - -I$(FULL_AMD_PATH)/powerplay/eventmgr + -I$(FULL_AMD_PATH)/powerplay/hwmgr AMD_PP_PATH = ../powerplay -PP_LIBS = smumgr hwmgr eventmgr +PP_LIBS = smumgr hwmgr AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS))) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index f73e80c4bf33..9f3f3b8cf64f 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -29,22 +29,19 @@ #include "amd_powerplay.h" #include "pp_instance.h" #include "power_state.h" -#include "eventmanager.h" - static inline int pp_check(struct pp_instance *handle) { if (handle == NULL || handle->pp_valid != PP_VALID) return -EINVAL; - if (handle->smu_mgr == NULL || handle->smu_mgr->smumgr_funcs == NULL) + if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL) return -EINVAL; if (handle->pm_en == 0) return PP_DPM_DISABLED; - if (handle->hwmgr == NULL || handle->hwmgr->hwmgr_func == NULL - || handle->eventmgr == NULL) + if (handle->hwmgr->hwmgr_func == NULL) return PP_DPM_DISABLED; return 0; @@ -55,46 +52,32 @@ static int pp_early_init(void *handle) int ret; struct pp_instance *pp_handle = (struct pp_instance *)handle; - ret = smum_early_init(pp_handle); + ret = hwmgr_early_init(pp_handle); if (ret) - return ret; + return -EINVAL; if ((pp_handle->pm_en == 0) || cgs_is_virtualization_enabled(pp_handle->device)) return PP_DPM_DISABLED; - ret = hwmgr_early_init(pp_handle); - if (ret) { - pp_handle->pm_en = 0; - return PP_DPM_DISABLED; - } - - ret = eventmgr_early_init(pp_handle); - if (ret) { - kfree(pp_handle->hwmgr); - pp_handle->hwmgr = NULL; - pp_handle->pm_en = 0; - return PP_DPM_DISABLED; - } - return 0; } static int pp_sw_init(void *handle) { - struct pp_smumgr *smumgr; + struct pp_hwmgr *hwmgr; int ret = 0; struct pp_instance *pp_handle = (struct pp_instance *)handle; ret = pp_check(pp_handle); if (ret == 0 || ret == PP_DPM_DISABLED) { - smumgr = pp_handle->smu_mgr; + hwmgr = pp_handle->hwmgr; - if (smumgr->smumgr_funcs->smu_init == NULL) + if (hwmgr->smumgr_funcs->smu_init == NULL) return -EINVAL; - ret = smumgr->smumgr_funcs->smu_init(smumgr); + ret = hwmgr->smumgr_funcs->smu_init(hwmgr); pr_info("amdgpu: powerplay sw initialized\n"); } @@ -103,40 +86,39 @@ static int pp_sw_init(void *handle) static int pp_sw_fini(void *handle) { - struct pp_smumgr *smumgr; + struct pp_hwmgr *hwmgr; int ret = 0; struct pp_instance *pp_handle = (struct pp_instance *)handle; ret = pp_check(pp_handle); if (ret == 0 || ret == PP_DPM_DISABLED) { - smumgr = pp_handle->smu_mgr; + hwmgr = pp_handle->hwmgr; - if (smumgr->smumgr_funcs->smu_fini == NULL) + if (hwmgr->smumgr_funcs->smu_fini == NULL) return -EINVAL; - ret = smumgr->smumgr_funcs->smu_fini(smumgr); + ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); } return ret; } static int pp_hw_init(void *handle) { - struct pp_smumgr *smumgr; - struct pp_eventmgr *eventmgr; int ret = 0; struct pp_instance *pp_handle = (struct pp_instance *)handle; + struct pp_hwmgr *hwmgr; ret = pp_check(pp_handle); if (ret == 0 || ret == 
PP_DPM_DISABLED) { - smumgr = pp_handle->smu_mgr; + hwmgr = pp_handle->hwmgr; - if (smumgr->smumgr_funcs->start_smu == NULL) + if (hwmgr->smumgr_funcs->start_smu == NULL) return -EINVAL; - if(smumgr->smumgr_funcs->start_smu(smumgr)) { + if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { pr_err("smc start failed\n"); - smumgr->smumgr_funcs->smu_fini(smumgr); + hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); return -EINVAL;; } if (ret == PP_DPM_DISABLED) @@ -146,38 +128,21 @@ static int pp_hw_init(void *handle) ret = hwmgr_hw_init(pp_handle); if (ret) goto err; - - eventmgr = pp_handle->eventmgr; - if (eventmgr->pp_eventmgr_init == NULL || - eventmgr->pp_eventmgr_init(eventmgr)) - goto err; - return 0; err: pp_handle->pm_en = 0; - kfree(pp_handle->eventmgr); - kfree(pp_handle->hwmgr); - pp_handle->hwmgr = NULL; - pp_handle->eventmgr = NULL; return PP_DPM_DISABLED; } static int pp_hw_fini(void *handle) { - struct pp_eventmgr *eventmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; ret = pp_check(pp_handle); - - if (ret == 0) { - eventmgr = pp_handle->eventmgr; - - if (eventmgr->pp_eventmgr_fini != NULL) - eventmgr->pp_eventmgr_fini(eventmgr); - + if (ret == 0) hwmgr_hw_fini(pp_handle); - } + return 0; } @@ -244,8 +209,6 @@ static int pp_set_powergating_state(void *handle, static int pp_suspend(void *handle) { - struct pp_eventmgr *eventmgr; - struct pem_event_data event_data = { {0} }; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; @@ -256,17 +219,12 @@ static int pp_suspend(void *handle) else if (ret != 0) return ret; - eventmgr = pp_handle->eventmgr; - pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data); - - return 0; + return hwmgr_hw_suspend(pp_handle); } static int pp_resume(void *handle) { - struct pp_eventmgr *eventmgr; - struct pem_event_data event_data = { {0} }; - struct pp_smumgr *smumgr; + struct pp_hwmgr *hwmgr; int ret, ret1; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -275,26 +233,22 @@ static int pp_resume(void *handle) if (ret1 != 0 && ret1 != PP_DPM_DISABLED) return ret1; - smumgr = pp_handle->smu_mgr; + hwmgr = pp_handle->hwmgr; - if (smumgr->smumgr_funcs->start_smu == NULL) + if (hwmgr->smumgr_funcs->start_smu == NULL) return -EINVAL; - ret = smumgr->smumgr_funcs->start_smu(smumgr); + ret = hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr); if (ret) { pr_err("smc start failed\n"); - smumgr->smumgr_funcs->smu_fini(smumgr); + hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); return ret; } if (ret1 == PP_DPM_DISABLED) return 0; - eventmgr = pp_handle->eventmgr; - - pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data); - - return 0; + return hwmgr_hw_resume(pp_handle); } const struct amd_ip_funcs pp_ip_funcs = { @@ -324,6 +278,42 @@ static int pp_dpm_fw_loading_complete(void *handle) return 0; } +static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level *level) +{ + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + + if (!(hwmgr->dpm_level & profile_mode_mask)) { + /* enter umd pstate, save current level, disable gfx cg*/ + if (*level & profile_mode_mask) { + hwmgr->saved_dpm_level = hwmgr->dpm_level; + hwmgr->en_umd_pstate = true; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_UNGATE); + 
} + } else { + /* exit umd pstate, restore level, enable gfx cg*/ + if (!(*level & profile_mode_mask)) { + if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) + *level = hwmgr->saved_dpm_level; + hwmgr->en_umd_pstate = false; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_GATE); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_GATE); + } + } +} + static int pp_dpm_force_performance_level(void *handle, enum amd_dpm_forced_level level) { @@ -338,13 +328,22 @@ static int pp_dpm_force_performance_level(void *handle, hwmgr = pp_handle->hwmgr; + if (level == hwmgr->dpm_level) + return 0; + if (hwmgr->hwmgr_func->force_dpm_level == NULL) { pr_info("%s was not implemented.\n", __func__); return 0; } mutex_lock(&pp_handle->pp_lock); - hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); + pp_dpm_en_umd_pstate(hwmgr, &level); + hwmgr->request_dpm_level = level; + hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); + ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); + if (!ret) + hwmgr->dpm_level = hwmgr->request_dpm_level; + mutex_unlock(&pp_handle->pp_lock); return 0; } @@ -369,11 +368,12 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level( return level; } -static int pp_dpm_get_sclk(void *handle, bool low) +static uint32_t pp_dpm_get_sclk(void *handle, bool low) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; + uint32_t clk = 0; ret = pp_check(pp_handle); @@ -387,16 +387,17 @@ static int pp_dpm_get_sclk(void *handle, bool low) return 0; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->get_sclk(hwmgr, low); + clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low); mutex_unlock(&pp_handle->pp_lock); - return ret; + return clk; } -static int pp_dpm_get_mclk(void *handle, bool low) +static uint32_t pp_dpm_get_mclk(void *handle, bool low) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; + uint32_t clk = 0; ret = pp_check(pp_handle); @@ -410,12 +411,12 @@ static int pp_dpm_get_mclk(void *handle, bool low) return 0; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->get_mclk(hwmgr, low); + clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low); mutex_unlock(&pp_handle->pp_lock); - return ret; + return clk; } -static int pp_dpm_powergate_vce(void *handle, bool gate) +static void pp_dpm_powergate_vce(void *handle, bool gate) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -424,21 +425,20 @@ static int pp_dpm_powergate_vce(void *handle, bool gate) ret = pp_check(pp_handle); if (ret != 0) - return ret; + return; hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->powergate_vce == NULL) { pr_info("%s was not implemented.\n", __func__); - return 0; + return; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); + hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); mutex_unlock(&pp_handle->pp_lock); - return ret; } -static int pp_dpm_powergate_uvd(void *handle, bool gate) +static void pp_dpm_powergate_uvd(void *handle, bool gate) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -447,74 +447,34 @@ static int pp_dpm_powergate_uvd(void *handle, bool gate) ret = pp_check(pp_handle); if (ret != 0) - return ret; + return; hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->powergate_uvd == NULL) { pr_info("%s was not implemented.\n", __func__); - return 0; + return; } 
mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); + hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); mutex_unlock(&pp_handle->pp_lock); - return ret; } -static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state) -{ - switch (state) { - case POWER_STATE_TYPE_BATTERY: - return PP_StateUILabel_Battery; - case POWER_STATE_TYPE_BALANCED: - return PP_StateUILabel_Balanced; - case POWER_STATE_TYPE_PERFORMANCE: - return PP_StateUILabel_Performance; - default: - return PP_StateUILabel_None; - } -} - -static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, +static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, void *input, void *output) { int ret = 0; - struct pem_event_data data = { {0} }; struct pp_instance *pp_handle = (struct pp_instance *)handle; ret = pp_check(pp_handle); if (ret != 0) return ret; - mutex_lock(&pp_handle->pp_lock); - switch (event_id) { - case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE: - ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); - break; - case AMD_PP_EVENT_ENABLE_USER_STATE: - { - enum amd_pm_state_type ps; - - if (input == NULL) { - ret = -EINVAL; - break; - } - ps = *(unsigned long *)input; - data.requested_ui_label = power_state_convert(ps); - ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); - break; - } - case AMD_PP_EVENT_COMPLETE_INIT: - ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); - break; - case AMD_PP_EVENT_READJUST_POWER_STATE: - ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); - break; - default: - break; - } + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr_handle_task(pp_handle, task_id, input, output); mutex_unlock(&pp_handle->pp_lock); + return ret; } @@ -562,7 +522,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) return pm_type; } -static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) +static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -571,25 +531,25 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) ret = pp_check(pp_handle); if (ret != 0) - return ret; + return; hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { pr_info("%s was not implemented.\n", __func__); - return 0; + return; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); + hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); mutex_unlock(&pp_handle->pp_lock); - return ret; } -static int pp_dpm_get_fan_control_mode(void *handle) +static uint32_t pp_dpm_get_fan_control_mode(void *handle) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; + uint32_t mode = 0; ret = pp_check(pp_handle); @@ -603,9 +563,9 @@ static int pp_dpm_get_fan_control_mode(void *handle) return 0; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); + mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); mutex_unlock(&pp_handle->pp_lock); - return ret; + return mode; } static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) @@ -1128,7 +1088,7 @@ static int pp_dpm_switch_power_profile(void *handle, return 0; } -const struct amd_powerplay_funcs pp_dpm_funcs = { +const struct amd_pm_funcs pp_dpm_funcs = { .get_temperature = pp_dpm_get_temperature, .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, 
@@ -1189,15 +1149,9 @@ int amd_powerplay_destroy(void *handle) { struct pp_instance *instance = (struct pp_instance *)handle; - if (instance->pm_en) { - kfree(instance->eventmgr); - kfree(instance->hwmgr); - instance->hwmgr = NULL; - instance->eventmgr = NULL; - } + kfree(instance->hwmgr); + instance->hwmgr = NULL; - kfree(instance->smu_mgr); - instance->smu_mgr = NULL; kfree(instance); instance = NULL; return 0; @@ -1206,18 +1160,16 @@ int amd_powerplay_destroy(void *handle) int amd_powerplay_reset(void *handle) { struct pp_instance *instance = (struct pp_instance *)handle; - struct pp_eventmgr *eventmgr; - struct pem_event_data event_data = { {0} }; int ret; - if (cgs_is_virtualization_enabled(instance->smu_mgr->device)) + if (cgs_is_virtualization_enabled(instance->hwmgr->device)) return PP_DPM_DISABLED; ret = pp_check(instance); if (ret != 0) return ret; - ret = pp_hw_fini(handle); + ret = pp_hw_fini(instance); if (ret) return ret; @@ -1225,16 +1177,7 @@ int amd_powerplay_reset(void *handle) if (ret) return PP_DPM_DISABLED; - eventmgr = instance->eventmgr; - - if (eventmgr->pp_eventmgr_init == NULL) - return PP_DPM_DISABLED; - - ret = eventmgr->pp_eventmgr_init(eventmgr); - if (ret) - return ret; - - return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data); + return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL); } /* export this function to DAL */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile b/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile deleted file mode 100644 index 7509e3850087..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# -# Makefile for the 'event manager' sub-component of powerplay. -# It provides the event management services for the driver. - -EVENT_MGR = eventmgr.o eventinit.o eventmanagement.o \ - eventactionchains.o eventsubchains.o eventtasks.o psm.o - -AMD_PP_EVENT = $(addprefix $(AMD_PP_PATH)/eventmgr/,$(EVENT_MGR)) - -AMD_POWERPLAY_FILES += $(AMD_PP_EVENT) - diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c deleted file mode 100644 index 8cee4e0f9fde..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include "eventmgr.h" -#include "eventactionchains.h" -#include "eventsubchains.h" - -static const pem_event_action * const initialize_event[] = { - block_adjust_power_state_tasks, - power_budget_tasks, - system_config_tasks, - setup_asic_tasks, - enable_dynamic_state_management_tasks, - get_2d_performance_state_tasks, - set_performance_state_tasks, - initialize_thermal_controller_tasks, - conditionally_force_3d_performance_state_tasks, - process_vbios_eventinfo_tasks, - broadcast_power_policy_tasks, - NULL -}; - -const struct action_chain initialize_action_chain = { - "Initialize", - initialize_event -}; - -static const pem_event_action * const uninitialize_event[] = { - ungate_all_display_phys_tasks, - uninitialize_display_phy_access_tasks, - disable_gfx_voltage_island_power_gating_tasks, - disable_gfx_clock_gating_tasks, - uninitialize_thermal_controller_tasks, - set_boot_state_tasks, - adjust_power_state_tasks, - disable_dynamic_state_management_tasks, - disable_clock_power_gatings_tasks, - cleanup_asic_tasks, - prepare_for_pnp_stop_tasks, - NULL -}; - -const struct action_chain uninitialize_action_chain = { - "Uninitialize", - uninitialize_event -}; - -static const pem_event_action * const power_source_change_event_pp_enabled[] = { - set_power_source_tasks, - set_power_saving_state_tasks, - adjust_power_state_tasks, - enable_disable_fps_tasks, - set_nbmcu_state_tasks, - broadcast_power_policy_tasks, - NULL -}; - -const struct action_chain power_source_change_action_chain_pp_enabled = { - "Power source change - PowerPlay enabled", - power_source_change_event_pp_enabled -}; - -static const pem_event_action * const power_source_change_event_pp_disabled[] = { - set_power_source_tasks, - set_nbmcu_state_tasks, - NULL -}; - -const struct action_chain power_source_changes_action_chain_pp_disabled = { - "Power source change - PowerPlay disabled", - power_source_change_event_pp_disabled -}; - -static const pem_event_action * const power_source_change_event_hardware_dc[] = { - set_power_source_tasks, - set_power_saving_state_tasks, - adjust_power_state_tasks, - enable_disable_fps_tasks, - reset_hardware_dc_notification_tasks, - set_nbmcu_state_tasks, - broadcast_power_policy_tasks, - NULL -}; - -const struct action_chain power_source_change_action_chain_hardware_dc = { - "Power source change - with Hardware DC switching", - power_source_change_event_hardware_dc -}; - -static const pem_event_action * const suspend_event[] = { - reset_display_phy_access_tasks, - unregister_interrupt_tasks, - disable_gfx_voltage_island_power_gating_tasks, - disable_gfx_clock_gating_tasks, - notify_smu_suspend_tasks, - disable_smc_firmware_ctf_tasks, - set_boot_state_tasks, - adjust_power_state_tasks, - disable_fps_tasks, - vari_bright_suspend_tasks, - reset_fan_speed_to_default_tasks, - power_down_asic_tasks, - disable_stutter_mode_tasks, - set_connected_standby_tasks, - block_hw_access_tasks, - NULL -}; - -const struct action_chain suspend_action_chain = { - "Suspend", - suspend_event -}; - -static const pem_event_action * const resume_event[] = { - unblock_hw_access_tasks, - resume_connected_standby_tasks, - notify_smu_resume_tasks, - reset_display_configCounter_tasks, - update_dal_configuration_tasks, - vari_bright_resume_tasks, - setup_asic_tasks, - enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */ - enable_dynamic_state_management_tasks, - enable_disable_bapm_tasks, - initialize_thermal_controller_tasks, - get_2d_performance_state_tasks, - 
set_performance_state_tasks, - adjust_power_state_tasks, - enable_disable_fps_tasks, - notify_hw_power_source_tasks, - process_vbios_event_info_tasks, - enable_gfx_clock_gating_tasks, - enable_gfx_voltage_island_power_gating_tasks, - reset_clock_gating_tasks, - notify_smu_vpu_recovery_end_tasks, - disable_vpu_cap_tasks, - execute_escape_sequence_tasks, - NULL -}; - - -const struct action_chain resume_action_chain = { - "resume", - resume_event -}; - -static const pem_event_action * const complete_init_event[] = { - unblock_adjust_power_state_tasks, - adjust_power_state_tasks, - enable_gfx_clock_gating_tasks, - enable_gfx_voltage_island_power_gating_tasks, - notify_power_state_change_tasks, - NULL -}; - -const struct action_chain complete_init_action_chain = { - "complete init", - complete_init_event -}; - -static const pem_event_action * const enable_gfx_clock_gating_event[] = { - enable_gfx_clock_gating_tasks, - NULL -}; - -const struct action_chain enable_gfx_clock_gating_action_chain = { - "enable gfx clock gate", - enable_gfx_clock_gating_event -}; - -static const pem_event_action * const disable_gfx_clock_gating_event[] = { - disable_gfx_clock_gating_tasks, - NULL -}; - -const struct action_chain disable_gfx_clock_gating_action_chain = { - "disable gfx clock gate", - disable_gfx_clock_gating_event -}; - -static const pem_event_action * const enable_cgpg_event[] = { - enable_cgpg_tasks, - NULL -}; - -const struct action_chain enable_cgpg_action_chain = { - "eable cg pg", - enable_cgpg_event -}; - -static const pem_event_action * const disable_cgpg_event[] = { - disable_cgpg_tasks, - NULL -}; - -const struct action_chain disable_cgpg_action_chain = { - "disable cg pg", - disable_cgpg_event -}; - - -/* Enable user _2d performance and activate */ - -static const pem_event_action * const enable_user_state_event[] = { - create_new_user_performance_state_tasks, - adjust_power_state_tasks, - NULL -}; - -const struct action_chain enable_user_state_action_chain = { - "Enable user state", - enable_user_state_event -}; - -static const pem_event_action * const enable_user_2d_performance_event[] = { - enable_user_2d_performance_tasks, - add_user_2d_performance_state_tasks, - set_performance_state_tasks, - adjust_power_state_tasks, - delete_user_2d_performance_state_tasks, - NULL -}; - -const struct action_chain enable_user_2d_performance_action_chain = { - "enable_user_2d_performance_event_activate", - enable_user_2d_performance_event -}; - - -static const pem_event_action * const disable_user_2d_performance_event[] = { - disable_user_2d_performance_tasks, - delete_user_2d_performance_state_tasks, - NULL -}; - -const struct action_chain disable_user_2d_performance_action_chain = { - "disable_user_2d_performance_event", - disable_user_2d_performance_event -}; - - -static const pem_event_action * const display_config_change_event[] = { - /* countDisplayConfigurationChangeEventTasks, */ - unblock_adjust_power_state_tasks, - set_cpu_power_state, - notify_hw_power_source_tasks, - get_2d_performance_state_tasks, - set_performance_state_tasks, - /* updateDALConfigurationTasks, - variBrightDisplayConfigurationChangeTasks, */ - adjust_power_state_tasks, - /*enableDisableFPSTasks, - setNBMCUStateTasks, - notifyPCIEDeviceReadyTasks,*/ - NULL -}; - -const struct action_chain display_config_change_action_chain = { - "Display configuration change", - display_config_change_event -}; - -static const pem_event_action * const readjust_power_state_event[] = { - adjust_power_state_tasks, - NULL -}; - -const struct 
action_chain readjust_power_state_action_chain = { - "re-adjust power state", - readjust_power_state_event -}; - diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h deleted file mode 100644 index f181e53cdcda..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef _EVENT_ACTION_CHAINS_H_ -#define _EVENT_ACTION_CHAINS_H_ -#include "eventmgr.h" - -extern const struct action_chain initialize_action_chain; - -extern const struct action_chain uninitialize_action_chain; - -extern const struct action_chain power_source_change_action_chain_pp_enabled; - -extern const struct action_chain power_source_changes_action_chain_pp_disabled; - -extern const struct action_chain power_source_change_action_chain_hardware_dc; - -extern const struct action_chain suspend_action_chain; - -extern const struct action_chain resume_action_chain; - -extern const struct action_chain complete_init_action_chain; - -extern const struct action_chain enable_gfx_clock_gating_action_chain; - -extern const struct action_chain disable_gfx_clock_gating_action_chain; - -extern const struct action_chain enable_cgpg_action_chain; - -extern const struct action_chain disable_cgpg_action_chain; - -extern const struct action_chain enable_user_2d_performance_action_chain; - -extern const struct action_chain disable_user_2d_performance_action_chain; - -extern const struct action_chain enable_user_state_action_chain; - -extern const struct action_chain readjust_power_state_action_chain; - -extern const struct action_chain display_config_change_action_chain; - -#endif /*_EVENT_ACTION_CHAINS_H_*/ - diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c deleted file mode 100644 index a3cd230d636d..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include "eventmgr.h" -#include "eventinit.h" -#include "ppinterrupt.h" -#include "hardwaremanager.h" - -void pem_init_feature_info(struct pp_eventmgr *eventmgr) -{ - - /* PowerPlay info */ - eventmgr->ui_state_info[PP_PowerSource_AC].default_ui_lable = - PP_StateUILabel_Performance; - - eventmgr->ui_state_info[PP_PowerSource_AC].current_ui_label = - PP_StateUILabel_Performance; - - eventmgr->ui_state_info[PP_PowerSource_DC].default_ui_lable = - PP_StateUILabel_Battery; - - eventmgr->ui_state_info[PP_PowerSource_DC].current_ui_label = - PP_StateUILabel_Battery; - - if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_PowerPlaySupport)) { - eventmgr->features[PP_Feature_PowerPlay].supported = true; - eventmgr->features[PP_Feature_PowerPlay].version = PEM_CURRENT_POWERPLAY_FEATURE_VERSION; - eventmgr->features[PP_Feature_PowerPlay].enabled_default = true; - eventmgr->features[PP_Feature_PowerPlay].enabled = true; - } else { - eventmgr->features[PP_Feature_PowerPlay].supported = false; - eventmgr->features[PP_Feature_PowerPlay].enabled = false; - eventmgr->features[PP_Feature_PowerPlay].enabled_default = false; - } - - eventmgr->features[PP_Feature_Force3DClock].supported = true; - eventmgr->features[PP_Feature_Force3DClock].enabled = false; - eventmgr->features[PP_Feature_Force3DClock].enabled_default = false; - eventmgr->features[PP_Feature_Force3DClock].version = 1; - - /* over drive*/ - eventmgr->features[PP_Feature_User2DPerformance].version = 4; - eventmgr->features[PP_Feature_User3DPerformance].version = 4; - eventmgr->features[PP_Feature_OverdriveTest].version = 4; - - eventmgr->features[PP_Feature_OverDrive].version = 4; - eventmgr->features[PP_Feature_OverDrive].enabled = false; - eventmgr->features[PP_Feature_OverDrive].enabled_default = false; - - eventmgr->features[PP_Feature_User2DPerformance].supported = false; - eventmgr->features[PP_Feature_User2DPerformance].enabled = false; - eventmgr->features[PP_Feature_User2DPerformance].enabled_default = false; - - eventmgr->features[PP_Feature_User3DPerformance].supported = false; - eventmgr->features[PP_Feature_User3DPerformance].enabled = false; - eventmgr->features[PP_Feature_User3DPerformance].enabled_default = false; - - eventmgr->features[PP_Feature_OverdriveTest].supported = false; - eventmgr->features[PP_Feature_OverdriveTest].enabled = false; - eventmgr->features[PP_Feature_OverdriveTest].enabled_default = false; 
- - eventmgr->features[PP_Feature_OverDrive].supported = false; - - eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled_default = false; - eventmgr->features[PP_Feature_PowerBudgetWaiver].version = 1; - eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false; - eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled = false; - - /* Multi UVD States support */ - eventmgr->features[PP_Feature_MultiUVDState].supported = false; - eventmgr->features[PP_Feature_MultiUVDState].enabled = false; - eventmgr->features[PP_Feature_MultiUVDState].enabled_default = false; - - /* Dynamic UVD States support */ - eventmgr->features[PP_Feature_DynamicUVDState].supported = false; - eventmgr->features[PP_Feature_DynamicUVDState].enabled = false; - eventmgr->features[PP_Feature_DynamicUVDState].enabled_default = false; - - /* VCE DPM support */ - eventmgr->features[PP_Feature_VCEDPM].supported = false; - eventmgr->features[PP_Feature_VCEDPM].enabled = false; - eventmgr->features[PP_Feature_VCEDPM].enabled_default = false; - - /* ACP PowerGating support */ - eventmgr->features[PP_Feature_ACP_POWERGATING].supported = false; - eventmgr->features[PP_Feature_ACP_POWERGATING].enabled = false; - eventmgr->features[PP_Feature_ACP_POWERGATING].enabled_default = false; - - /* PPM support */ - eventmgr->features[PP_Feature_PPM].version = 1; - eventmgr->features[PP_Feature_PPM].supported = false; - eventmgr->features[PP_Feature_PPM].enabled = false; - - /* FFC support (enables fan and temp settings, Gemini needs temp settings) */ - if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport) || - phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_GeminiRegulatorFanControlSupport)) { - eventmgr->features[PP_Feature_FFC].version = 1; - eventmgr->features[PP_Feature_FFC].supported = true; - eventmgr->features[PP_Feature_FFC].enabled = true; - eventmgr->features[PP_Feature_FFC].enabled_default = true; - } else { - eventmgr->features[PP_Feature_FFC].supported = false; - eventmgr->features[PP_Feature_FFC].enabled = false; - eventmgr->features[PP_Feature_FFC].enabled_default = false; - } - - eventmgr->features[PP_Feature_VariBright].supported = false; - eventmgr->features[PP_Feature_VariBright].enabled = false; - eventmgr->features[PP_Feature_VariBright].enabled_default = false; - - eventmgr->features[PP_Feature_BACO].supported = false; - eventmgr->features[PP_Feature_BACO].supported = false; - eventmgr->features[PP_Feature_BACO].enabled_default = false; - - /* PowerDown feature support */ - eventmgr->features[PP_Feature_PowerDown].supported = false; - eventmgr->features[PP_Feature_PowerDown].enabled = false; - eventmgr->features[PP_Feature_PowerDown].enabled_default = false; - - eventmgr->features[PP_Feature_FPS].version = 1; - eventmgr->features[PP_Feature_FPS].supported = false; - eventmgr->features[PP_Feature_FPS].enabled_default = false; - eventmgr->features[PP_Feature_FPS].enabled = false; - - eventmgr->features[PP_Feature_ViPG].version = 1; - eventmgr->features[PP_Feature_ViPG].supported = false; - eventmgr->features[PP_Feature_ViPG].enabled_default = false; - eventmgr->features[PP_Feature_ViPG].enabled = false; -} - -static int thermal_interrupt_callback(void *private_data, - unsigned src_id, const uint32_t *iv_entry) -{ - /* TO DO hanle PEM_Event_ThermalNotification (struct pp_eventmgr *)private_data*/ - pr_info("current thermal is out of range \n"); - return 0; -} - -int pem_register_interrupts(struct pp_eventmgr *eventmgr) -{ - 
int result = 0; - struct pp_interrupt_registration_info info; - - info.call_back = thermal_interrupt_callback; - info.context = eventmgr; - - result = phm_register_thermal_interrupt(eventmgr->hwmgr, &info); - - /* TODO: - * 2. Register CTF event interrupt - * 3. Register for vbios events interrupt - * 4. Register External Throttle Interrupt - * 5. Register Smc To Host Interrupt - * */ - return result; -} - - -int pem_unregister_interrupts(struct pp_eventmgr *eventmgr) -{ - return 0; -} - - -void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr) -{ - eventmgr->features[PP_Feature_MultiUVDState].supported = false; - eventmgr->features[PP_Feature_VariBright].supported = false; - eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false; - eventmgr->features[PP_Feature_OverDrive].supported = false; - eventmgr->features[PP_Feature_OverdriveTest].supported = false; - eventmgr->features[PP_Feature_User3DPerformance].supported = false; - eventmgr->features[PP_Feature_User2DPerformance].supported = false; - eventmgr->features[PP_Feature_PowerPlay].supported = false; - eventmgr->features[PP_Feature_Force3DClock].supported = false; -} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c deleted file mode 100644 index cd1ca07ef7f7..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include "eventmanagement.h" -#include "eventmgr.h" -#include "eventactionchains.h" - -int pem_init_event_action_chains(struct pp_eventmgr *eventmgr) -{ - int i; - - for (i = 0; i < AMD_PP_EVENT_MAX; i++) - eventmgr->event_chain[i] = NULL; - - eventmgr->event_chain[AMD_PP_EVENT_SUSPEND] = pem_get_suspend_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_INITIALIZE] = pem_get_initialize_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_UNINITIALIZE] = pem_get_uninitialize_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_POWER_SOURCE_CHANGE] = pem_get_power_source_change_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_HIBERNATE] = pem_get_hibernate_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_RESUME] = pem_get_resume_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_THERMAL_NOTIFICATION] = pem_get_thermal_notification_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_VBIOS_NOTIFICATION] = pem_get_vbios_notification_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENTER_THERMAL_STATE] = pem_get_enter_thermal_state_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_EXIT_THERMAL_STATE] = pem_get_exit_thermal_state_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENABLE_POWER_PLAY] = pem_get_enable_powerplay_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_DISABLE_POWER_PLAY] = pem_get_disable_powerplay_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST] = pem_get_enable_overdrive_test_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST] = pem_get_disable_overdrive_test_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING] = pem_get_enable_gfx_clock_gating_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING] = pem_get_disable_gfx_clock_gating_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENABLE_CGPG] = pem_get_enable_cgpg_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_DISABLE_CGPG] = pem_get_disable_cgpg_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_COMPLETE_INIT] = pem_get_complete_init_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_SCREEN_ON] = pem_get_screen_on_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_SCREEN_OFF] = pem_get_screen_off_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_PRE_SUSPEND] = pem_get_pre_suspend_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_PRE_RESUME] = pem_get_pre_resume_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENABLE_USER_STATE] = pem_enable_user_state_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_READJUST_POWER_STATE] = pem_readjust_power_state_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE] = pem_display_config_change_action_chain(eventmgr); - return 0; -} - -int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data) -{ - const pem_event_action * const *paction_chain; - const pem_event_action *psub_chain; - int tmp_result = 0; - int result = 0; - - if (eventmgr == NULL || event_chain == NULL || event_data == NULL) - return -EINVAL; - - for (paction_chain = event_chain->action_chain; NULL != *paction_chain; paction_chain++) { - if (0 != result) - return result; - - for (psub_chain = *paction_chain; NULL != *psub_chain; psub_chain++) { - tmp_result = 
(*psub_chain)(eventmgr, event_data); - if (0 == result) - result = tmp_result; - } - } - - return result; -} - -const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr) -{ - return &suspend_action_chain; -} - -const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr) -{ - return &initialize_action_chain; -} - -const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr) -{ - return &uninitialize_action_chain; -} - -const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr) -{ - return &power_source_change_action_chain_pp_enabled; /* other case base on feature info*/ -} - -const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr) -{ - return &resume_action_chain; -} - -const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr) -{ - return &enable_gfx_clock_gating_action_chain; -} - -const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr) -{ - return &disable_gfx_clock_gating_action_chain; -} - -const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr) -{ - return &enable_cgpg_action_chain; -} - -const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr) -{ - return &disable_cgpg_action_chain; -} - -const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr) -{ - return &complete_init_action_chain; -} - -const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr) -{ - return &enable_user_state_action_chain; -} - -const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr) -{ - return &readjust_power_state_action_chain; -} - -const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr) -{ - return &display_config_change_action_chain; -} diff --git 
a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h deleted file mode 100644 index 383d4b295aa9..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef _EVENT_MANAGEMENT_H_ -#define _EVENT_MANAGEMENT_H_ - -#include "eventmgr.h" - -int pem_init_event_action_chains(struct pp_eventmgr *eventmgr); -int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data); -const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr); 
-const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr); - -extern const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr); -extern const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr); - - -#endif /* _EVENT_MANAGEMENT_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c deleted file mode 100644 index 3e3ca03bd344..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include "eventmgr.h" -#include "hwmgr.h" -#include "eventinit.h" -#include "eventmanagement.h" - -static int pem_init(struct pp_eventmgr *eventmgr) -{ - int result = 0; - struct pem_event_data event_data = { {0} }; - - /* Initialize PowerPlay feature info */ - pem_init_feature_info(eventmgr); - - /* Initialize event action chains */ - pem_init_event_action_chains(eventmgr); - - /* Call initialization event */ - result = pem_handle_event(eventmgr, AMD_PP_EVENT_INITIALIZE, &event_data); - - /* if (0 != result) - return result; */ - - /* Register interrupt callback functions */ - result = pem_register_interrupts(eventmgr); - return 0; -} - -static void pem_fini(struct pp_eventmgr *eventmgr) -{ - struct pem_event_data event_data = { {0} }; - - pem_uninit_featureInfo(eventmgr); - pem_unregister_interrupts(eventmgr); - - pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); -} - -int eventmgr_early_init(struct pp_instance *handle) -{ - struct pp_eventmgr *eventmgr; - - if (handle == NULL) - return -EINVAL; - - eventmgr = kzalloc(sizeof(struct pp_eventmgr), GFP_KERNEL); - if (eventmgr == NULL) - return -ENOMEM; - - eventmgr->hwmgr = handle->hwmgr; - handle->eventmgr = eventmgr; - - eventmgr->platform_descriptor = &(eventmgr->hwmgr->platform_descriptor); - eventmgr->pp_eventmgr_init = pem_init; - eventmgr->pp_eventmgr_fini = pem_fini; - - return 0; -} - -static int pem_handle_event_unlocked(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *data) -{ - if (eventmgr == NULL || event >= AMD_PP_EVENT_MAX || data == NULL) - return -EINVAL; - - return pem_excute_event_chain(eventmgr, eventmgr->event_chain[event], data); -} - -int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *event_data) -{ - int r = 0; - - r = pem_handle_event_unlocked(eventmgr, event, event_data); - - return r; -} - -bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr) -{ - return (eventmgr->block_adjust_power_state || phm_is_hw_access_blocked(eventmgr->hwmgr)); -} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c deleted file mode 100644 index b82c43af59ab..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "eventmgr.h" -#include "eventsubchains.h" -#include "eventtasks.h" -#include "hardwaremanager.h" - -const pem_event_action reset_display_phy_access_tasks[] = { - pem_task_reset_display_phys_access, - NULL -}; - -const pem_event_action broadcast_power_policy_tasks[] = { - /* PEM_Task_BroadcastPowerPolicyChange, */ - NULL -}; - -const pem_event_action unregister_interrupt_tasks[] = { - pem_task_unregister_interrupts, - NULL -}; - -/* Disable GFX Voltage Islands Power Gating */ -const pem_event_action disable_gfx_voltage_island_powergating_tasks[] = { - pem_task_disable_voltage_island_power_gating, - NULL -}; - -const pem_event_action disable_gfx_clockgating_tasks[] = { - pem_task_disable_gfx_clock_gating, - NULL -}; - -const pem_event_action block_adjust_power_state_tasks[] = { - pem_task_block_adjust_power_state, - NULL -}; - - -const pem_event_action unblock_adjust_power_state_tasks[] = { - pem_task_unblock_adjust_power_state, - NULL -}; - -const pem_event_action set_performance_state_tasks[] = { - pem_task_set_performance_state, - NULL -}; - -const pem_event_action get_2d_performance_state_tasks[] = { - pem_task_get_2D_performance_state_id, - NULL -}; - -const pem_event_action conditionally_force3D_performance_state_tasks[] = { - pem_task_conditionally_force_3d_performance_state, - NULL -}; - -const pem_event_action process_vbios_eventinfo_tasks[] = { - /* PEM_Task_ProcessVbiosEventInfo,*/ - NULL -}; - -const pem_event_action enable_dynamic_state_management_tasks[] = { - /* PEM_Task_ResetBAPMPolicyChangedFlag,*/ - pem_task_get_boot_state_id, - pem_task_enable_dynamic_state_management, - pem_task_register_interrupts, - NULL -}; - -const pem_event_action enable_clock_power_gatings_tasks[] = { - pem_task_enable_clock_power_gatings_tasks, - pem_task_powerdown_uvd_tasks, - pem_task_powerdown_vce_tasks, - NULL -}; - -const pem_event_action setup_asic_tasks[] = { - pem_task_setup_asic, - NULL -}; - -const pem_event_action power_budget_tasks[] = { - /* TODO - * PEM_Task_PowerBudgetWaiverAvailable, - * PEM_Task_PowerBudgetWarningMessage, - * PEM_Task_PruneStatesBasedOnPowerBudget, - */ - NULL -}; - -const pem_event_action system_config_tasks[] = { - /* PEM_Task_PruneStatesBasedOnSystemConfig,*/ - NULL -}; - - -const pem_event_action conditionally_force_3d_performance_state_tasks[] = { - pem_task_conditionally_force_3d_performance_state, - NULL -}; - -const pem_event_action ungate_all_display_phys_tasks[] = { - /* PEM_Task_GetDisplayPhyAccessInfo */ - NULL -}; - -const pem_event_action uninitialize_display_phy_access_tasks[] = { - /* PEM_Task_UninitializeDisplayPhysAccess, */ - NULL -}; - -const pem_event_action disable_gfx_voltage_island_power_gating_tasks[] = { - /* PEM_Task_DisableVoltageIslandPowerGating, */ - NULL -}; - -const pem_event_action disable_gfx_clock_gating_tasks[] = { - pem_task_disable_gfx_clock_gating, - NULL -}; - -const pem_event_action set_boot_state_tasks[] = { - pem_task_get_boot_state_id, - pem_task_set_boot_state, - NULL -}; - -const pem_event_action adjust_power_state_tasks[] = { - pem_task_notify_hw_mgr_display_configuration_change, - pem_task_adjust_power_state, - pem_task_notify_smc_display_config_after_power_state_adjustment, - pem_task_update_allowed_performance_levels, - 
/* to do pem_task_Enable_disable_bapm, */ - NULL -}; - -const pem_event_action disable_dynamic_state_management_tasks[] = { - pem_task_unregister_interrupts, - pem_task_get_boot_state_id, - pem_task_disable_dynamic_state_management, - NULL -}; - -const pem_event_action disable_clock_power_gatings_tasks[] = { - pem_task_disable_clock_power_gatings_tasks, - NULL -}; - -const pem_event_action cleanup_asic_tasks[] = { - /* PEM_Task_DisableFPS,*/ - pem_task_cleanup_asic, - NULL -}; - -const pem_event_action prepare_for_pnp_stop_tasks[] = { - /* PEM_Task_PrepareForPnpStop,*/ - NULL -}; - -const pem_event_action set_power_source_tasks[] = { - pem_task_set_power_source, - pem_task_notify_hw_of_power_source, - NULL -}; - -const pem_event_action set_power_saving_state_tasks[] = { - pem_task_reset_power_saving_state, - pem_task_get_power_saving_state, - pem_task_set_power_saving_state, - /* PEM_Task_ResetODDCState, - * PEM_Task_GetODDCState, - * PEM_Task_SetODDCState,*/ - NULL -}; - -const pem_event_action enable_disable_fps_tasks[] = { - /* PEM_Task_EnableDisableFPS,*/ - NULL -}; - -const pem_event_action set_nbmcu_state_tasks[] = { - /* PEM_Task_NBMCUStateChange,*/ - NULL -}; - -const pem_event_action reset_hardware_dc_notification_tasks[] = { - /* PEM_Task_ResetHardwareDCNotification,*/ - NULL -}; - - -const pem_event_action notify_smu_suspend_tasks[] = { - /* PEM_Task_NotifySMUSuspend,*/ - NULL -}; - -const pem_event_action disable_smc_firmware_ctf_tasks[] = { - pem_task_disable_smc_firmware_ctf, - NULL -}; - -const pem_event_action disable_fps_tasks[] = { - /* PEM_Task_DisableFPS,*/ - NULL -}; - -const pem_event_action vari_bright_suspend_tasks[] = { - /* PEM_Task_VariBright_Suspend,*/ - NULL -}; - -const pem_event_action reset_fan_speed_to_default_tasks[] = { - /* PEM_Task_ResetFanSpeedToDefault,*/ - NULL -}; - -const pem_event_action power_down_asic_tasks[] = { - /* PEM_Task_DisableFPS,*/ - pem_task_power_down_asic, - NULL -}; - -const pem_event_action disable_stutter_mode_tasks[] = { - /* PEM_Task_DisableStutterMode,*/ - NULL -}; - -const pem_event_action set_connected_standby_tasks[] = { - /* PEM_Task_SetConnectedStandby,*/ - NULL -}; - -const pem_event_action block_hw_access_tasks[] = { - pem_task_block_hw_access, - NULL -}; - -const pem_event_action unblock_hw_access_tasks[] = { - pem_task_un_block_hw_access, - NULL -}; - -const pem_event_action resume_connected_standby_tasks[] = { - /* PEM_Task_ResumeConnectedStandby,*/ - NULL -}; - -const pem_event_action notify_smu_resume_tasks[] = { - /* PEM_Task_NotifySMUResume,*/ - NULL -}; - -const pem_event_action reset_display_configCounter_tasks[] = { - pem_task_reset_display_phys_access, - NULL -}; - -const pem_event_action update_dal_configuration_tasks[] = { - /* PEM_Task_CheckVBlankTime,*/ - NULL -}; - -const pem_event_action vari_bright_resume_tasks[] = { - /* PEM_Task_VariBright_Resume,*/ - NULL -}; - -const pem_event_action notify_hw_power_source_tasks[] = { - pem_task_notify_hw_of_power_source, - NULL -}; - -const pem_event_action process_vbios_event_info_tasks[] = { - /* PEM_Task_ProcessVbiosEventInfo,*/ - NULL -}; - -const pem_event_action enable_gfx_clock_gating_tasks[] = { - pem_task_enable_gfx_clock_gating, - NULL -}; - -const pem_event_action enable_gfx_voltage_island_power_gating_tasks[] = { - pem_task_enable_voltage_island_power_gating, - NULL -}; - -const pem_event_action reset_clock_gating_tasks[] = { - /* PEM_Task_ResetClockGating*/ - NULL -}; - -const pem_event_action notify_smu_vpu_recovery_end_tasks[] = { - /* 
PEM_Task_NotifySmuVPURecoveryEnd,*/ - NULL -}; - -const pem_event_action disable_vpu_cap_tasks[] = { - /* PEM_Task_DisableVPUCap,*/ - NULL -}; - -const pem_event_action execute_escape_sequence_tasks[] = { - /* PEM_Task_ExecuteEscapesequence,*/ - NULL -}; - -const pem_event_action notify_power_state_change_tasks[] = { - pem_task_notify_power_state_change, - NULL -}; - -const pem_event_action enable_cgpg_tasks[] = { - pem_task_enable_cgpg, - NULL -}; - -const pem_event_action disable_cgpg_tasks[] = { - pem_task_disable_cgpg, - NULL -}; - -const pem_event_action enable_user_2d_performance_tasks[] = { - /* PEM_Task_SetUser2DPerformanceFlag,*/ - /* PEM_Task_UpdateUser2DPerformanceEnableEvents,*/ - NULL -}; - -const pem_event_action add_user_2d_performance_state_tasks[] = { - /* PEM_Task_Get2DPerformanceTemplate,*/ - /* PEM_Task_AllocateNewPowerStateMemory,*/ - /* PEM_Task_CopyNewPowerStateInfo,*/ - /* PEM_Task_UpdateNewPowerStateClocks,*/ - /* PEM_Task_UpdateNewPowerStateUser2DPerformanceFlag,*/ - /* PEM_Task_AddPowerState,*/ - /* PEM_Task_ReleaseNewPowerStateMemory,*/ - NULL -}; - -const pem_event_action delete_user_2d_performance_state_tasks[] = { - /* PEM_Task_GetCurrentUser2DPerformanceStateID,*/ - /* PEM_Task_DeletePowerState,*/ - /* PEM_Task_SetCurrentUser2DPerformanceStateID,*/ - NULL -}; - -const pem_event_action disable_user_2d_performance_tasks[] = { - /* PEM_Task_ResetUser2DPerformanceFlag,*/ - /* PEM_Task_UpdateUser2DPerformanceDisableEvents,*/ - NULL -}; - -const pem_event_action enable_stutter_mode_tasks[] = { - pem_task_enable_stutter_mode, - NULL -}; - -const pem_event_action enable_disable_bapm_tasks[] = { - /*PEM_Task_EnableDisableBAPM,*/ - NULL -}; - -const pem_event_action reset_boot_state_tasks[] = { - pem_task_reset_boot_state, - NULL -}; - -const pem_event_action create_new_user_performance_state_tasks[] = { - pem_task_create_user_performance_state, - NULL -}; - -const pem_event_action initialize_thermal_controller_tasks[] = { - pem_task_initialize_thermal_controller, - NULL -}; - -const pem_event_action uninitialize_thermal_controller_tasks[] = { - pem_task_uninitialize_thermal_controller, - NULL -}; - -const pem_event_action set_cpu_power_state[] = { - pem_task_set_cpu_power_state, - NULL -};
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h deleted file mode 100644 index 7714cb927428..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _EVENT_SUB_CHAINS_H_ -#define _EVENT_SUB_CHAINS_H_ - -#include "eventmgr.h" - -extern const pem_event_action reset_display_phy_access_tasks[]; -extern const pem_event_action broadcast_power_policy_tasks[]; -extern const pem_event_action unregister_interrupt_tasks[]; -extern const pem_event_action disable_GFX_voltage_island_powergating_tasks[]; -extern const pem_event_action disable_GFX_clockgating_tasks[]; -extern const pem_event_action block_adjust_power_state_tasks[]; -extern const pem_event_action unblock_adjust_power_state_tasks[]; -extern const pem_event_action set_performance_state_tasks[]; -extern const pem_event_action get_2D_performance_state_tasks[]; -extern const pem_event_action conditionally_force3D_performance_state_tasks[]; -extern const pem_event_action process_vbios_eventinfo_tasks[]; -extern const pem_event_action enable_dynamic_state_management_tasks[]; -extern const pem_event_action enable_clock_power_gatings_tasks[]; -extern const pem_event_action conditionally_force3D_performance_state_tasks[]; -extern const pem_event_action setup_asic_tasks[]; -extern const pem_event_action power_budget_tasks[]; -extern const pem_event_action system_config_tasks[]; -extern const pem_event_action get_2d_performance_state_tasks[]; -extern const pem_event_action conditionally_force_3d_performance_state_tasks[]; -extern const pem_event_action ungate_all_display_phys_tasks[]; -extern const pem_event_action uninitialize_display_phy_access_tasks[]; -extern const pem_event_action disable_gfx_voltage_island_power_gating_tasks[]; -extern const pem_event_action disable_gfx_clock_gating_tasks[]; -extern const pem_event_action set_boot_state_tasks[]; -extern const pem_event_action adjust_power_state_tasks[]; -extern const pem_event_action disable_dynamic_state_management_tasks[]; -extern const pem_event_action disable_clock_power_gatings_tasks[]; -extern const pem_event_action cleanup_asic_tasks[]; -extern const pem_event_action prepare_for_pnp_stop_tasks[]; -extern const pem_event_action set_power_source_tasks[]; -extern const pem_event_action 
set_power_saving_state_tasks[]; -extern const pem_event_action enable_disable_fps_tasks[]; -extern const pem_event_action set_nbmcu_state_tasks[]; -extern const pem_event_action reset_hardware_dc_notification_tasks[]; -extern const pem_event_action notify_smu_suspend_tasks[]; -extern const pem_event_action disable_smc_firmware_ctf_tasks[]; -extern const pem_event_action disable_fps_tasks[]; -extern const pem_event_action vari_bright_suspend_tasks[]; -extern const pem_event_action reset_fan_speed_to_default_tasks[]; -extern const pem_event_action power_down_asic_tasks[]; -extern const pem_event_action disable_stutter_mode_tasks[]; -extern const pem_event_action set_connected_standby_tasks[]; -extern const pem_event_action block_hw_access_tasks[]; -extern const pem_event_action unblock_hw_access_tasks[]; -extern const pem_event_action resume_connected_standby_tasks[]; -extern const pem_event_action notify_smu_resume_tasks[]; -extern const pem_event_action reset_display_configCounter_tasks[]; -extern const pem_event_action update_dal_configuration_tasks[]; -extern const pem_event_action vari_bright_resume_tasks[]; -extern const pem_event_action notify_hw_power_source_tasks[]; -extern const pem_event_action process_vbios_event_info_tasks[]; -extern const pem_event_action enable_gfx_clock_gating_tasks[]; -extern const pem_event_action enable_gfx_voltage_island_power_gating_tasks[]; -extern const pem_event_action reset_clock_gating_tasks[]; -extern const pem_event_action notify_smu_vpu_recovery_end_tasks[]; -extern const pem_event_action disable_vpu_cap_tasks[]; -extern const pem_event_action execute_escape_sequence_tasks[]; -extern const pem_event_action notify_power_state_change_tasks[]; -extern const pem_event_action enable_cgpg_tasks[]; -extern const pem_event_action disable_cgpg_tasks[]; -extern const pem_event_action enable_user_2d_performance_tasks[]; -extern const pem_event_action add_user_2d_performance_state_tasks[]; -extern const pem_event_action delete_user_2d_performance_state_tasks[]; -extern const pem_event_action disable_user_2d_performance_tasks[]; -extern const pem_event_action enable_stutter_mode_tasks[]; -extern const pem_event_action enable_disable_bapm_tasks[]; -extern const pem_event_action reset_boot_state_tasks[]; -extern const pem_event_action create_new_user_performance_state_tasks[]; -extern const pem_event_action initialize_thermal_controller_tasks[]; -extern const pem_event_action uninitialize_thermal_controller_tasks[]; -extern const pem_event_action set_cpu_power_state[]; -#endif /* _EVENT_SUB_CHAINS_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c deleted file mode 100644 index 8c4ebaae1e0c..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c +++ /dev/null @@ -1,445 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "eventmgr.h" -#include "eventinit.h" -#include "eventmanagement.h" -#include "eventmanager.h" -#include "hardwaremanager.h" -#include "eventtasks.h" -#include "power_state.h" -#include "hwmgr.h" -#include "amd_powerplay.h" -#include "psm.h" - -#define TEMP_RANGE_MIN (90 * 1000) -#define TEMP_RANGE_MAX (120 * 1000) - -int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - - if (eventmgr == NULL || eventmgr->hwmgr == NULL) - return -EINVAL; - - if (pem_is_hw_access_blocked(eventmgr)) - return 0; - - phm_force_dpm_levels(eventmgr->hwmgr, eventmgr->hwmgr->dpm_level); - - return 0; -} - -/* eventtasks_generic.c */ -int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - struct pp_hwmgr *hwmgr; - - if (pem_is_hw_access_blocked(eventmgr)) - return 0; - - hwmgr = eventmgr->hwmgr; - if (event_data->pnew_power_state != NULL) - hwmgr->request_ps = event_data->pnew_power_state; - - if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) - psm_adjust_power_state_dynamic(eventmgr, event_data->skip_state_adjust_rules); - else - psm_adjust_power_state_static(eventmgr, event_data->skip_state_adjust_rules); - - return 0; -} - -int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_power_down_asic(eventmgr->hwmgr); -} - -int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID)) - return psm_set_states(eventmgr, &(event_data->requested_state_id)); - - return 0; -} - -int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return pem_unregister_interrupts(eventmgr); -} - -int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - int result; - - result = psm_get_state_by_classification(eventmgr, - PP_StateClassificationFlag_Boot, - &(event_data->requested_state_id) - ); - - if (0 == result) - pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); - else - pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); - - return result; -} - -int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return 
phm_enable_dynamic_state_management(eventmgr->hwmgr); -} - -int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_disable_dynamic_state_management(eventmgr->hwmgr); -} - -int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_enable_clock_power_gatings(eventmgr->hwmgr); -} - -int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_powerdown_uvd(eventmgr->hwmgr); -} - -int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - phm_powergate_uvd(eventmgr->hwmgr, true); - phm_powergate_vce(eventmgr->hwmgr, true); - return 0; -} - -int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - phm_disable_clock_power_gatings(eventmgr->hwmgr); - return 0; -} - -int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_disable_smc_firmware_ctf(eventmgr->hwmgr); -} - -int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_setup_asic(eventmgr->hwmgr); -} - -int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_store_dal_configuration(struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config) -{ - /* TODO */ - return 0; - /*phm_store_dal_configuration_data(eventmgr->hwmgr, display_config) */ -} - -int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - if (pem_is_hw_access_blocked(eventmgr)) - return 0; - - return phm_display_configuration_changed(eventmgr->hwmgr); -} - -int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return 0; -} - -int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - if (pem_is_hw_access_blocked(eventmgr)) - return 0; - - return phm_notify_smc_display_config_after_ps_adjustment(eventmgr->hwmgr); -} - -int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - eventmgr->block_adjust_power_state = true; - /* to do PHM_ResetIPSCounter(pEventMgr->pHwMgr);*/ - return 0; -} - -int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - eventmgr->block_adjust_power_state = false; - return 0; -} - -int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_set_cpu_power_state(struct pp_eventmgr 
*eventmgr, struct pem_event_data *event_data) -{ - return phm_set_cpu_power_state(eventmgr->hwmgr); -} - -/*powersaving*/ - -int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_enable_clock_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - - -int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - - -/* performance */ -int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID)) - return psm_set_states(eventmgr, &(event_data->requested_state_id)); - - return 0; -} - -int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - int result; - - if (eventmgr->features[PP_Feature_PowerPlay].supported && - !(eventmgr->features[PP_Feature_PowerPlay].enabled)) - result = psm_get_state_by_classification(eventmgr, - PP_StateClassificationFlag_Boot, - &(event_data->requested_state_id)); - else if (eventmgr->features[PP_Feature_User2DPerformance].enabled) - result = psm_get_state_by_classification(eventmgr, - PP_StateClassificationFlag_User2DPerformance, - &(event_data->requested_state_id)); - else - result = psm_get_ui_state(eventmgr, PP_StateUILabel_Performance, - &(event_data->requested_state_id)); - - if (0 == result) - pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); - else - pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); - - return result; -} - -int 
pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - struct pp_power_state *state; - int table_entries; - struct pp_hwmgr *hwmgr = eventmgr->hwmgr; - int i; - - table_entries = hwmgr->num_ps; - state = hwmgr->ps; - -restart_search: - for (i = 0; i < table_entries; i++) { - if (state->classification.ui_label & event_data->requested_ui_label) { - event_data->pnew_power_state = state; - return 0; - } - state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); - } - - switch (event_data->requested_ui_label) { - case PP_StateUILabel_Battery: - case PP_StateUILabel_Balanced: - event_data->requested_ui_label = PP_StateUILabel_Performance; - goto restart_search; - default: - break; - } - return -1; -} - -int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - struct PP_TemperatureRange range; - - range.max = TEMP_RANGE_MAX; - range.min = TEMP_RANGE_MIN; - - if (eventmgr == NULL || eventmgr->platform_descriptor == NULL) - return -EINVAL; - - if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ThermalController)) - return phm_start_thermal_controller(eventmgr->hwmgr, &range); - - return 0; -} - -int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_stop_thermal_controller(eventmgr->hwmgr); -} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h deleted file mode 100644 index 37e7ca5a58e0..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef _EVENT_TASKS_H_ -#define _EVENT_TASKS_H_ -#include "eventmgr.h" - -struct amd_display_configuration; - -/* eventtasks_generic.c */ -int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_store_dal_configuration (struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config); -int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -/*powersaving*/ - -int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int 
pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); - -/* performance */ -int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -/*thermal */ -int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); - -#endif /* _EVENT_TASKS_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c deleted file mode 100644 index 489908887e9c..000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include "psm.h" - -int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id) -{ - struct pp_power_state *state; - int table_entries; - struct pp_hwmgr *hwmgr = eventmgr->hwmgr; - int i; - - table_entries = hwmgr->num_ps; - state = hwmgr->ps; - - for (i = 0; i < table_entries; i++) { - if (state->classification.ui_label & ui_label) { - *state_id = state->id; - return 0; - } - state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); - } - return -1; -} - -int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id) -{ - struct pp_power_state *state; - int table_entries; - struct pp_hwmgr *hwmgr = eventmgr->hwmgr; - int i; - - table_entries = hwmgr->num_ps; - state = hwmgr->ps; - - for (i = 0; i < table_entries; i++) { - if (state->classification.flags & flag) { - *state_id = state->id; - return 0; - } - state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); - } - return -1; -} - -int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id) -{ - struct pp_power_state *state; - int table_entries; - struct pp_hwmgr *hwmgr = eventmgr->hwmgr; - int i; - - table_entries = hwmgr->num_ps; - - state = hwmgr->ps; - - for (i = 0; i < table_entries; i++) { - if (state->id == *state_id) { - memcpy(hwmgr->request_ps, state, hwmgr->ps_size); - return 0; - } - state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); - } - return -1; -} - -int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip) -{ - - struct pp_power_state *pcurrent; - struct pp_power_state *requested; - struct pp_hwmgr *hwmgr; - bool equal; - - if (skip) - return 0; - - hwmgr = eventmgr->hwmgr; - pcurrent = hwmgr->current_ps; - requested = hwmgr->request_ps; - - if (requested == NULL) - return 0; - - phm_apply_state_adjust_rules(hwmgr, requested, pcurrent); - - if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal))) - equal = false; - - if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) { - phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware); - memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size); - } - return 0; -} - -int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip) -{ - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index f0277c16c2bf..dc4bbcfe1243 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -2,14 +2,15 @@ # Makefile for the 'hw manager' sub-component of powerplay. # It provides the hardware management services for the driver. 
-HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ +HARDWARE_MGR = hwmgr.o processpptables.o \ hardwaremanager.o pp_acpi.o cz_hwmgr.o \ cz_clockpowergating.o pppcielanes.o\ process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ smu7_clockpowergating.o \ vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \ - vega10_thermal.o pp_overdriver.o rv_hwmgr.o + vega10_thermal.o rv_hwmgr.o pp_psm.o\ + pp_overdriver.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index b33935fcf428..44de0874629f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -103,16 +103,6 @@ int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr) return 0; } -static int cz_tf_uvd_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result) -{ - return 0; -} - -static int cz_tf_vce_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result) -{ - return 0; -} - int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -123,12 +113,12 @@ int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) PHM_PlatformCaps_UVDDPM)) { cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled; dpm_features |= UVD_DPM_MASK; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, dpm_features); } else { dpm_features |= UVD_DPM_MASK; cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, dpm_features); } return 0; @@ -144,12 +134,12 @@ int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) PHM_PlatformCaps_VCEDPM)) { cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled; dpm_features |= VCE_DPM_MASK; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, dpm_features); } else { dpm_features |= VCE_DPM_MASK; cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, dpm_features); } @@ -157,7 +147,7 @@ int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) } -int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -183,10 +173,9 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) cz_dpm_update_uvd_dpm(hwmgr, false); } - return 0; } -int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -215,29 +204,6 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) AMD_CG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); - return 0; } - - return 0; } - -static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = { - /*we don't need an exit table here, because there is only D3 cold on Kv*/ - { - .isFunctionNeededInRuntimeTable = phm_cf_want_uvd_power_gating, 
- .tableFunction = cz_tf_uvd_power_gating_initialize - }, - { - .isFunctionNeededInRuntimeTable = phm_cf_want_vce_power_gating, - .tableFunction = cz_tf_vce_power_gating_initialize - }, - /* to do { NULL, cz_tf_xdma_power_gating_enable }, */ - { } -}; - -const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = { - 0, - PHM_MasterTableFlag_None, - cz_enable_clock_power_gatings_list -}; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h index 1954ceaed439..92f707bc46e7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h @@ -29,8 +29,8 @@ extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master; -extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable); #endif /* _CZ_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index bc839ff0bdd0..73bb99d62a44 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -162,8 +162,8 @@ static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr) struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); if (cz_hwmgr->max_sclk_level == 0) { - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxSclkLevel); - cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr->smumgr) + 1; + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel); + cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1; } return cz_hwmgr->max_sclk_level; @@ -440,14 +440,7 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr) return 0; } -static int cz_tf_reset_active_process_mask(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) -{ - return 0; -} - -static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) { struct SMU8_Fusion_ClkTable *clock_table; int ret; @@ -469,7 +462,7 @@ static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input, if (!hwmgr->need_pp_table_upload) return 0; - ret = smum_download_powerplay_table(hwmgr->smumgr, &table); + ret = smum_download_powerplay_table(hwmgr, &table); PP_ASSERT_WITH_CODE((0 == ret && NULL != table), "Fail to get clock table from SMU!", return -EINVAL;); @@ -561,13 +554,12 @@ static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input, (uint8_t)dividers.pll_post_divider; } - ret = smum_upload_powerplay_table(hwmgr->smumgr); + ret = smum_upload_powerplay_table(hwmgr); return ret; } -static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct phm_clock_voltage_dependency_table *table = @@ -593,8 +585,7 @@ static int 
cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input, return 0; } -static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct phm_uvd_clock_voltage_dependency_table *table = @@ -607,8 +598,8 @@ static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input, cz_hwmgr->uvd_dpm.soft_min_clk = 0; cz_hwmgr->uvd_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxUvdLevel); - level = smum_get_argument(hwmgr->smumgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel); + level = smum_get_argument(hwmgr); if (level < table->count) clock = table->entries[level].vclk; @@ -621,8 +612,7 @@ static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input, return 0; } -static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_init_vce_limit(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct phm_vce_clock_voltage_dependency_table *table = @@ -635,8 +625,8 @@ static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input, cz_hwmgr->vce_dpm.soft_min_clk = 0; cz_hwmgr->vce_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxEclkLevel); - level = smum_get_argument(hwmgr->smumgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel); + level = smum_get_argument(hwmgr); if (level < table->count) clock = table->entries[level].ecclk; @@ -649,8 +639,7 @@ static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input, return 0; } -static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_init_acp_limit(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct phm_acp_clock_voltage_dependency_table *table = @@ -663,8 +652,8 @@ static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input, cz_hwmgr->acp_dpm.soft_min_clk = 0; cz_hwmgr->acp_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxAclkLevel); - level = smum_get_argument(hwmgr->smumgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel); + level = smum_get_argument(hwmgr); if (level < table->count) clock = table->entries[level].acpclk; @@ -676,8 +665,7 @@ static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input, return 0; } -static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -686,22 +674,16 @@ static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input, cz_hwmgr->samu_power_gated = false; cz_hwmgr->acp_power_gated = false; cz_hwmgr->pgacpinit = true; - - return 0; } -static int cz_tf_init_sclk_threshold(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); cz_hwmgr->low_sclk_interrupt_threshold = 0; - - return 0; } -static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) + +static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct 
phm_clock_voltage_dependency_table *table = @@ -727,7 +709,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) { cz_hwmgr->sclk_dpm.hard_min_clk = clock; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkHardMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.hard_min_clk, @@ -753,7 +735,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) { cz_hwmgr->sclk_dpm.soft_min_clk = clock; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_min_clk, @@ -764,7 +746,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, PHM_PlatformCaps_StablePState) && cz_hwmgr->sclk_dpm.soft_max_clk != clock) { cz_hwmgr->sclk_dpm.soft_max_clk = clock; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk, @@ -774,9 +756,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, return 0; } -static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { @@ -786,7 +766,7 @@ static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr, PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepSclk, clks); } @@ -794,77 +774,84 @@ static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr, return 0; } -static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWatermarkFrequency, cz_hwmgr->sclk_dpm.soft_max_clk); return 0; } -static int cz_tf_set_enabled_levels(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock) { + struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + + if (hw_data->is_nb_dpm_enabled) { + if (enable) { + PP_DBG_LOG("enable Low Memory PState.\n"); + + return smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnableLowMemoryPstate, + (lock ? 1 : 0)); + } else { + PP_DBG_LOG("disable Low Memory PState.\n"); + + return smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DisableLowMemoryPstate, + (lock ? 
1 : 0)); + } + } + return 0; } - -static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr) { int ret = 0; struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); unsigned long dpm_features = 0; - if (!cz_hwmgr->is_nb_dpm_enabled) { - PP_DBG_LOG("enabling ALL SMU features.\n"); + if (cz_hwmgr->is_nb_dpm_enabled) { + cz_nbdpm_pstate_enable_disable(hwmgr, true, true); dpm_features |= NB_DPM_MASK; ret = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - PPSMC_MSG_EnableAllSmuFeatures, + hwmgr, + PPSMC_MSG_DisableAllSmuFeatures, dpm_features); if (ret == 0) - cz_hwmgr->is_nb_dpm_enabled = true; + cz_hwmgr->is_nb_dpm_enabled = false; } return ret; } -static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock) +static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr) { - struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); - - if (hw_data->is_nb_dpm_enabled) { - if (enable) { - PP_DBG_LOG("enable Low Memory PState.\n"); + int ret = 0; - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_EnableLowMemoryPstate, - (lock ? 1 : 0)); - } else { - PP_DBG_LOG("disable Low Memory PState.\n"); + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + unsigned long dpm_features = 0; - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_DisableLowMemoryPstate, - (lock ? 1 : 0)); - } + if (!cz_hwmgr->is_nb_dpm_enabled) { + PP_DBG_LOG("enabling ALL SMU features.\n"); + dpm_features |= NB_DPM_MASK; + ret = smum_send_msg_to_smc_with_parameter( + hwmgr, + PPSMC_MSG_EnableAllSmuFeatures, + dpm_features); + if (ret == 0) + cz_hwmgr->is_nb_dpm_enabled = true; } - return 0; + return ret; } -static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input) { bool disable_switch; bool enable_low_mem_state; @@ -886,64 +873,64 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr, return 0; } -static const struct phm_master_table_item cz_set_power_state_list[] = { - { .tableFunction = cz_tf_update_sclk_limit }, - { .tableFunction = cz_tf_set_deep_sleep_sclk_threshold }, - { .tableFunction = cz_tf_set_watermark_threshold }, - { .tableFunction = cz_tf_set_enabled_levels }, - { .tableFunction = cz_tf_enable_nb_dpm }, - { .tableFunction = cz_tf_update_low_mem_pstate }, - { } -}; +static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) +{ + int ret = 0; -static const struct phm_master_table_header cz_set_power_state_master = { - 0, - PHM_MasterTableFlag_None, - cz_set_power_state_list -}; + cz_update_sclk_limit(hwmgr); + cz_set_deep_sleep_sclk_threshold(hwmgr); + cz_set_watermark_threshold(hwmgr); + ret = cz_enable_nb_dpm(hwmgr); + if (ret) + return ret; + cz_update_low_mem_pstate(hwmgr, input); -static const struct phm_master_table_item cz_setup_asic_list[] = { - { .tableFunction = cz_tf_reset_active_process_mask }, - { .tableFunction = cz_tf_upload_pptable_to_smu }, - { .tableFunction = cz_tf_init_sclk_limit }, - { .tableFunction = cz_tf_init_uvd_limit }, - { .tableFunction = cz_tf_init_vce_limit }, - { .tableFunction = cz_tf_init_acp_limit }, - { .tableFunction = cz_tf_init_power_gate_state }, - { .tableFunction = cz_tf_init_sclk_threshold }, - { } + return 0; }; -static const struct phm_master_table_header cz_setup_asic_master = { - 0, - 
PHM_MasterTableFlag_None, - cz_setup_asic_list -}; -static int cz_tf_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + int ret; + + ret = cz_upload_pptable_to_smu(hwmgr); + if (ret) + return ret; + ret = cz_init_sclk_limit(hwmgr); + if (ret) + return ret; + ret = cz_init_uvd_limit(hwmgr); + if (ret) + return ret; + ret = cz_init_vce_limit(hwmgr); + if (ret) + return ret; + ret = cz_init_acp_limit(hwmgr); + if (ret) + return ret; + + cz_init_power_gate_state(hwmgr); + cz_init_sclk_threshold(hwmgr); + + return 0; +} + +static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + hw_data->disp_clk_bypass_pending = false; hw_data->disp_clk_bypass = false; - - return 0; } -static int cz_tf_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); - hw_data->is_nb_dpm_enabled = false; - return 0; + hw_data->is_nb_dpm_enabled = false; } -static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); @@ -951,63 +938,73 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, hw_data->cc6_settings.cpu_pstate_separation_time = 0; hw_data->cc6_settings.cpu_cc6_disable = false; hw_data->cc6_settings.cpu_pstate_disable = false; - - return 0; } -static const struct phm_master_table_item cz_power_down_asic_list[] = { - { .tableFunction = cz_tf_power_up_display_clock_sys_pll }, - { .tableFunction = cz_tf_clear_nb_dpm_flag }, - { .tableFunction = cz_tf_reset_cc6_data }, - { } -}; - -static const struct phm_master_table_header cz_power_down_asic_master = { - 0, - PHM_MasterTableFlag_None, - cz_power_down_asic_list +static int cz_power_off_asic(struct pp_hwmgr *hwmgr) +{ + cz_power_up_display_clock_sys_pll(hwmgr); + cz_clear_nb_dpm_flag(hwmgr); + cz_reset_cc6_data(hwmgr); + return 0; }; -static int cz_tf_program_voting_clients(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static void cz_program_voting_clients(struct pp_hwmgr *hwmgr) { PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0); - return 0; } -static int cz_tf_start_dpm(struct pp_hwmgr *hwmgr, void *input, void *output, - void *storage, int result) +static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr) +{ + PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0); +} + +static int cz_start_dpm(struct pp_hwmgr *hwmgr) { - int res = 0xff; + int ret = 0; struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); unsigned long dpm_features = 0; cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled; dpm_features |= SCLK_DPM_MASK; - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, dpm_features); - return res; + return ret; } -static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_stop_dpm(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + unsigned long dpm_features = 0; + + if (cz_hwmgr->dpm_flags & 
DPMFlags_SCLK_Enabled) { + dpm_features |= SCLK_DPM_MASK; + cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled; + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DisableAllSmuFeatures, + dpm_features); + } + return ret; +} + +static int cz_program_bootup_state(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock; cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_min_clk, PPSMC_MSG_SetSclkSoftMin)); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk, @@ -1016,13 +1013,11 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input, return 0; } -static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); cz_hwmgr->acp_boot_level = 0xff; - return 0; } static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr, @@ -1031,67 +1026,52 @@ static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr, int result; unsigned long features; - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetFeatureStatus, 0); + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0); if (result == 0) { - features = smum_get_argument(hwmgr->smumgr); + features = smum_get_argument(hwmgr); if (features & check_feature) return true; } - return result; + return false; } -static int cz_tf_check_for_dpm_disabled(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static bool cz_check_for_dpm_enabled(struct pp_hwmgr *hwmgr) { if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn)) - return PP_Result_TableImmediateExit; - return 0; + return true; + return false; } -static int cz_tf_enable_didt(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { - /* TO DO */ - return 0; -} + if (!cz_check_for_dpm_enabled(hwmgr)) { + pr_info("dpm has been disabled\n"); + return 0; + } + cz_disable_nb_dpm(hwmgr); -static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) -{ - if (!cz_dpm_check_smu_features(hwmgr, - SMU_EnabledFeatureScoreboard_SclkDpmOn)) - return PP_Result_TableImmediateExit; - return 0; -} + cz_clear_voting_clients(hwmgr); + if (cz_stop_dpm(hwmgr)) + return -EINVAL; -static const struct phm_master_table_item cz_disable_dpm_list[] = { - { .tableFunction = cz_tf_check_for_dpm_enabled }, - { }, + return 0; }; +static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + if (cz_check_for_dpm_enabled(hwmgr)) { + pr_info("dpm has been enabled\n"); + return 0; + } -static const struct phm_master_table_header cz_disable_dpm_master = { - 0, - PHM_MasterTableFlag_None, - cz_disable_dpm_list -}; - -static const struct phm_master_table_item cz_enable_dpm_list[] = { - { .tableFunction = cz_tf_check_for_dpm_disabled }, - { .tableFunction = cz_tf_program_voting_clients }, - { .tableFunction = cz_tf_start_dpm }, - { .tableFunction = cz_tf_program_bootup_state 
}, - { .tableFunction = cz_tf_enable_didt }, - { .tableFunction = cz_tf_reset_acp_boot_level }, - { }, -}; + cz_program_voting_clients(hwmgr); + if (cz_start_dpm(hwmgr)) + return -EINVAL; + cz_program_bootup_state(hwmgr); + cz_reset_acp_boot_level(hwmgr); -static const struct phm_master_table_header cz_enable_dpm_master = { - 0, - PHM_MasterTableFlag_None, - cz_enable_dpm_list + return 0; }; static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, @@ -1138,7 +1118,11 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, cz_ps->action = cz_current_ps->action; - if (!force_high && (cz_ps->action == FORCE_HIGH)) + if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + cz_nbdpm_pstate_enable_disable(hwmgr, false, false); + else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) + cz_nbdpm_pstate_enable_disable(hwmgr, false, true); + else if (!force_high && (cz_ps->action == FORCE_HIGH)) cz_ps->action = CANCEL_FORCE_HIGH; else if (force_high && (cz_ps->action != FORCE_HIGH)) cz_ps->action = FORCE_HIGH; @@ -1173,62 +1157,16 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) cz_construct_boot_state(hwmgr); - result = phm_construct_table(hwmgr, &cz_setup_asic_master, - &(hwmgr->setup_asic)); - if (result != 0) { - pr_err("Fail to construct setup ASIC\n"); - return result; - } - - result = phm_construct_table(hwmgr, &cz_power_down_asic_master, - &(hwmgr->power_down_asic)); - if (result != 0) { - pr_err("Fail to construct power down ASIC\n"); - return result; - } - - result = phm_construct_table(hwmgr, &cz_disable_dpm_master, - &(hwmgr->disable_dynamic_state_management)); - if (result != 0) { - pr_err("Fail to disable_dynamic_state\n"); - return result; - } - result = phm_construct_table(hwmgr, &cz_enable_dpm_master, - &(hwmgr->enable_dynamic_state_management)); - if (result != 0) { - pr_err("Fail to enable_dynamic_state\n"); - return result; - } - result = phm_construct_table(hwmgr, &cz_set_power_state_master, - &(hwmgr->set_power_state)); - if (result != 0) { - pr_err("Fail to construct set_power_state\n"); - return result; - } hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS; - result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings)); - if (result != 0) { - pr_err("Fail to construct enable_clock_power_gatings\n"); - return result; - } return result; } static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { if (hwmgr != NULL) { - phm_destroy_table(hwmgr, &(hwmgr->enable_clock_power_gatings)); - phm_destroy_table(hwmgr, &(hwmgr->set_power_state)); - phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management)); - phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management)); - phm_destroy_table(hwmgr, &(hwmgr->power_down_asic)); - phm_destroy_table(hwmgr, &(hwmgr->setup_asic)); - - if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { - kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; - } + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; kfree(hwmgr->backend); hwmgr->backend = NULL; @@ -1240,13 +1178,13 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk, 
PPSMC_MSG_SetSclkSoftMin)); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk, @@ -1278,13 +1216,13 @@ static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) cz_hwmgr->sclk_dpm.soft_max_clk = clock; cz_hwmgr->sclk_dpm.hard_max_clk = clock; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_min_clk, PPSMC_MSG_SetSclkSoftMin)); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk, @@ -1297,13 +1235,13 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_min_clk, PPSMC_MSG_SetSclkSoftMax)); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_min_clk, @@ -1312,106 +1250,25 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) return 0; } -static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk) -{ - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetSclkSoftMin, - cz_get_sclk_level(hwmgr, - sclk, - PPSMC_MSG_SetSclkSoftMin)); - - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetSclkSoftMax, - cz_get_sclk_level(hwmgr, - sclk, - PPSMC_MSG_SetSclkSoftMax)); - return 0; -} - -static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk) -{ - struct phm_clock_voltage_dependency_table *table = - hwmgr->dyn_state.vddc_dependency_on_sclk; - int32_t tmp_sclk; - int32_t count; - - tmp_sclk = table->entries[table->count-1].clk * 70 / 100; - - for (count = table->count-1; count >= 0; count--) { - if (tmp_sclk >= table->entries[count].clk) { - tmp_sclk = table->entries[count].clk; - *sclk = tmp_sclk; - break; - } - } - if (count < 0) - *sclk = table->entries[0].clk; - - return 0; -} - static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { - uint32_t sclk = 0; int ret = 0; - uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; - - if (level == hwmgr->dpm_level) - return ret; - - if (!(hwmgr->dpm_level & profile_mode_mask)) { - /* enter profile mode, save current level, disable gfx cg*/ - if (level & profile_mode_mask) { - hwmgr->saved_dpm_level = hwmgr->dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); - } - } else { - /* exit profile mode, restore level, enable gfx cg*/ - if (!(level & profile_mode_mask)) { - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) - level = hwmgr->saved_dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_GATE); - } - } switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: ret = cz_phm_force_dpm_highest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_LOW: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: ret = 
cz_phm_force_dpm_lowest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_AUTO: ret = cz_phm_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; - break; - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: - ret = cz_get_profiling_clk(hwmgr, &sclk); - if (ret) - return ret; - hwmgr->dpm_level = level; - cz_phm_force_dpm_sclk(hwmgr, sclk); break; case AMD_DPM_FORCED_LEVEL_MANUAL: - hwmgr->dpm_level = level; - break; case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: break; @@ -1424,7 +1281,7 @@ int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); return 0; } @@ -1436,11 +1293,11 @@ int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDynamicPowerGating)) { return smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_UVDPowerON, 1); } else { return smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_UVDPowerON, 0); } } @@ -1457,11 +1314,12 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) if (!bgate) { /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { + PHM_PlatformCaps_StablePState) + || hwmgr->en_umd_pstate) { cz_hwmgr->uvd_dpm.hard_min_clk = ptable->entries[ptable->count - 1].vclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetUvdHardMin, cz_get_uvd_level(hwmgr, cz_hwmgr->uvd_dpm.hard_min_clk, @@ -1486,11 +1344,12 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { + PHM_PlatformCaps_StablePState) + || hwmgr->en_umd_pstate) { cz_hwmgr->vce_dpm.hard_min_clk = ptable->entries[ptable->count - 1].ecclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetEclkHardMin, cz_get_eclk_level(hwmgr, cz_hwmgr->vce_dpm.hard_min_clk, @@ -1498,15 +1357,15 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) } else { /*Program HardMin based on the vce_arbiter.ecclk */ if (hwmgr->vce_arbiter.ecclk == 0) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetEclkHardMin, 0); /* disable ECLK DPM 0. 
Otherwise VCE could hang if * switching SCLK from DPM 0 to 6/7 */ - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetEclkSoftMin, 1); } else { cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetEclkHardMin, cz_get_eclk_level(hwmgr, cz_hwmgr->vce_dpm.hard_min_clk, @@ -1520,7 +1379,7 @@ int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_VCEPowerOFF); return 0; } @@ -1529,19 +1388,19 @@ int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_VCEPowerON); return 0; } -static int cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); return cz_hwmgr->sys_info.bootup_uma_clock; } -static int cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) { struct pp_power_state *ps; struct cz_power_state *cz_ps; @@ -1679,7 +1538,7 @@ static void cz_hw_print_display_cfg( PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n", data); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDisplaySizePowerParams, data); } @@ -1744,10 +1603,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr, switch (type) { case PP_SCLK: - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, mask); break; @@ -1989,7 +1848,7 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, *((uint32_t *)value) = 0; return 0; case AMDGPU_PP_SENSOR_GPU_LOAD: - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity); + result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity); if (0 == result) { activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); activity_percent = activity_percent > 100 ? 
100 : activity_percent; @@ -2015,7 +1874,6 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, static const struct pp_hwmgr_func cz_hwmgr_funcs = { .backend_init = cz_hwmgr_backend_init, .backend_fini = cz_hwmgr_backend_fini, - .asic_setup = NULL, .apply_state_adjust_rules = cz_apply_state_adjust_rules, .force_dpm_level = cz_dpm_force_dpm_level, .get_power_state_size = cz_get_power_state_size, @@ -2037,6 +1895,11 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { .get_clock_by_type = cz_get_clock_by_type, .get_max_high_clocks = cz_get_max_high_clocks, .read_sensor = cz_read_sensor, + .power_off_asic = cz_power_off_asic, + .asic_setup = cz_setup_asic_task, + .dynamic_state_management_enable = cz_enable_dpm_tasks, + .power_state_set = cz_set_power_state_tasks, + .dynamic_state_management_disable = cz_disable_dpm_tasks, }; int cz_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c deleted file mode 100644 index bc7d8bd7e7cb..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include "hwmgr.h" - -static int phm_run_table(struct pp_hwmgr *hwmgr, - struct phm_runtime_table_header *rt_table, - void *input, - void *output, - void *temp_storage) -{ - int result = 0; - phm_table_function *function; - - if (rt_table->function_list == NULL) { - pr_debug("this function not implement!\n"); - return 0; - } - - for (function = rt_table->function_list; NULL != *function; function++) { - int tmp = (*function)(hwmgr, input, output, temp_storage, result); - - if (tmp == PP_Result_TableImmediateExit) - break; - if (tmp) { - if (0 == result) - result = tmp; - if (rt_table->exit_error) - break; - } - } - - return result; -} - -int phm_dispatch_table(struct pp_hwmgr *hwmgr, - struct phm_runtime_table_header *rt_table, - void *input, void *output) -{ - int result; - void *temp_storage; - - if (hwmgr == NULL || rt_table == NULL) { - pr_err("Invalid Parameter!\n"); - return -EINVAL; - } - - if (0 != rt_table->storage_size) { - temp_storage = kzalloc(rt_table->storage_size, GFP_KERNEL); - if (temp_storage == NULL) { - pr_err("Could not allocate table temporary storage\n"); - return -ENOMEM; - } - } else { - temp_storage = NULL; - } - - result = phm_run_table(hwmgr, rt_table, input, output, temp_storage); - - kfree(temp_storage); - - return result; -} - -int phm_construct_table(struct pp_hwmgr *hwmgr, - const struct phm_master_table_header *master_table, - struct phm_runtime_table_header *rt_table) -{ - uint32_t function_count = 0; - const struct phm_master_table_item *table_item; - uint32_t size; - phm_table_function *run_time_list; - phm_table_function *rtf; - - if (hwmgr == NULL || master_table == NULL || rt_table == NULL) { - pr_err("Invalid Parameter!\n"); - return -EINVAL; - } - - for (table_item = master_table->master_list; - NULL != table_item->tableFunction; table_item++) { - if ((NULL == table_item->isFunctionNeededInRuntimeTable) || - (table_item->isFunctionNeededInRuntimeTable(hwmgr))) - function_count++; - } - - size = (function_count + 1) * sizeof(phm_table_function); - run_time_list = kzalloc(size, GFP_KERNEL); - - if (NULL == run_time_list) - return -ENOMEM; - - rtf = run_time_list; - for (table_item = master_table->master_list; - NULL != table_item->tableFunction; table_item++) { - if ((rtf - run_time_list) > function_count) { - pr_err("Check function results have changed\n"); - kfree(run_time_list); - return -EINVAL; - } - - if ((NULL == table_item->isFunctionNeededInRuntimeTable) || - (table_item->isFunctionNeededInRuntimeTable(hwmgr))) { - *(rtf++) = table_item->tableFunction; - } - } - - if ((rtf - run_time_list) > function_count) { - pr_err("Check function results have changed\n"); - kfree(run_time_list); - return -EINVAL; - } - - *rtf = NULL; - rt_table->function_list = run_time_list; - rt_table->exit_error = (0 != (master_table->flags & PHM_MasterTableFlag_ExitOnError)); - rt_table->storage_size = master_table->storage_size; - return 0; -} - -int phm_destroy_table(struct pp_hwmgr *hwmgr, - struct phm_runtime_table_header *rt_table) -{ - if (hwmgr == NULL || rt_table == NULL) { - pr_err("Invalid Parameter\n"); - return -EINVAL; - } - - if (NULL == rt_table->function_list) - return 0; - - kfree(rt_table->function_list); - - rt_table->function_list = NULL; - rt_table->storage_size = 0; - rt_table->exit_error = false; - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 
fcc722ea7649..623cff90233d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -26,35 +26,22 @@ #include "hardwaremanager.h" #include "power_state.h" + +#define TEMP_RANGE_MIN (0) +#define TEMP_RANGE_MAX (80 * 1000) + #define PHM_FUNC_CHECK(hw) \ do { \ if ((hw) == NULL || (hw)->hwmgr_func == NULL) \ return -EINVAL; \ } while (0) -bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr) -{ - return hwmgr->block_hw_access; -} - -int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block) -{ - hwmgr->block_hw_access = block; - return 0; -} - int phm_setup_asic(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->asic_setup) - return hwmgr->hwmgr_func->asic_setup(hwmgr); - } else { - return phm_dispatch_table(hwmgr, &(hwmgr->setup_asic), - NULL, NULL); - } + if (NULL != hwmgr->hwmgr_func->asic_setup) + return hwmgr->hwmgr_func->asic_setup(hwmgr); return 0; } @@ -63,14 +50,8 @@ int phm_power_down_asic(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->power_off_asic) - return hwmgr->hwmgr_func->power_off_asic(hwmgr); - } else { - return phm_dispatch_table(hwmgr, &(hwmgr->power_down_asic), - NULL, NULL); - } + if (NULL != hwmgr->hwmgr_func->power_off_asic) + return hwmgr->hwmgr_func->power_off_asic(hwmgr); return 0; } @@ -86,13 +67,8 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr, states.pcurrent_state = pcurrent_state; states.pnew_state = pnew_power_state; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->power_state_set) - return hwmgr->hwmgr_func->power_state_set(hwmgr, &states); - } else { - return phm_dispatch_table(hwmgr, &(hwmgr->set_power_state), &states, NULL); - } + if (NULL != hwmgr->hwmgr_func->power_state_set) + return hwmgr->hwmgr_func->power_state_set(hwmgr, &states); return 0; } @@ -103,15 +79,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) bool enabled; PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) - ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); - } else { - ret = phm_dispatch_table(hwmgr, - &(hwmgr->enable_dynamic_state_management), - NULL, NULL); - } + if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) + ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); enabled = ret == 0; @@ -127,15 +96,8 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr) PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (hwmgr->hwmgr_func->dynamic_state_management_disable) - ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr); - } else { - ret = phm_dispatch_table(hwmgr, - &(hwmgr->disable_dynamic_state_management), - NULL, NULL); - } + if (hwmgr->hwmgr_func->dynamic_state_management_disable) + ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr); enabled = ret == 0 ? 
false : true; @@ -193,35 +155,13 @@ int phm_powerdown_uvd(struct pp_hwmgr *hwmgr) return 0; } -int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate) -{ - PHM_FUNC_CHECK(hwmgr); - - if (hwmgr->hwmgr_func->powergate_uvd != NULL) - return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); - return 0; -} - -int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate) -{ - PHM_FUNC_CHECK(hwmgr); - - if (hwmgr->hwmgr_func->powergate_vce != NULL) - return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); - return 0; -} - int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating) - return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr); - } else { - return phm_dispatch_table(hwmgr, &(hwmgr->enable_clock_power_gatings), NULL, NULL); - } + if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating) + return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr); + return 0; } @@ -229,11 +169,9 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating) - return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr); - } + if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating) + return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr); + return 0; } @@ -242,12 +180,9 @@ int phm_display_configuration_changed(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->display_config_changed) - hwmgr->hwmgr_func->display_config_changed(hwmgr); - } else - return phm_dispatch_table(hwmgr, &hwmgr->display_configuration_changed, NULL, NULL); + if (NULL != hwmgr->hwmgr_func->display_config_changed) + hwmgr->hwmgr_func->display_config_changed(hwmgr); + return 0; } @@ -255,9 +190,7 @@ int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) - if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment) + if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment) hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr); return 0; @@ -277,10 +210,10 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info) { PHM_FUNC_CHECK(hwmgr); - if (hwmgr->hwmgr_func->register_internal_thermal_interrupt == NULL) - return -EINVAL; + if (hwmgr->hwmgr_func->register_internal_thermal_interrupt != NULL) + return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info); - return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info); + return 0; } /** @@ -292,7 +225,21 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info) */ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range) { - return phm_dispatch_table(hwmgr, &(hwmgr->start_thermal_controller), temperature_range, NULL); + struct PP_TemperatureRange range; + + if (temperature_range == NULL) { + range.max = TEMP_RANGE_MAX; + range.min = TEMP_RANGE_MIN; + } else { + range.max = temperature_range->max; + range.min = 
temperature_range->min; + } + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController) + && hwmgr->hwmgr_func->start_thermal_controller != NULL) + return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range); + + return 0; } @@ -323,6 +270,9 @@ int phm_check_states_equal(struct pp_hwmgr *hwmgr, int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, const struct amd_pp_display_configuration *display_config) { + int index = 0; + int number_of_active_display = 0; + PHM_FUNC_CHECK(hwmgr); if (display_config == NULL) @@ -330,6 +280,17 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, hwmgr->display_config = *display_config; + if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk) + hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, hwmgr->display_config.min_dcef_deep_sleep_set_clk); + + for (index = 0; index < hwmgr->display_config.num_path_including_non_display; index++) { + if (hwmgr->display_config.displays[index].controller_id != 0) + number_of_active_display++; + } + + if (NULL != hwmgr->hwmgr_func->set_active_display_count) + hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display); + if (hwmgr->hwmgr_func->store_cc6_data == NULL) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 9547f265a8bb..73969f35846c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -26,8 +26,8 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/types.h> +#include <linux/pci.h> #include <drm/amdgpu_drm.h> -#include "cgs_common.h" #include "power_state.h" #include "hwmgr.h" #include "pppcielanes.h" @@ -35,21 +35,100 @@ #include "ppsmc.h" #include "pp_acpi.h" #include "amd_acpi.h" +#include "pp_psm.h" -extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr); +extern const struct pp_smumgr_func ci_smu_funcs; +extern const struct pp_smumgr_func cz_smu_funcs; +extern const struct pp_smumgr_func iceland_smu_funcs; +extern const struct pp_smumgr_func tonga_smu_funcs; +extern const struct pp_smumgr_func fiji_smu_funcs; +extern const struct pp_smumgr_func polaris10_smu_funcs; +extern const struct pp_smumgr_func vega10_smu_funcs; +extern const struct pp_smumgr_func rv_smu_funcs; +extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr); static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr); static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr); static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr); +static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr); uint8_t convert_to_vid(uint16_t vddc) { return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25); } +static int phm_get_pci_bus_devfn(struct pp_hwmgr *hwmgr, + struct cgs_system_info *sys_info) +{ + sys_info->size = sizeof(struct cgs_system_info); + sys_info->info_id = CGS_SYSTEM_INFO_PCIE_BUS_DEVFN; + + return cgs_query_system_info(hwmgr->device, sys_info); +} + +static int phm_thermal_l2h_irq(void *private_data, + unsigned src_id, const uint32_t *iv_entry) +{ + struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data; + struct cgs_system_info sys_info = {0}; + int result; + + result = phm_get_pci_bus_devfn(hwmgr, &sys_info); + if (result) + return -EINVAL; + + pr_warn("GPU over temperature range detected on PCIe 
%lld:%lld.%lld!\n", + PCI_BUS_NUM(sys_info.value), + PCI_SLOT(sys_info.value), + PCI_FUNC(sys_info.value)); + return 0; +} + +static int phm_thermal_h2l_irq(void *private_data, + unsigned src_id, const uint32_t *iv_entry) +{ + struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data; + struct cgs_system_info sys_info = {0}; + int result; + + result = phm_get_pci_bus_devfn(hwmgr, &sys_info); + if (result) + return -EINVAL; + + pr_warn("GPU under temperature range detected on PCIe %lld:%lld.%lld!\n", + PCI_BUS_NUM(sys_info.value), + PCI_SLOT(sys_info.value), + PCI_FUNC(sys_info.value)); + return 0; +} + +static int phm_ctf_irq(void *private_data, + unsigned src_id, const uint32_t *iv_entry) +{ + struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data; + struct cgs_system_info sys_info = {0}; + int result; + + result = phm_get_pci_bus_devfn(hwmgr, &sys_info); + if (result) + return -EINVAL; + + pr_warn("GPU Critical Temperature Fault detected on PCIe %lld:%lld.%lld!\n", + PCI_BUS_NUM(sys_info.value), + PCI_SLOT(sys_info.value), + PCI_FUNC(sys_info.value)); + return 0; +} + +static const struct cgs_irq_src_funcs thermal_irq_src[3] = { + {NULL, phm_thermal_l2h_irq}, + {NULL, phm_thermal_h2l_irq}, + {NULL, phm_ctf_irq} +}; + int hwmgr_early_init(struct pp_instance *handle) { struct pp_hwmgr *hwmgr; @@ -62,7 +141,6 @@ int hwmgr_early_init(struct pp_instance *handle) return -ENOMEM; handle->hwmgr = hwmgr; - hwmgr->smumgr = handle->smu_mgr; hwmgr->device = handle->device; hwmgr->chip_family = handle->chip_family; hwmgr->chip_id = handle->chip_id; @@ -73,24 +151,38 @@ int hwmgr_early_init(struct pp_instance *handle) hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; hwmgr_init_default_caps(hwmgr); hwmgr_set_user_specify_caps(hwmgr); + hwmgr->fan_ctrl_is_in_default_mode = true; + hwmgr->reload_fw = 1; switch (hwmgr->chip_family) { + case AMDGPU_FAMILY_CI: + hwmgr->smumgr_funcs = &ci_smu_funcs; + ci_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | + PP_ENABLE_GFX_CG_THRU_SMU); + hwmgr->pp_table_version = PP_TABLE_V0; + smu7_init_function_pointers(hwmgr); + break; case AMDGPU_FAMILY_CZ: + hwmgr->smumgr_funcs = &cz_smu_funcs; cz_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { case CHIP_TOPAZ: + hwmgr->smumgr_funcs = &iceland_smu_funcs; topaz_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK | PP_ENABLE_GFX_CG_THRU_SMU); hwmgr->pp_table_version = PP_TABLE_V0; break; case CHIP_TONGA: + hwmgr->smumgr_funcs = &tonga_smu_funcs; tonga_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK; break; case CHIP_FIJI: + hwmgr->smumgr_funcs = &fiji_smu_funcs; fiji_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK | PP_ENABLE_GFX_CG_THRU_SMU); @@ -98,6 +190,7 @@ int hwmgr_early_init(struct pp_instance *handle) case CHIP_POLARIS11: case CHIP_POLARIS10: case CHIP_POLARIS12: + hwmgr->smumgr_funcs = &polaris10_smu_funcs; polaris_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); break; @@ -109,6 +202,7 @@ int hwmgr_early_init(struct pp_instance *handle) case AMDGPU_FAMILY_AI: switch (hwmgr->chip_id) { case CHIP_VEGA10: + hwmgr->smumgr_funcs = &vega10_smu_funcs; vega10_hwmgr_init(hwmgr); break; default: @@ -118,6 +212,7 @@ int hwmgr_early_init(struct pp_instance *handle) case AMDGPU_FAMILY_RV: switch (hwmgr->chip_id) { case CHIP_RAVEN: + hwmgr->smumgr_funcs = &rv_smu_funcs; rv_init_function_pointers(hwmgr); break; default: @@ -131,80 +226,6 
@@ int hwmgr_early_init(struct pp_instance *handle) return 0; } -static int hw_init_power_state_table(struct pp_hwmgr *hwmgr) -{ - int result; - unsigned int i; - unsigned int table_entries; - struct pp_power_state *state; - int size; - - if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL) - return -EINVAL; - - if (hwmgr->hwmgr_func->get_power_state_size == NULL) - return -EINVAL; - - hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr); - - hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) + - sizeof(struct pp_power_state); - - hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL); - if (hwmgr->ps == NULL) - return -ENOMEM; - - hwmgr->request_ps = kzalloc(size, GFP_KERNEL); - if (hwmgr->request_ps == NULL) { - kfree(hwmgr->ps); - hwmgr->ps = NULL; - return -ENOMEM; - } - - hwmgr->current_ps = kzalloc(size, GFP_KERNEL); - if (hwmgr->current_ps == NULL) { - kfree(hwmgr->request_ps); - kfree(hwmgr->ps); - hwmgr->request_ps = NULL; - hwmgr->ps = NULL; - return -ENOMEM; - } - - state = hwmgr->ps; - - for (i = 0; i < table_entries; i++) { - result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state); - - if (state->classification.flags & PP_StateClassificationFlag_Boot) { - hwmgr->boot_ps = state; - memcpy(hwmgr->current_ps, state, size); - memcpy(hwmgr->request_ps, state, size); - } - - state->id = i + 1; /* assigned unique num for every power state id */ - - if (state->classification.flags & PP_StateClassificationFlag_Uvd) - hwmgr->uvd_ps = state; - state = (struct pp_power_state *)((unsigned long)state + size); - } - - return 0; -} - -static int hw_fini_power_state_table(struct pp_hwmgr *hwmgr) -{ - if (hwmgr == NULL) - return -EINVAL; - - kfree(hwmgr->current_ps); - kfree(hwmgr->request_ps); - kfree(hwmgr->ps); - hwmgr->request_ps = NULL; - hwmgr->ps = NULL; - hwmgr->current_ps = NULL; - return 0; -} - int hwmgr_hw_init(struct pp_instance *handle) { struct pp_hwmgr *hwmgr; @@ -228,9 +249,26 @@ int hwmgr_hw_init(struct pp_instance *handle) if (ret) goto err1; - ret = hw_init_power_state_table(hwmgr); + ret = psm_init_power_state_table(hwmgr); + if (ret) + goto err2; + + ret = phm_setup_asic(hwmgr); if (ret) goto err2; + + ret = phm_enable_dynamic_state_management(hwmgr); + if (ret) + goto err2; + ret = phm_start_thermal_controller(hwmgr, NULL); + ret |= psm_set_performance_states(hwmgr); + if (ret) + goto err2; + + ret = phm_register_thermal_interrupt(hwmgr, &thermal_irq_src); + if (ret) + goto err2; + return 0; err2: if (hwmgr->hwmgr_func->backend_fini) @@ -247,19 +285,138 @@ int hwmgr_hw_fini(struct pp_instance *handle) { struct pp_hwmgr *hwmgr; - if (handle == NULL) + if (handle == NULL || handle->hwmgr == NULL) return -EINVAL; hwmgr = handle->hwmgr; + phm_stop_thermal_controller(hwmgr); + psm_set_boot_states(hwmgr); + phm_display_configuration_changed(hwmgr); + psm_adjust_power_state_dynamic(hwmgr, false, NULL); + phm_disable_dynamic_state_management(hwmgr); + phm_disable_clock_power_gatings(hwmgr); + if (hwmgr->hwmgr_func->backend_fini) hwmgr->hwmgr_func->backend_fini(hwmgr); if (hwmgr->pptable_func->pptable_fini) hwmgr->pptable_func->pptable_fini(hwmgr); - return hw_fini_power_state_table(hwmgr); + return psm_fini_power_state_table(hwmgr); } +int hwmgr_hw_suspend(struct pp_instance *handle) +{ + struct pp_hwmgr *hwmgr; + int ret = 0; + + if (handle == NULL || handle->hwmgr == NULL) + return -EINVAL; + + hwmgr = handle->hwmgr; + phm_disable_smc_firmware_ctf(hwmgr); + ret = psm_set_boot_states(hwmgr); + if (ret) + return 
ret; + ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + if (ret) + return ret; + ret = phm_power_down_asic(hwmgr); + + return ret; +} +int hwmgr_hw_resume(struct pp_instance *handle) +{ + struct pp_hwmgr *hwmgr; + int ret = 0; + + if (handle == NULL || handle->hwmgr == NULL) + return -EINVAL; + + hwmgr = handle->hwmgr; + ret = phm_setup_asic(hwmgr); + if (ret) + return ret; + + ret = phm_enable_dynamic_state_management(hwmgr); + if (ret) + return ret; + ret = phm_start_thermal_controller(hwmgr, NULL); + if (ret) + return ret; + + ret |= psm_set_performance_states(hwmgr); + if (ret) + return ret; + + ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + + return ret; +} + +static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state) +{ + switch (state) { + case POWER_STATE_TYPE_BATTERY: + return PP_StateUILabel_Battery; + case POWER_STATE_TYPE_BALANCED: + return PP_StateUILabel_Balanced; + case POWER_STATE_TYPE_PERFORMANCE: + return PP_StateUILabel_Performance; + default: + return PP_StateUILabel_None; + } +} + +int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id, + void *input, void *output) +{ + int ret = 0; + struct pp_hwmgr *hwmgr; + + if (handle == NULL || handle->hwmgr == NULL) + return -EINVAL; + + hwmgr = handle->hwmgr; + + switch (task_id) { + case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: + ret = phm_set_cpu_power_state(hwmgr); + if (ret) + return ret; + ret = psm_set_performance_states(hwmgr); + if (ret) + return ret; + ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + break; + case AMD_PP_TASK_ENABLE_USER_STATE: + { + enum amd_pm_state_type ps; + enum PP_StateUILabel requested_ui_label; + struct pp_power_state *requested_ps = NULL; + + if (input == NULL) { + ret = -EINVAL; + break; + } + ps = *(unsigned long *)input; + + requested_ui_label = power_state_convert(ps); + ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps); + if (ret) + return ret; + ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps); + break; + } + case AMD_PP_TASK_COMPLETE_INIT: + case AMD_PP_TASK_READJUST_POWER_STATE: + ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + break; + default: + break; + } + return ret; +} /** * Returns once the part of the register indicated by the mask has * reached the given value. @@ -294,7 +451,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, * reached the given value.The indirect space is described by giving * the memory-mapped index of the indirect index register. 
*/ -void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, +int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, uint32_t index, uint32_t value, @@ -302,14 +459,50 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, { if (hwmgr == NULL || hwmgr->device == NULL) { pr_err("Invalid Hardware Manager!"); - return; + return -EINVAL; } cgs_write_register(hwmgr->device, indirect_port, index); - phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); + return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); +} + +int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, + uint32_t index, + uint32_t value, uint32_t mask) +{ + uint32_t i; + uint32_t cur_value; + + if (hwmgr == NULL || hwmgr->device == NULL) + return -EINVAL; + + for (i = 0; i < hwmgr->usec_timeout; i++) { + cur_value = cgs_read_register(hwmgr->device, + index); + if ((cur_value & mask) != (value & mask)) + break; + udelay(1); + } + + /* timeout means wrong logic */ + if (i == hwmgr->usec_timeout) + return -ETIME; + return 0; } +int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr, + uint32_t indirect_port, + uint32_t index, + uint32_t value, + uint32_t mask) +{ + if (hwmgr == NULL || hwmgr->device == NULL) + return -EINVAL; + cgs_write_register(hwmgr->device, indirect_port, index); + return phm_wait_for_register_unequal(hwmgr, indirect_port + 1, + value, mask); +} bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr) { @@ -678,7 +871,7 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) for (i = 0; i < vddc_table->count; i++) { if (req_vddc <= vddc_table->entries[i].vddc) { req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VddC_Request, req_volt); return; } @@ -689,28 +882,8 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays); - - 
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM); phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM); @@ -735,7 +908,6 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM); - return; } @@ -784,7 +956,8 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) { - + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); phm_cap_set(hwmgr->platform_descriptor.platformCaps, @@ -793,10 +966,6 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_AutomaticDCTransition); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - if (hwmgr->chip_id != CHIP_POLARIS10) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SPLLShutdownSupport); @@ -814,6 +983,8 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -822,15 +993,13 @@ int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_TDRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - return 0; } int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -844,14 +1013,25 @@ int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDPowerGating); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating); + return 0; +} +int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr) +{ phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - + PHM_PlatformCaps_EVV); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); return 0; } -int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr) +int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); @@ -862,8 +1042,8 @@ int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); + PHM_PlatformCaps_MemorySpreadSpectrumSupport); phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EVV); + 
PHM_PlatformCaps_EngineSpreadSpectrumSupport); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c new file mode 100644 index 000000000000..167cdc321db2 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -0,0 +1,246 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include "pp_psm.h" + +int psm_init_power_state_table(struct pp_hwmgr *hwmgr) +{ + int result; + unsigned int i; + unsigned int table_entries; + struct pp_power_state *state; + int size; + + if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL) + return -EINVAL; + + if (hwmgr->hwmgr_func->get_power_state_size == NULL) + return -EINVAL; + + hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr); + + hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) + + sizeof(struct pp_power_state); + + hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL); + if (hwmgr->ps == NULL) + return -ENOMEM; + + hwmgr->request_ps = kzalloc(size, GFP_KERNEL); + if (hwmgr->request_ps == NULL) { + kfree(hwmgr->ps); + hwmgr->ps = NULL; + return -ENOMEM; + } + + hwmgr->current_ps = kzalloc(size, GFP_KERNEL); + if (hwmgr->current_ps == NULL) { + kfree(hwmgr->request_ps); + kfree(hwmgr->ps); + hwmgr->request_ps = NULL; + hwmgr->ps = NULL; + return -ENOMEM; + } + + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state); + + if (state->classification.flags & PP_StateClassificationFlag_Boot) { + hwmgr->boot_ps = state; + memcpy(hwmgr->current_ps, state, size); + memcpy(hwmgr->request_ps, state, size); + } + + state->id = i + 1; /* assigned unique num for every power state id */ + + if (state->classification.flags & PP_StateClassificationFlag_Uvd) + hwmgr->uvd_ps = state; + state = (struct pp_power_state *)((unsigned long)state + size); + } + + return 0; +} + +int psm_fini_power_state_table(struct pp_hwmgr *hwmgr) +{ + if (hwmgr == NULL) + return -EINVAL; + + kfree(hwmgr->current_ps); + kfree(hwmgr->request_ps); + kfree(hwmgr->ps); + hwmgr->request_ps = NULL; + hwmgr->ps = NULL; + hwmgr->current_ps = NULL; + return 0; +} + +static int psm_get_ui_state(struct pp_hwmgr *hwmgr, + enum PP_StateUILabel ui_label, + unsigned long *state_id) +{ + struct pp_power_state *state; + int table_entries; 
+ int i; + + table_entries = hwmgr->num_ps; + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + if (state->classification.ui_label & ui_label) { + *state_id = state->id; + return 0; + } + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + return -EINVAL; +} + +static int psm_get_state_by_classification(struct pp_hwmgr *hwmgr, + enum PP_StateClassificationFlag flag, + unsigned long *state_id) +{ + struct pp_power_state *state; + int table_entries; + int i; + + table_entries = hwmgr->num_ps; + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + if (state->classification.flags & flag) { + *state_id = state->id; + return 0; + } + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + return -EINVAL; +} + +static int psm_set_states(struct pp_hwmgr *hwmgr, unsigned long state_id) +{ + struct pp_power_state *state; + int table_entries; + int i; + + table_entries = hwmgr->num_ps; + + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + if (state->id == state_id) { + memcpy(hwmgr->request_ps, state, hwmgr->ps_size); + return 0; + } + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + return -EINVAL; +} + +int psm_set_boot_states(struct pp_hwmgr *hwmgr) +{ + unsigned long state_id; + int ret = -EINVAL; + + if (!psm_get_state_by_classification(hwmgr, PP_StateClassificationFlag_Boot, + &state_id)) + ret = psm_set_states(hwmgr, state_id); + + return ret; +} + +int psm_set_performance_states(struct pp_hwmgr *hwmgr) +{ + unsigned long state_id; + int ret = -EINVAL; + + if (!psm_get_ui_state(hwmgr, PP_StateUILabel_Performance, + &state_id)) + ret = psm_set_states(hwmgr, state_id); + + return ret; +} + +int psm_set_user_performance_state(struct pp_hwmgr *hwmgr, + enum PP_StateUILabel label_id, + struct pp_power_state **state) +{ + int table_entries; + int i; + + table_entries = hwmgr->num_ps; + *state = hwmgr->ps; + +restart_search: + for (i = 0; i < table_entries; i++) { + if ((*state)->classification.ui_label & label_id) + return 0; + *state = (struct pp_power_state *)((uintptr_t)*state + hwmgr->ps_size); + } + + switch (label_id) { + case PP_StateUILabel_Battery: + case PP_StateUILabel_Balanced: + label_id = PP_StateUILabel_Performance; + goto restart_search; + default: + break; + } + return -EINVAL; +} + +int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, + struct pp_power_state *new_ps) +{ + struct pp_power_state *pcurrent; + struct pp_power_state *requested; + bool equal; + + if (skip) + return 0; + + if (new_ps != NULL) + requested = new_ps; + else + requested = hwmgr->request_ps; + + pcurrent = hwmgr->current_ps; + + phm_apply_state_adjust_rules(hwmgr, requested, pcurrent); + + if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, + &pcurrent->hardware, &requested->hardware, &equal))) + equal = false; + + if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) { + phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware); + memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size); + } + return 0; +} + diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h index 9ef96aab3f24..fa1b6825036a 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h @@ -1,5 +1,5 @@ /* - * Copyright 2015 Advanced Micro Devices, Inc. + * Copyright 2017 Advanced Micro Devices, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,14 +21,20 @@ * */ -#ifndef _EVENTINIT_H_ -#define _EVENTINIT_H_ +#ifndef PP_PSM_H +#define PP_PSM_H -#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4 +#include "hwmgr.h" -void pem_init_feature_info(struct pp_eventmgr *eventmgr); -void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr); -int pem_register_interrupts(struct pp_eventmgr *eventmgr); -int pem_unregister_interrupts(struct pp_eventmgr *eventmgr); +int psm_init_power_state_table(struct pp_hwmgr *hwmgr); +int psm_fini_power_state_table(struct pp_hwmgr *hwmgr); +int psm_set_boot_states(struct pp_hwmgr *hwmgr); +int psm_set_performance_states(struct pp_hwmgr *hwmgr); +int psm_set_user_performance_state(struct pp_hwmgr *hwmgr, + enum PP_StateUILabel label_id, + struct pp_power_state **state); +int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, + bool skip, + struct pp_power_state *new_ps); -#endif /* _EVENTINIT_H_ */ +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 953e0c9ad7cd..a129bc5b1844 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -470,7 +470,7 @@ uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr) * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ. * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE */ -bool atomctrl_is_voltage_controled_by_gpio_v3( +bool atomctrl_is_voltage_controlled_by_gpio_v3( struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode) @@ -1100,10 +1100,10 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, } } - PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count, - "Can't find requested voltage id in vddc_dependency_on_sclk table!", + if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) { + pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n"); return -EINVAL; - ); + } get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC; get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; @@ -1418,3 +1418,83 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, return 0; } + +int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id) +{ + int result; + SET_VOLTAGE_PS_ALLOCATION allocation; + SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters = + (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage; + + voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, SetVoltage), + voltage_parameters); + + *virtual_voltage_id = voltage_parameters->usVoltageLevel; + + return result; +} + +int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, + uint16_t *vddc, uint16_t *vddci, + uint16_t virtual_voltage_id, + uint16_t efuse_voltage_id) +{ + int i, j; + int ix; + u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf; + ATOM_ASIC_PROFILING_INFO_V2_1 *profile; + + *vddc = 0; + *vddci = 0; + + ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); + + profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) + cgs_atom_get_data_table(hwmgr->device, + ix, + NULL, NULL, NULL); + if (!profile) + return -EINVAL; + + if ((profile->asHeader.ucTableFormatRevision >= 2) && + (profile->asHeader.ucTableContentRevision >= 1) && + 
(profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) { + leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset); + vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset); + vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset); + if (profile->ucElbVDDC_Num > 0) { + for (i = 0; i < profile->ucElbVDDC_Num; i++) { + if (vddc_id_buf[i] == virtual_voltage_id) { + for (j = 0; j < profile->ucLeakageBinNum; j++) { + if (efuse_voltage_id <= leakage_bin[j]) { + *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i]; + break; + } + } + break; + } + } + } + + vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset); + vddci_buf = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset); + if (profile->ucElbVDDCI_Num > 0) { + for (i = 0; i < profile->ucElbVDDCI_Num; i++) { + if (vddci_id_buf[i] == virtual_voltage_id) { + for (j = 0; j < profile->ucLeakageBinNum; j++) { + if (efuse_voltage_id <= leakage_bin[j]) { + *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i]; + break; + } + } + break; + } + } + } + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index e9fe2e84006b..c44a92064cf1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -291,7 +291,7 @@ extern uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr); extern int atomctrl_get_memory_pll_dividers_si(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param, bool strobe_mode); extern int atomctrl_get_engine_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers); extern int atomctrl_get_dfs_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers); -extern bool atomctrl_is_voltage_controled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode); +extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode); extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table); extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param); @@ -314,5 +314,11 @@ extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ extern int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t *svd_gpio_id, uint8_t *svc_gpio_id, uint16_t *load_line); + +extern int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, + uint16_t *vddc, uint16_t *vddci, + uint16_t virtual_voltage_id, + uint16_t efuse_voltage_id); +extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 84f01fd33aff..d1af1483c69b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -173,8 +173,6 @@ static int get_vddc_lookup_table( if (NULL == table) return -ENOMEM; - memset(table, 0x00, table_size); - table->count = vddc_lookup_pp_tables->ucNumEntries; for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) { @@ -335,8 +333,6 @@ static int 
get_valid_clk( if (NULL == table) return -ENOMEM; - memset(table, 0x00, table_size); - table->count = (uint32_t)clk_volt_pp_table->count; for (i = 0; i < table->count; i++) { @@ -390,8 +386,6 @@ static int get_mclk_voltage_dependency_table( if (NULL == mclk_table) return -ENOMEM; - memset(mclk_table, 0x00, table_size); - mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries; for (i = 0; i < mclk_dep_table->ucNumEntries; i++) { @@ -439,8 +433,6 @@ static int get_sclk_voltage_dependency_table( if (NULL == sclk_table) return -ENOMEM; - memset(sclk_table, 0x00, table_size); - sclk_table->count = (uint32_t)tonga_table->ucNumEntries; for (i = 0; i < tonga_table->ucNumEntries; i++) { @@ -473,8 +465,6 @@ static int get_sclk_voltage_dependency_table( if (NULL == sclk_table) return -ENOMEM; - memset(sclk_table, 0x00, table_size); - sclk_table->count = (uint32_t)polaris_table->ucNumEntries; for (i = 0; i < polaris_table->ucNumEntries; i++) { @@ -525,8 +515,6 @@ static int get_pcie_table( if (pcie_table == NULL) return -ENOMEM; - memset(pcie_table, 0x00, table_size); - /* * Make sure the number of pcie entries are less than or equal to sclk dpm levels. * Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1. @@ -567,8 +555,6 @@ static int get_pcie_table( if (pcie_table == NULL) return -ENOMEM; - memset(pcie_table, 0x00, table_size); - /* * Make sure the number of pcie entries are less than or equal to sclk dpm levels. * Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1. @@ -615,8 +601,6 @@ static int get_cac_tdp_table( if (NULL == tdp_table) return -ENOMEM; - memset(tdp_table, 0x00, table_size); - hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL); if (NULL == hwmgr->dyn_state.cac_dtp_table) { @@ -624,8 +608,6 @@ static int get_cac_tdp_table( return -ENOMEM; } - memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size); - if (table->ucRevId < 3) { const ATOM_Tonga_PowerTune_Table *tonga_table = (ATOM_Tonga_PowerTune_Table *)table; @@ -725,8 +707,6 @@ static int get_mm_clock_voltage_table( if (NULL == mm_table) return -ENOMEM; - memset(mm_table, 0x00, table_size); - mm_table->count = mm_dependency_table->ucNumEntries; for (i = 0; i < mm_dependency_table->ucNumEntries; i++) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 2716721e5453..485f7ebdc754 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -24,7 +24,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> - +#include <drm/amdgpu_drm.h> #include "processpptables.h" #include <atom-types.h> #include <atombios.h> @@ -790,6 +790,39 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2( return pstate; } +static unsigned char soft_dummy_pp_table[] = { + 0xe1, 0x01, 0x06, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x00, 0x4a, 0x00, 0x6c, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x42, 0x00, 0x02, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x4e, 0x00, 0x88, 0x00, 0x00, 0x9e, 0x00, 0x17, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x07, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x18, 0x05, 
0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x8e, 0x01, 0x00, 0x00, 0xb8, 0x01, 0x00, 0x00, 0x08, 0x30, 0x75, 0x00, 0x80, 0x00, 0xa0, 0x8c, + 0x00, 0x7e, 0x00, 0x71, 0xa5, 0x00, 0x7c, 0x00, 0xe5, 0xc8, 0x00, 0x70, 0x00, 0x91, 0xf4, 0x00, + 0x64, 0x00, 0x40, 0x19, 0x01, 0x5a, 0x00, 0x0e, 0x28, 0x01, 0x52, 0x00, 0x80, 0x38, 0x01, 0x4a, + 0x00, 0x00, 0x09, 0x30, 0x75, 0x00, 0x30, 0x75, 0x00, 0x40, 0x9c, 0x00, 0x40, 0x9c, 0x00, 0x59, + 0xd8, 0x00, 0x59, 0xd8, 0x00, 0x91, 0xf4, 0x00, 0x91, 0xf4, 0x00, 0x0e, 0x28, 0x01, 0x0e, 0x28, + 0x01, 0x90, 0x5f, 0x01, 0x90, 0x5f, 0x01, 0x00, 0x77, 0x01, 0x00, 0x77, 0x01, 0xca, 0x91, 0x01, + 0xca, 0x91, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x80, 0x00, 0x00, 0x7e, 0x00, 0x01, + 0x7c, 0x00, 0x02, 0x70, 0x00, 0x03, 0x64, 0x00, 0x04, 0x5a, 0x00, 0x05, 0x52, 0x00, 0x06, 0x4a, + 0x00, 0x07, 0x08, 0x08, 0x00, 0x08, 0x00, 0x01, 0x02, 0x02, 0x02, 0x01, 0x02, 0x02, 0x02, 0x03, + 0x02, 0x04, 0x02, 0x00, 0x08, 0x40, 0x9c, 0x00, 0x30, 0x75, 0x00, 0x74, 0xb5, 0x00, 0xa0, 0x8c, + 0x00, 0x60, 0xea, 0x00, 0x74, 0xb5, 0x00, 0x0e, 0x28, 0x01, 0x60, 0xea, 0x00, 0x90, 0x5f, 0x01, + 0x40, 0x19, 0x01, 0xb2, 0xb0, 0x01, 0x90, 0x5f, 0x01, 0xc0, 0xd4, 0x01, 0x00, 0x77, 0x01, 0x5e, + 0xff, 0x01, 0xca, 0x91, 0x01, 0x08, 0x80, 0x00, 0x00, 0x7e, 0x00, 0x01, 0x7c, 0x00, 0x02, 0x70, + 0x00, 0x03, 0x64, 0x00, 0x04, 0x5a, 0x00, 0x05, 0x52, 0x00, 0x06, 0x4a, 0x00, 0x07, 0x00, 0x08, + 0x80, 0x00, 0x30, 0x75, 0x00, 0x7e, 0x00, 0x40, 0x9c, 0x00, 0x7c, 0x00, 0x59, 0xd8, 0x00, 0x70, + 0x00, 0xdc, 0x0b, 0x01, 0x64, 0x00, 0x80, 0x38, 0x01, 0x5a, 0x00, 0x80, 0x38, 0x01, 0x52, 0x00, + 0x80, 0x38, 0x01, 0x4a, 0x00, 0x80, 0x38, 0x01, 0x08, 0x30, 0x75, 0x00, 0x80, 0x00, 0xa0, 0x8c, + 0x00, 0x7e, 0x00, 0x71, 0xa5, 0x00, 0x7c, 0x00, 0xe5, 0xc8, 0x00, 0x74, 0x00, 0x91, 0xf4, 0x00, + 0x66, 0x00, 0x40, 0x19, 0x01, 0x58, 0x00, 0x0e, 0x28, 0x01, 0x52, 0x00, 0x80, 0x38, 0x01, 0x4a, + 0x00 +}; static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( struct pp_hwmgr *hwmgr) @@ -799,12 +832,17 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( uint16_t size; if (!table_addr) { - table_addr = cgs_atom_get_data_table(hwmgr->device, - GetIndexIntoMasterTable(DATA, PowerPlayInfo), - &size, &frev, &crev); - - hwmgr->soft_pp_table = table_addr; - hwmgr->soft_pp_table_size = size; + if (hwmgr->chip_id == CHIP_RAVEN) { + table_addr = &soft_dummy_pp_table[0]; + hwmgr->soft_pp_table = &soft_dummy_pp_table[0]; + hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table); + } else { + table_addr = cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, PowerPlayInfo), + &size, &frev, &crev); + hwmgr->soft_pp_table = table_addr; + hwmgr->soft_pp_table_size = size; + } } return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr; @@ -924,15 +962,14 @@ int pp_tables_get_entry(struct pp_hwmgr *hwmgr, } } - if ((0 == result) && - (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot))) - result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware)); + if ((0 == result) && (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot))) { + if (hwmgr->chip_family < AMDGPU_FAMILY_RV) + result = 
hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware)); + } return result; } - - static int init_powerplay_tables( struct pp_hwmgr *hwmgr, const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table @@ -1615,85 +1652,53 @@ static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr) if (hwmgr->chip_id == CHIP_RAVEN) return 0; - if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) { - kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); - hwmgr->dyn_state.vddc_dependency_on_sclk = NULL; - } + kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); + hwmgr->dyn_state.vddc_dependency_on_sclk = NULL; - if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { - kfree(hwmgr->dyn_state.vddci_dependency_on_mclk); - hwmgr->dyn_state.vddci_dependency_on_mclk = NULL; - } + kfree(hwmgr->dyn_state.vddci_dependency_on_mclk); + hwmgr->dyn_state.vddci_dependency_on_mclk = NULL; - if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) { - kfree(hwmgr->dyn_state.vddc_dependency_on_mclk); - hwmgr->dyn_state.vddc_dependency_on_mclk = NULL; - } + kfree(hwmgr->dyn_state.vddc_dependency_on_mclk); + hwmgr->dyn_state.vddc_dependency_on_mclk = NULL; - if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) { - kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk); - hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL; - } + kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk); + hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL; - if (NULL != hwmgr->dyn_state.valid_mclk_values) { - kfree(hwmgr->dyn_state.valid_mclk_values); - hwmgr->dyn_state.valid_mclk_values = NULL; - } + kfree(hwmgr->dyn_state.valid_mclk_values); + hwmgr->dyn_state.valid_mclk_values = NULL; - if (NULL != hwmgr->dyn_state.valid_sclk_values) { - kfree(hwmgr->dyn_state.valid_sclk_values); - hwmgr->dyn_state.valid_sclk_values = NULL; - } + kfree(hwmgr->dyn_state.valid_sclk_values); + hwmgr->dyn_state.valid_sclk_values = NULL; - if (NULL != hwmgr->dyn_state.cac_leakage_table) { - kfree(hwmgr->dyn_state.cac_leakage_table); - hwmgr->dyn_state.cac_leakage_table = NULL; - } + kfree(hwmgr->dyn_state.cac_leakage_table); + hwmgr->dyn_state.cac_leakage_table = NULL; - if (NULL != hwmgr->dyn_state.vddc_phase_shed_limits_table) { - kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table); - hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL; - } + kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table); + hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL; - if (NULL != hwmgr->dyn_state.vce_clock_voltage_dependency_table) { - kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table); - hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL; - } + kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table); + hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL; - if (NULL != hwmgr->dyn_state.uvd_clock_voltage_dependency_table) { - kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table); - hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL; - } + kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table); + hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL; - if (NULL != hwmgr->dyn_state.samu_clock_voltage_dependency_table) { - kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table); - hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL; - } + kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table); + hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL; - if (NULL != hwmgr->dyn_state.acp_clock_voltage_dependency_table) { - kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table); - hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL; - } + 
kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table); + hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL; - if (NULL != hwmgr->dyn_state.cac_dtp_table) { - kfree(hwmgr->dyn_state.cac_dtp_table); - hwmgr->dyn_state.cac_dtp_table = NULL; - } + kfree(hwmgr->dyn_state.cac_dtp_table); + hwmgr->dyn_state.cac_dtp_table = NULL; - if (NULL != hwmgr->dyn_state.ppm_parameter_table) { - kfree(hwmgr->dyn_state.ppm_parameter_table); - hwmgr->dyn_state.ppm_parameter_table = NULL; - } + kfree(hwmgr->dyn_state.ppm_parameter_table); + hwmgr->dyn_state.ppm_parameter_table = NULL; - if (NULL != hwmgr->dyn_state.vdd_gfx_dependency_on_sclk) { - kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk); - hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL; - } + kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk); + hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL; - if (NULL != hwmgr->dyn_state.vq_budgeting_table) { - kfree(hwmgr->dyn_state.vq_budgeting_table); - hwmgr->dyn_state.vq_budgeting_table = NULL; - } + kfree(hwmgr->dyn_state.vq_budgeting_table); + hwmgr->dyn_state.vq_budgeting_table = NULL; return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index 2c3e6baf2524..9186b0788fac 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c @@ -38,20 +38,17 @@ #include "pp_soc15.h" #define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define RAVEN_MINIMUM_ENGINE_CLOCK 800 //8Mhz, the low boundary of engine clock allowed on this chip +#define RAVEN_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */ #define SCLK_MIN_DIV_INTV_SHIFT 12 -#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 //100mhz +#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */ #define SMC_RAM_END 0x40000 static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic; + + int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, struct pp_display_clock_request *clock_req); -struct phm_vq_budgeting_record rv_vqtable[] = { - /* _TBD - * CUs, SSP low, SSP High, Min Sclk Low, Min Sclk, High, AWD/non-AWD, DCLK, ECLK, Sustainable Sclk, Sustainable CUs */ - { 8, 0, 45, 0, 0, VQ_DisplayConfig_NoneAWD, 80000, 120000, 4, 0 }, -}; static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps) { @@ -70,101 +67,27 @@ static const struct rv_power_state *cast_const_rv_ps( return (struct rv_power_state *)hw_ps; } -static int rv_init_vq_budget_table(struct pp_hwmgr *hwmgr) -{ - uint32_t table_size, i; - struct phm_vq_budgeting_table *ptable; - uint32_t num_entries = ARRAY_SIZE(rv_vqtable); - - if (hwmgr->dyn_state.vq_budgeting_table != NULL) - return 0; - - table_size = sizeof(struct phm_vq_budgeting_table) + - sizeof(struct phm_vq_budgeting_record) * (num_entries - 1); - - ptable = kzalloc(table_size, GFP_KERNEL); - if (NULL == ptable) - return -ENOMEM; - - ptable->numEntries = (uint8_t) num_entries; - - for (i = 0; i < ptable->numEntries; i++) { - ptable->entries[i].ulCUs = rv_vqtable[i].ulCUs; - ptable->entries[i].ulSustainableSOCPowerLimitLow = rv_vqtable[i].ulSustainableSOCPowerLimitLow; - ptable->entries[i].ulSustainableSOCPowerLimitHigh = rv_vqtable[i].ulSustainableSOCPowerLimitHigh; - ptable->entries[i].ulMinSclkLow = rv_vqtable[i].ulMinSclkLow; - ptable->entries[i].ulMinSclkHigh = rv_vqtable[i].ulMinSclkHigh; - ptable->entries[i].ucDispConfig = rv_vqtable[i].ucDispConfig; - ptable->entries[i].ulDClk = rv_vqtable[i].ulDClk; - ptable->entries[i].ulEClk = 
rv_vqtable[i].ulEClk; - ptable->entries[i].ulSustainableSclk = rv_vqtable[i].ulSustainableSclk; - ptable->entries[i].ulSustainableCUs = rv_vqtable[i].ulSustainableCUs; - } - - hwmgr->dyn_state.vq_budgeting_table = ptable; - - return 0; -} - static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) { struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend); - struct cgs_system_info sys_info = {0}; - int result; - rv_hwmgr->ddi_power_gating_disabled = 0; - rv_hwmgr->bapm_enabled = 1; rv_hwmgr->dce_slow_sclk_threshold = 30000; - rv_hwmgr->disable_driver_thermal_policy = 1; rv_hwmgr->thermal_auto_throttling_treshold = 0; rv_hwmgr->is_nb_dpm_enabled = 1; rv_hwmgr->dpm_flags = 1; - rv_hwmgr->disable_smu_acp_s3_handshake = 1; - rv_hwmgr->disable_notify_smu_vpu_recovery = 0; rv_hwmgr->gfx_off_controled_by_driver = false; - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicM3Arbiter); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDPowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDynamicPowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEPowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SamuPowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ACP); + rv_hwmgr->need_min_deep_sleep_dcefclk = true; + rv_hwmgr->num_active_display = 0; + rv_hwmgr->deep_sleep_dcefclk = 0; phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_GFXDynamicMGPowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkThrottleLowNotification); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableVoltageIsland); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicUVDState); - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (!result) { - if (sys_info.value & AMD_PG_SUPPORT_GFX_DMG) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_GFXDynamicMGPowerGating); - } - + PHM_PlatformCaps_PowerPlaySupport); return 0; } @@ -234,102 +157,88 @@ static int rv_construct_boot_state(struct pp_hwmgr *hwmgr) return 0; } -static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) { struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); struct PP_Clocks clocks = {0}; struct pp_display_clock_request clock_req; clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; - clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk; clock_req.clock_type = amd_pp_dcf_clock; clock_req.clock_freq_in_khz = clocks.dcefClock * 10; - if (clocks.dcefClock == 0 && clocks.dcefClockInSR == 0) - clock_req.clock_freq_in_khz = rv_data->dcf_actual_hard_min_freq; - PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req), "Attempt to set DCF Clock Failed!", return -EINVAL); - if(rv_data->need_min_deep_sleep_dcefclk && 0 != clocks.dcefClockInSR) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetMinDeepSleepDcefclk, - clocks.dcefClockInSR / 100); - /* - if(!rv_data->isp_tileA_power_gated || 
!rv_data->isp_tileB_power_gated) { - if ((hwmgr->ispArbiter.iclk != 0) && (rv_data->ISPActualHardMinFreq != (hwmgr->ispArbiter.iclk / 100) )) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetHardMinIspclkByFreq, hwmgr->ispArbiter.iclk / 100); - rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->ISPActualHardMinFreq), - } - } */ - if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) || ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) { rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100; rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinVcn, (rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min); } if((hwmgr->gfx_arbiter.sclk_hard_min != 0) && ((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, hwmgr->gfx_arbiter.sclk_hard_min / 100); - rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->soc_actual_hard_min_freq); + rv_read_arg_from_smc(hwmgr, &rv_data->soc_actual_hard_min_freq); } if ((hwmgr->gfx_arbiter.gfxclk != 0) && (rv_data->gfx_actual_soft_min_freq != (hwmgr->gfx_arbiter.gfxclk))) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinVideoGfxclkFreq, hwmgr->gfx_arbiter.gfxclk / 100); - rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->gfx_actual_soft_min_freq); + rv_read_arg_from_smc(hwmgr, &rv_data->gfx_actual_soft_min_freq); } if ((hwmgr->gfx_arbiter.fclk != 0) && (rv_data->fabric_actual_soft_min_freq != (hwmgr->gfx_arbiter.fclk / 100))) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinVideoFclkFreq, hwmgr->gfx_arbiter.fclk / 100); - rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->fabric_actual_soft_min_freq); + rv_read_arg_from_smc(hwmgr, &rv_data->fabric_actual_soft_min_freq); } return 0; } -static int rv_tf_set_num_active_display(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) { - uint32_t num_of_active_displays = 0; - struct cgs_display_info info = {0}; + struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); - cgs_get_active_displays_info(hwmgr->device, &info); - num_of_active_displays = info.display_count; + if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) { + rv_data->deep_sleep_dcefclk = clock/100; + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetMinDeepSleepDcefclk, + rv_data->deep_sleep_dcefclk); + } + return 0; +} - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, +static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count) +{ + struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); + + if (rv_data->num_active_display != count) { + rv_data->num_active_display = count; + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDisplayCount, - num_of_active_displays); + rv_data->num_active_display); + } + return 0; } -static const struct phm_master_table_item rv_set_power_state_list[] = { - { .tableFunction = rv_tf_set_clock_limit }, - { .tableFunction = rv_tf_set_num_active_display }, - { } -}; - -static const struct phm_master_table_header rv_set_power_state_master = { - 0, - PHM_MasterTableFlag_None, - 
rv_set_power_state_list -}; +static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) +{ + return rv_set_clock_limit(hwmgr, input); +} -static int rv_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr) { struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); @@ -340,20 +249,13 @@ static int rv_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input, return 0; } -static const struct phm_master_table_item rv_setup_asic_list[] = { - { .tableFunction = rv_tf_init_power_gate_state }, - { } -}; -static const struct phm_master_table_header rv_setup_asic_master = { - 0, - PHM_MasterTableFlag_None, - rv_setup_asic_list -}; +static int rv_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + return rv_init_power_gate_state(hwmgr); +} -static int rv_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr) { struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); @@ -365,66 +267,42 @@ static int rv_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, return 0; } -static const struct phm_master_table_item rv_power_down_asic_list[] = { - { .tableFunction = rv_tf_reset_cc6_data }, - { } -}; - -static const struct phm_master_table_header rv_power_down_asic_master = { - 0, - PHM_MasterTableFlag_None, - rv_power_down_asic_list -}; - +static int rv_power_off_asic(struct pp_hwmgr *hwmgr) +{ + return rv_reset_cc6_data(hwmgr); +} -static int rv_tf_disable_gfx_off(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr) { struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); if (rv_data->gfx_off_controled_by_driver) - smum_send_msg_to_smc(hwmgr->smumgr, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff); return 0; } -static const struct phm_master_table_item rv_disable_dpm_list[] = { - { .tableFunction = rv_tf_disable_gfx_off }, - { }, -}; - - -static const struct phm_master_table_header rv_disable_dpm_master = { - 0, - PHM_MasterTableFlag_None, - rv_disable_dpm_list -}; +static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + return rv_disable_gfx_off(hwmgr); +} -static int rv_tf_enable_gfx_off(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr) { struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); if (rv_data->gfx_off_controled_by_driver) - smum_send_msg_to_smc(hwmgr->smumgr, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff); return 0; } -static const struct phm_master_table_item rv_enable_dpm_list[] = { - { .tableFunction = rv_tf_enable_gfx_off }, - { }, -}; - -static const struct phm_master_table_header rv_enable_dpm_master = { - 0, - PHM_MasterTableFlag_None, - rv_enable_dpm_list -}; +static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + return rv_enable_gfx_off(hwmgr); +} static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *prequest_ps, @@ -505,7 +383,7 @@ static int rv_populate_clock_table(struct pp_hwmgr *hwmgr) DpmClocks_t *table = &(rv_data->clock_table); struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); - result = rv_copy_table_from_smc(hwmgr->smumgr, (uint8_t *)table, CLOCKTABLE); + result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE); PP_ASSERT_WITH_CODE((0 == result), "Attempt to copy clock table 
from smc failed", @@ -563,9 +441,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) return result; } - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerPlaySupport); - rv_populate_clock_table(hwmgr); result = rv_get_system_info_data(hwmgr); @@ -576,40 +451,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) rv_construct_boot_state(hwmgr); - result = phm_construct_table(hwmgr, &rv_setup_asic_master, - &(hwmgr->setup_asic)); - if (result != 0) { - pr_err("Fail to construct setup ASIC\n"); - return result; - } - - result = phm_construct_table(hwmgr, &rv_power_down_asic_master, - &(hwmgr->power_down_asic)); - if (result != 0) { - pr_err("Fail to construct power down ASIC\n"); - return result; - } - - result = phm_construct_table(hwmgr, &rv_set_power_state_master, - &(hwmgr->set_power_state)); - if (result != 0) { - pr_err("Fail to construct set_power_state\n"); - return result; - } - - result = phm_construct_table(hwmgr, &rv_disable_dpm_master, - &(hwmgr->disable_dynamic_state_management)); - if (result != 0) { - pr_err("Fail to disable_dynamic_state\n"); - return result; - } - result = phm_construct_table(hwmgr, &rv_enable_dpm_master, - &(hwmgr->enable_dynamic_state_management)); - if (result != 0) { - pr_err("Fail to enable_dynamic_state\n"); - return result; - } - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = RAVEN_MAX_HARDWARE_POWERLEVELS; @@ -624,8 +465,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - rv_init_vq_budget_table(hwmgr); - return result; } @@ -634,46 +473,21 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); - phm_destroy_table(hwmgr, &(hwmgr->set_power_state)); - phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management)); - phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management)); - phm_destroy_table(hwmgr, &(hwmgr->power_down_asic)); - phm_destroy_table(hwmgr, &(hwmgr->setup_asic)); - - if (pinfo->vdd_dep_on_dcefclk) { - kfree(pinfo->vdd_dep_on_dcefclk); - pinfo->vdd_dep_on_dcefclk = NULL; - } - if (pinfo->vdd_dep_on_socclk) { - kfree(pinfo->vdd_dep_on_socclk); - pinfo->vdd_dep_on_socclk = NULL; - } - if (pinfo->vdd_dep_on_fclk) { - kfree(pinfo->vdd_dep_on_fclk); - pinfo->vdd_dep_on_fclk = NULL; - } - if (pinfo->vdd_dep_on_dispclk) { - kfree(pinfo->vdd_dep_on_dispclk); - pinfo->vdd_dep_on_dispclk = NULL; - } - if (pinfo->vdd_dep_on_dppclk) { - kfree(pinfo->vdd_dep_on_dppclk); - pinfo->vdd_dep_on_dppclk = NULL; - } - if (pinfo->vdd_dep_on_phyclk) { - kfree(pinfo->vdd_dep_on_phyclk); - pinfo->vdd_dep_on_phyclk = NULL; - } - - if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { - kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; - } - - if (NULL != hwmgr->dyn_state.vq_budgeting_table) { - kfree(hwmgr->dyn_state.vq_budgeting_table); - hwmgr->dyn_state.vq_budgeting_table = NULL; - } + kfree(pinfo->vdd_dep_on_dcefclk); + pinfo->vdd_dep_on_dcefclk = NULL; + kfree(pinfo->vdd_dep_on_socclk); + pinfo->vdd_dep_on_socclk = NULL; + kfree(pinfo->vdd_dep_on_fclk); + pinfo->vdd_dep_on_fclk = NULL; + kfree(pinfo->vdd_dep_on_dispclk); + pinfo->vdd_dep_on_dispclk = NULL; + kfree(pinfo->vdd_dep_on_dppclk); + pinfo->vdd_dep_on_dppclk = NULL; + kfree(pinfo->vdd_dep_on_phyclk); + pinfo->vdd_dep_on_phyclk = NULL; + + 
kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; kfree(hwmgr->backend); hwmgr->backend = NULL; @@ -687,12 +501,12 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, return 0; } -static int rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) { return 0; } -static int rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) { return 0; } @@ -711,18 +525,9 @@ static int rv_dpm_get_pp_table_entry_callback( { struct rv_power_state *rv_ps = cast_rv_ps(hw_ps); - const ATOM_PPLIB_CZ_CLOCK_INFO *rv_clock_info = clock_info; - - struct phm_clock_voltage_dependency_table *table = - hwmgr->dyn_state.vddc_dependency_on_sclk; - uint8_t clock_info_index = rv_clock_info->index; - - if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1)) - clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1); - - rv_ps->levels[index].engine_clock = table->entries[clock_info_index].clk; - rv_ps->levels[index].vddc_index = (uint8_t)table->entries[clock_info_index].v; + rv_ps->levels[index].engine_clock = 0; + rv_ps->levels[index].vddc_index = 0; rv_ps->level = index + 1; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { @@ -814,12 +619,12 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p ps = cast_const_rv_ps(state); level_index = index > ps->level - 1 ? ps->level - 1 : index; - level->coreClock = ps->levels[level_index].engine_clock; + level->coreClock = 30000; if (designation == PHM_PerformanceLevelDesignation_PowerContainment) { for (i = 1; i < ps->level; i++) { if (ps->levels[i].engine_clock > data->dce_slow_sclk_threshold) { - level->coreClock = ps->levels[i].engine_clock; + level->coreClock = 30000; break; } } @@ -829,8 +634,9 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p vol_dep_record_index = data->clock_vol_info.vdd_dep_on_fclk->count - 1; level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[vol_dep_record_index].clk; - } else + } else { level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk; + } level->nonLocalMemoryFreq = 0; level->nonLocalMemoryWidth = 0; @@ -993,7 +799,7 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, return -EINVAL; } - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, + result = smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); return result; @@ -1001,7 +807,8 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) { - return -EINVAL; + clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */ + return 0; } static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr) @@ -1058,6 +865,13 @@ static const struct pp_hwmgr_func rv_hwmgr_funcs = { .get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage, .get_max_high_clocks = rv_get_max_high_clocks, .read_sensor = rv_read_sensor, + .set_active_display_count = rv_set_active_display_count, + .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk, + .dynamic_state_management_enable = rv_enable_dpm_tasks, + .power_off_asic = rv_power_off_asic, + .asic_setup = rv_setup_asic_task, + .power_state_set = rv_set_power_state_tasks, + 
.dynamic_state_management_disable = rv_disable_dpm_tasks, }; int rv_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h index 2472b50e54cf..68d61bd95ca0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h @@ -293,7 +293,9 @@ struct rv_hwmgr { DpmClocks_t clock_table; uint32_t active_process_mask; - bool need_min_deep_sleep_dcefclk; /* disabled by default */ + bool need_min_deep_sleep_dcefclk; + uint32_t deep_sleep_dcefclk; + uint32_t num_active_display; }; struct pp_hwmgr; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 261b828ad590..69a0678ace98 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -27,21 +27,21 @@ static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) { - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + return smum_send_msg_to_smc(hwmgr, enable ? PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); } static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) { - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + return smum_send_msg_to_smc(hwmgr, enable ? PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); } static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable) { - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + return smum_send_msg_to_smc(hwmgr, enable ? PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); } @@ -70,7 +70,7 @@ static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (phm_cf_want_uvd_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); return 0; } @@ -80,10 +80,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) if (phm_cf_want_uvd_power_gating(hwmgr)) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDynamicPowerGating)) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDPowerON, 1); } else { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDPowerON, 0); } } @@ -94,7 +94,7 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) { if (phm_cf_want_vce_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_VCEPowerOFF); return 0; } @@ -102,7 +102,7 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) static int smu7_powerup_vce(struct pp_hwmgr *hwmgr) { if (phm_cf_want_vce_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_VCEPowerON); return 0; } @@ -111,7 +111,7 @@ static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SamuPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SAMPowerOFF); return 0; } @@ -120,7 +120,7 @@ static int smu7_powerup_samu(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SamuPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, + 
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SAMPowerON); return 0; } @@ -140,7 +140,7 @@ int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr) return 0; } -int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -166,10 +166,9 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) smu7_update_uvd_dpm(hwmgr, false); } - return 0; } -int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -194,7 +193,6 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) AMD_PG_STATE_UNGATE); smu7_update_vce_dpm(hwmgr, false); } - return 0; } int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) @@ -237,7 +235,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_CGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -247,7 +245,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_CGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -260,7 +258,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_3DCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } @@ -271,7 +269,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_3DLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -284,7 +282,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_RLC_LS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -297,7 +295,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_CP_LS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -311,7 +309,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, CG_GFX_OTHERS_MGCG_MASK); if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -331,7 +329,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_BIF_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -341,7 +339,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_BIF_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -354,7 +352,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_MC_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } @@ -365,7 +363,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_MC_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -378,7 +376,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_DRM_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; 
} if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -388,7 +386,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_DRM_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -401,7 +399,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_HDP_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } @@ -412,7 +410,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_HDP_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -425,7 +423,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_SDMA_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } @@ -436,7 +434,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_SDMA_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -449,7 +447,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_ROM_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -489,9 +487,9 @@ int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) active_cus = sys_info.value; if (enable) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus); else - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GFX_CU_PG_DISABLE); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h index c96ed9ed7eaf..7b54d48b2ce2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h @@ -27,8 +27,8 @@ #include "smu7_hwmgr.h" #include "pp_asicblocks.h" -int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr); int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index c2743233ba10..8dbe9148aad3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -26,6 +26,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <asm/div64.h> +#include <drm/amdgpu_drm.h> #include "pp_acpi.h" #include "ppatomctrl.h" #include "atombios.h" @@ -163,7 +164,7 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) { if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable); return 0; } @@ -300,28 +301,28 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;); } - tmp = 
smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC); + tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC); PP_ASSERT_WITH_CODE( (data->vddc_voltage_table.count <= tmp), "Too many voltage values for VDDC. Trimming to fit state table.", phm_trim_voltage_table_to_fit_state_table(tmp, &(data->vddc_voltage_table))); - tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX); + tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX); PP_ASSERT_WITH_CODE( (data->vddgfx_voltage_table.count <= tmp), "Too many voltage values for VDDC. Trimming to fit state table.", phm_trim_voltage_table_to_fit_state_table(tmp, &(data->vddgfx_voltage_table))); - tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI); + tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI); PP_ASSERT_WITH_CODE( (data->vddci_voltage_table.count <= tmp), "Too many voltage values for VDDCI. Trimming to fit state table.", phm_trim_voltage_table_to_fit_state_table(tmp, &(data->vddci_voltage_table))); - tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD); + tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD); PP_ASSERT_WITH_CODE( (data->mvdd_voltage_table.count <= tmp), "Too many voltage values for MVDD. Trimming to fit state table.", @@ -387,6 +388,7 @@ static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr) static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int i; /* Clear reset for voting clients before enabling DPM */ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -394,50 +396,26 @@ static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr) PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); - + for (i = 0; i < 8; i++) + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0 + i * 4, + data->voting_rights_clients[i]); return 0; } static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr) { + int i; + /* Reset voting clients before disabling DPM */ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, 0); - 
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, 0); + for (i = 0; i < 8; i++) + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0); return 0; } @@ -493,7 +471,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, static int smu7_reset_to_default(struct pp_hwmgr *hwmgr) { - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults); } /** @@ -551,7 +529,7 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) data->pcie_gen_performance = data->pcie_gen_power_saving; data->pcie_lane_performance = data->pcie_lane_power_saving; } - tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK); + tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK); phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table, tmp, MAX_REGULAR_DPM_NUMBER); @@ -607,13 +585,20 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) data->dpm_table.pcie_speed_table.count = 6; } /* Populate last level for boot PCIE level, but do not increment count. */ - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, + if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { + for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + data->vbios_boot_state.pcie_lane_bootup_value); + } else { + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, data->dpm_table.pcie_speed_table.count, get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - + } return 0; } @@ -625,27 +610,27 @@ static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr) phm_reset_single_dpm_table( &data->dpm_table.sclk_table, - smum_get_mac_definition(hwmgr->smumgr, + smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS), MAX_REGULAR_DPM_NUMBER); phm_reset_single_dpm_table( &data->dpm_table.mclk_table, - smum_get_mac_definition(hwmgr->smumgr, + smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER); phm_reset_single_dpm_table( &data->dpm_table.vddc_table, - smum_get_mac_definition(hwmgr->smumgr, + smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC), MAX_REGULAR_DPM_NUMBER); phm_reset_single_dpm_table( &data->dpm_table.vddci_table, - smum_get_mac_definition(hwmgr->smumgr, + smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER); phm_reset_single_dpm_table( &data->dpm_table.mvdd_table, - smum_get_mac_definition(hwmgr->smumgr, + smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD), MAX_REGULAR_DPM_NUMBER); return 0; @@ -689,7 +674,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) allowed_vdd_sclk_table->entries[i].clk) { data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = allowed_vdd_sclk_table->entries[i].clk; - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 
1 : 0; data->dpm_table.sclk_table.count++; } } @@ -703,7 +688,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) allowed_vdd_mclk_table->entries[i].clk) { data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = allowed_vdd_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0; data->dpm_table.mclk_table.count++; } } @@ -855,7 +840,7 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableVRHotGPIOInterrupt); return 0; @@ -873,7 +858,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV); return 0; } @@ -883,7 +868,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV); return 0; } @@ -892,12 +877,12 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON)) + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON)) PP_ASSERT_WITH_CODE(false, "Attempt to enable Master Deep Sleep switch failed!", return -EINVAL); } else { - if (smum_send_msg_to_smc(hwmgr->smumgr, + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_OFF)) { PP_ASSERT_WITH_CODE(false, "Attempt to disable Master Deep Sleep switch failed!", @@ -912,7 +897,7 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_OFF)) { PP_ASSERT_WITH_CODE(false, "Attempt to disable Master Deep Sleep switch failed!", @@ -928,12 +913,12 @@ static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t soft_register_value = 0; uint32_t handshake_disables_offset = data->soft_regs_start - + smum_get_offsetof(hwmgr->smumgr, + + smum_get_offsetof(hwmgr, SMU_SoftRegisters, HandshakeDisables); soft_register_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, handshake_disables_offset); - soft_register_value |= smum_get_mac_definition(hwmgr->smumgr, + soft_register_value |= smum_get_mac_definition(hwmgr, SMU_UVD_MCLK_HANDSHAKE_DISABLE); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, handshake_disables_offset, soft_register_value); @@ -947,7 +932,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) /* enable SCLK dpm */ if (!data->sclk_dpm_key_disabled) PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)), + (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)), "Failed to enable SCLK DPM during DPM Start Function!", return -EINVAL); @@ -956,20 +941,31 
@@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK)) smu7_disable_handshake_uvd(hwmgr); PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, + (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Enable)), "Failed to enable MCLK DPM during DPM Start Function!", return -EINVAL); PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); - udelay(10); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); + + if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005); + udelay(10); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005); + } else { + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); + udelay(10); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); + } } return 0; @@ -993,11 +989,15 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + - smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters, + smum_get_offsetof(hwmgr, SMU_SoftRegisters, VoltageChangeTimeout), 0x1000); PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); + if (hwmgr->chip_family == AMDGPU_FAMILY_CI) + cgs_write_register(hwmgr->device, 0x1488, + (cgs_read_register(hwmgr->device, 0x1488) & ~0x1)); + if (smu7_enable_sclk_mclk_dpm(hwmgr)) { pr_err("Failed to enable Sclk DPM and Mclk DPM!"); return -EINVAL; @@ -1006,7 +1006,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) /* enable PCIE dpm */ if (0 == data->pcie_dpm_key_disabled) { PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, + (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_Enable)), "Failed to enable pcie DPM during DPM Start Function!", return -EINVAL); @@ -1014,7 +1014,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Falcon_QuickTransition)) { - PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableACDCGPIOInterrupt)), "Failed to enable AC DC GPIO Interrupt!", ); @@ -1032,7 +1032,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to disable SCLK DPM when DPM is disabled", 
return 0); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable); } /* disable MCLK dpm */ @@ -1040,7 +1040,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to disable MCLK DPM when DPM is disabled", return 0); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable); } return 0; @@ -1060,7 +1060,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) /* disable PCIE dpm */ if (!data->pcie_dpm_key_disabled) { PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, + (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_Disable) == 0), "Failed to disable pcie DPM during DPM Stop Function!", return -EINVAL); @@ -1072,7 +1072,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) "Trying to disable voltage DPM when DPM is disabled", return 0); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable); return 0; } @@ -1226,7 +1226,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable VR hot GPIO interrupt!", result = tmp_result); - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay); + smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay); tmp_result = smu7_enable_sclk_control(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), @@ -1361,14 +1361,14 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->vddc_vddgfx_delta = 300; data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; - data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; - data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1; - data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; - data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3; - data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4; - data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5; - data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6; - data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7; + data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; + data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1; + data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; + data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3; + data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4; + data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5; + data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6; + data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7; data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; @@ -1382,23 +1382,40 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->force_pcie_gen = PP_PCIEGenInvalid; data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? 
true : false; - if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) { + if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) { uint8_t tmp1, tmp2; uint16_t tmp3 = 0; atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2, &tmp3); tmp3 = (tmp3 >> 5) & 0x3; data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3; + } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { + data->vddc_phase_shed_control = 1; + } else { + data->vddc_phase_shed_control = 0; + } + + if (hwmgr->chip_id == CHIP_HAWAII) { + data->thermal_temp_setting.temperature_low = 94500; + data->thermal_temp_setting.temperature_high = 95000; + data->thermal_temp_setting.temperature_shutdown = 104000; + } else { + data->thermal_temp_setting.temperature_low = 99500; + data->thermal_temp_setting.temperature_high = 100000; + data->thermal_temp_setting.temperature_shutdown = 104000; } data->fast_watermark_threshold = 100; - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; + else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) + data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ControlVDDGFX)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; } @@ -1406,25 +1423,24 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnableMVDDControl)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; } - if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) { + if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ControlVDDGFX); - } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ControlVDDCI)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; } @@ -1543,7 +1559,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) if (vddc >= 2000 || vddc == 0) return -EINVAL; } else { - pr_warn("failed to retrieving EVV voltage!\n"); + pr_debug("failed to retrieving EVV voltage!\n"); continue; } @@ -1676,7 +1692,7 @@ static int phm_add_voltage(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE((0 != look_up_table->count), "Lookup Table empty.", return -EINVAL); - i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX); + i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX); PP_ASSERT_WITH_CODE((i >= look_up_table->count), "Lookup Table is full.", return -EINVAL); @@ 
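These smu7_hwmgr.c hunks thread sea-islands (CI) support through the generic SMU7 path: the LCAC setup writes raw SMC offsets when hwmgr->chip_family is AMDGPU_FAMILY_CI (the 0xc0400dxx constants) and keeps the ixLCAC_* defines for the newer parts, while smu7_init_dpm_defaults() now picks phase-shed control and the Hawaii thermal trip points per chip. A minimal sketch of the LCAC branch, with smu7_write_lcac()/smu7_setup_lcac() as hypothetical helper names; the offsets and values are the ones added above:

/* Hypothetical helpers mirroring the two branches added to
 * smu7_enable_sclk_mclk_dpm(): CI parts are programmed through raw SMC
 * offsets, later parts through the ixLCAC_* defines.
 */
static void smu7_write_lcac(struct pp_hwmgr *hwmgr, bool is_ci,
			    uint32_t mc0, uint32_t mc1, uint32_t cpl)
{
	if (is_ci) {
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, mc0);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, mc1);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, cpl);
	} else {
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, mc0);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, mc1);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, cpl);
	}
}

static void smu7_setup_lcac(struct pp_hwmgr *hwmgr)
{
	bool is_ci = hwmgr->chip_family == AMDGPU_FAMILY_CI;

	/* Same two-stage sequence as the hunk above: 0x5/0x100005 first,
	 * then 0x400005/0x500005 after a short delay. */
	smu7_write_lcac(hwmgr, is_ci, 0x5, 0x5, 0x100005);
	udelay(10);
	smu7_write_lcac(hwmgr, is_ci, 0x400005, 0x400005, 0x500005);
}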
-2274,7 +2290,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; } - if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1) + if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1) hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; return 0; @@ -2282,40 +2298,65 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { - kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; - } - pp_smu7_thermal_fini(hwmgr); - if (NULL != hwmgr->backend) { - kfree(hwmgr->backend); - hwmgr->backend = NULL; - } + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; + kfree(hwmgr->backend); + hwmgr->backend = NULL; + + return 0; +} + +static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr) +{ + uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int i; + if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) { + for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci, + virtual_voltage_id, + efuse_voltage_id) == 0) { + if (vddc != 0 && vddc != virtual_voltage_id) { + data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; + data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; + data->vddc_leakage.count++; + } + if (vddci != 0 && vddci != virtual_voltage_id) { + data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci; + data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id; + data->vddci_leakage.count++; + } + } + } + } return 0; } static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data; - int result; + int result = 0; data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL); if (data == NULL) return -ENOMEM; hwmgr->backend = data; - pp_smu7_thermal_initialize(hwmgr); - smu7_patch_voltage_workaround(hwmgr); smu7_init_dpm_defaults(hwmgr); /* Get leakage voltage based on leakage ID. */ - result = smu7_get_evv_voltages(hwmgr); - - if (result) { - pr_info("Get EVV Voltage Failed. Abort Driver loading!\n"); - return -EINVAL; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV)) { + result = smu7_get_evv_voltages(hwmgr); + if (result) { + pr_info("Get EVV Voltage Failed. 
Abort Driver loading!\n"); + return -EINVAL; + } + } else { + smu7_get_elb_voltages(hwmgr); } if (hwmgr->pp_table_version == PP_TABLE_V1) { @@ -2382,7 +2423,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) level++; if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PCIeDPM_ForceLevel, level); } } @@ -2395,7 +2436,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) level++; if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, (1 << level)); } @@ -2409,7 +2450,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) level++; if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, (1 << level)); } @@ -2428,14 +2469,14 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) if (!data->sclk_dpm_key_disabled) { if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, data->dpm_level_enable_mask.sclk_dpm_enable_mask); } if (!data->mclk_dpm_key_disabled) { if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, data->dpm_level_enable_mask.mclk_dpm_enable_mask); } @@ -2451,7 +2492,7 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) return -EINVAL; if (!data->pcie_dpm_key_disabled) { - smum_send_msg_to_smc(hwmgr->smumgr, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel); } @@ -2468,7 +2509,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { level = phm_get_lowest_enabled_level(hwmgr, data->dpm_level_enable_mask.sclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, (1 << level)); @@ -2478,7 +2519,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { level = phm_get_lowest_enabled_level(hwmgr, data->dpm_level_enable_mask.mclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, (1 << level)); } @@ -2488,7 +2529,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { level = phm_get_lowest_enabled_level(hwmgr, data->dpm_level_enable_mask.pcie_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PCIeDPM_ForceLevel, (level)); } @@ -2572,51 +2613,16 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, uint32_t sclk_mask = 0; uint32_t mclk_mask = 0; uint32_t pcie_mask = 0; - uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; - - if (level == hwmgr->dpm_level) - return ret; - - if (!(hwmgr->dpm_level & profile_mode_mask)) { - /* enter profile mode, save current level, disable gfx cg*/ - if (level & profile_mode_mask) { - hwmgr->saved_dpm_level = hwmgr->dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - 
AMD_CG_STATE_UNGATE); - } - } else { - /* exit profile mode, restore level, enable gfx cg*/ - if (!(level & profile_mode_mask)) { - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) - level = hwmgr->saved_dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_GATE); - } - } switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: ret = smu7_force_dpm_highest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_LOW: ret = smu7_force_dpm_lowest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_AUTO: ret = smu7_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: @@ -2625,26 +2631,23 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); if (ret) return ret; - hwmgr->dpm_level = level; smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); - break; case AMD_DPM_FORCED_LEVEL_MANUAL: - hwmgr->dpm_level = level; - break; case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: break; } - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); - else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); - - return 0; + if (!ret) { + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); + else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); + } + return ret; } static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) @@ -2843,7 +2846,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, } -static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) { struct pp_power_state *ps; struct smu7_power_state *smu7_ps; @@ -2865,7 +2868,7 @@ static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) [smu7_ps->performance_level_count-1].memory_clock; } -static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) { struct pp_power_state *ps; struct smu7_power_state *smu7_ps; @@ -3002,7 +3005,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, [smu7_power_state->performance_level_count++]); PP_ASSERT_WITH_CODE( - (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), + (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), "Performance levels exceeds SMC limit!", return -EINVAL); @@ -3071,11 +3074,11 @@ static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { if (dep_mclk_table->entries[0].clk != data->vbios_boot_state.mclk_bootup_value) - pr_err("Single MCLK entry VDDCI/MCLK dependency table " + pr_debug("Single MCLK entry VDDCI/MCLK dependency table " "does not match VBIOS boot MCLK level"); if 
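In the smu7_force_dpm_level() hunk above, the function stops maintaining hwmgr->dpm_level and the saved profile level and no longer toggles GFX clockgating around profile modes; it only applies the requested level and, when that succeeds, handles the fan for PROFILE_PEAK transitions. Read straight through, the new body is roughly the following (smu7_force_dpm_level_sketch is a placeholder name; every call appears in the hunk):

static int smu7_force_dpm_level_sketch(struct pp_hwmgr *hwmgr,
				       enum amd_dpm_forced_level level)
{
	uint32_t sclk_mask = 0, mclk_mask = 0, pcie_mask = 0;
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu7_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu7_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = smu7_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu7_get_profiling_clk(hwmgr, level,
					     &sclk_mask, &mclk_mask, &pcie_mask);
		if (ret)
			return ret;
		smu7_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
		smu7_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
		smu7_force_clock_level(hwmgr, PP_PCIE, 1 << pcie_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	/* Fan handling only applies when the level change itself succeeded. */
	if (!ret) {
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK &&
		    hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK &&
			 hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
	}
	return ret;
}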
(dep_mclk_table->entries[0].vddci != data->vbios_boot_state.vddci_bootup_value) - pr_err("Single VDDCI entry VDDCI/MCLK dependency table " + pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " "does not match VBIOS boot VDDCI level"); } @@ -3166,7 +3169,7 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, data->highest_mclk = memory_clock; PP_ASSERT_WITH_CODE( - (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), + (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), "Performance levels exceeds SMC limit!", return -EINVAL); @@ -3219,11 +3222,11 @@ static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { if (dep_mclk_table->entries[0].clk != data->vbios_boot_state.mclk_bootup_value) - pr_err("Single MCLK entry VDDCI/MCLK dependency table " + pr_debug("Single MCLK entry VDDCI/MCLK dependency table " "does not match VBIOS boot MCLK level"); if (dep_mclk_table->entries[0].v != data->vbios_boot_state.vddci_bootup_value) - pr_err("Single VDDCI entry VDDCI/MCLK dependency table " + pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " "does not match VBIOS boot VDDCI level"); } @@ -3312,14 +3315,14 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, struct pp_gpu_power *query) { - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart), "Failed to start pm status log!", return -1); msleep_interruptible(20); - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample), "Failed to sample pm status log!", return -1); @@ -3353,19 +3356,19 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); *((uint32_t *)value) = sclk; *size = 4; return 0; case AMDGPU_PP_SENSOR_GFX_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); *((uint32_t *)value) = mclk; *size = 4; return 0; case AMDGPU_PP_SENSOR_GPU_LOAD: - offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + offset = data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, AverageGraphicsActivity); @@ -3532,7 +3535,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to freeze SCLK DPM when DPM is disabled", ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel), "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", return -EINVAL); @@ -3544,7 +3547,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to freeze MCLK DPM when DPM is disabled", ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel), "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", return -EINVAL); @@ -3762,7 
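Across all of these hunks the SMU helpers drop the pp_smumgr indirection and take the hwmgr directly, e.g. smum_send_msg_to_smc(hwmgr, msg) instead of smum_send_msg_to_smc(hwmgr->smumgr, msg). The request/response convention is unchanged: post a PPSMC message, then read the reply back from the SMC argument register, as in the sensor path above. A small sketch using only the calls already shown (smu7_query_sclk is a hypothetical wrapper):

/* smu7_query_sclk() wraps the two calls used by the
 * AMDGPU_PP_SENSOR_GFX_SCLK case of smu7_read_sensor(): post the request,
 * then read the SMU's reply out of SMC_MSG_ARG_0.
 */
static uint32_t smu7_query_sclk(struct pp_hwmgr *hwmgr)
{
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
	return cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
}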
+3765,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to Unfreeze SCLK DPM when DPM is disabled", ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", return -EINVAL); @@ -3774,7 +3777,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to Unfreeze MCLK DPM when DPM is disabled", ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", return -EINVAL); @@ -3822,11 +3825,14 @@ static int smu7_notify_link_speed_change_after_state_change( static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int ret = 0; - if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { + smum_send_msg_to_smc_with_parameter(hwmgr, (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); - return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; + ret = (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; + } + return ret; } static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) @@ -3899,10 +3905,7 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f hwmgr->thermal_controller. advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); } @@ -3911,7 +3914,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) { PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; - return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; + return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1; } static int @@ -3974,12 +3977,12 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, PreVBlankGap), 0x64); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); @@ -4004,10 +4007,7 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f hwmgr->thermal_controller. 
advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); } @@ -4249,21 +4249,21 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | - AMD_DPM_FORCED_LEVEL_LOW | - AMD_DPM_FORCED_LEVEL_HIGH)) + if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | + AMD_DPM_FORCED_LEVEL_LOW | + AMD_DPM_FORCED_LEVEL_HIGH)) return -EINVAL; switch (type) { case PP_SCLK: if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); break; case PP_MCLK: if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); break; @@ -4276,7 +4276,7 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, level++; if (!data->pcie_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PCIeDPM_ForceLevel, level); break; @@ -4300,7 +4300,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, switch (type) { case PP_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); for (i = 0; i < sclk_table->count; i++) { @@ -4316,7 +4316,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, (i == now) ? "*" : ""); break; case PP_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); for (i = 0; i < mclk_table->count; i++) { @@ -4353,31 +4353,27 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, return size; } -static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { - int result = 0; - switch (mode) { case AMD_FAN_CTRL_NONE: - result = smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); + smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); break; case AMD_FAN_CTRL_MANUAL: if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) - result = smu7_fan_ctrl_stop_smc_fan_control(hwmgr); + smu7_fan_ctrl_stop_smc_fan_control(hwmgr); break; case AMD_FAN_CTRL_AUTO: - result = smu7_fan_ctrl_set_static_mode(hwmgr, mode); - if (!result) - result = smu7_fan_ctrl_start_smc_fan_control(hwmgr); + if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode)) + smu7_fan_ctrl_start_smc_fan_control(hwmgr); break; default: break; } - return result; } -static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) +static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) { return hwmgr->fan_ctrl_enabled ? 
AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL; } @@ -4606,7 +4602,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, if (sclk_mask) { if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, data->dpm_level_enable_mask. sclk_dpm_enable_mask & @@ -4615,7 +4611,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, if (mclk_mask) { if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, data->dpm_level_enable_mask. mclk_dpm_enable_mask & @@ -4627,8 +4623,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) { - struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (smu_data == NULL) return -EINVAL; @@ -4640,13 +4635,13 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( - hwmgr->smumgr, PPSMC_MSG_EnableAvfs), + hwmgr, PPSMC_MSG_EnableAvfs), "Failed to enable AVFS!", return -EINVAL); } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( - hwmgr->smumgr, PPSMC_MSG_DisableAvfs), + hwmgr, PPSMC_MSG_DisableAvfs), "Failed to disable AVFS!", return -EINVAL); @@ -4703,6 +4698,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .set_power_profile_state = smu7_set_power_profile_state, .avfs_control = smu7_avfs_control, .disable_smc_firmware_ctf = smu7_thermal_disable_alert, + .start_thermal_controller = smu7_start_thermal_controller, }; uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index f221e17b67e7..e021154aedbd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -182,14 +182,7 @@ struct smu7_hwmgr { struct smu7_dpm_table dpm_table; struct smu7_dpm_table golden_dpm_table; - uint32_t voting_rights_clients0; - uint32_t voting_rights_clients1; - uint32_t voting_rights_clients2; - uint32_t voting_rights_clients3; - uint32_t voting_rights_clients4; - uint32_t voting_rights_clients5; - uint32_t voting_rights_clients6; - uint32_t voting_rights_clients7; + uint32_t voting_rights_clients[8]; uint32_t static_screen_threshold_unit; uint32_t static_screen_threshold; uint32_t voltage_control; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 1dc31aa72781..85ca16abb626 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -629,51 +629,38 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) uint32_t block_en = 0; int32_t result = 0; uint32_t didt_block; - uint32_t data; if (hwmgr->chip_id == CHIP_POLARIS11) didt_block = Polaris11_DIDTBlock_Info; else didt_block = DIDTBlock_Info; - block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ? 
en : 0; - - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); - data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((block_en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + block_en = PP_CAP(PHM_PlatformCaps_SQRamping) ? en : 0; + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_SQ_CTRL0, DIDT_CTRL_EN, block_en); didt_block &= ~SQ_Enable_MASK; didt_block |= block_en << SQ_Enable_SHIFT; - block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ? en : 0; - - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); - data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((block_en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + block_en = PP_CAP(PHM_PlatformCaps_DBRamping) ? en : 0; + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_DB_CTRL0, DIDT_CTRL_EN, block_en); didt_block &= ~DB_Enable_MASK; didt_block |= block_en << DB_Enable_SHIFT; - block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ? en : 0; - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); - data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((block_en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + block_en = PP_CAP(PHM_PlatformCaps_TDRamping) ? en : 0; + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_TD_CTRL0, DIDT_CTRL_EN, block_en); didt_block &= ~TD_Enable_MASK; didt_block |= block_en << TD_Enable_SHIFT; - block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping) ? en : 0; - - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); - data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((block_en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + block_en = PP_CAP(PHM_PlatformCaps_TCPRamping) ? 
en : 0; + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_TCP_CTRL0, DIDT_CTRL_EN, block_en); didt_block &= ~TCP_Enable_MASK; didt_block |= block_en << TCP_Enable_SHIFT; - if (enable) - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, didt_block); + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block); return result; } @@ -753,12 +740,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) if (result == 0) num_se = sys_info.value; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + if (PP_CAP(PHM_PlatformCaps_SQRamping) || + PP_CAP(PHM_PlatformCaps_DBRamping) || + PP_CAP(PHM_PlatformCaps_TDRamping) || + PP_CAP(PHM_PlatformCaps_TCPRamping)) { cgs_enter_safe_mode(hwmgr->device, true); + cgs_lock_grbm_idx(hwmgr->device, true); value = 0; value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { @@ -775,7 +763,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) } else if (hwmgr->chip_id == CHIP_POLARIS11) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); - if (hwmgr->smumgr->is_kicker) + if (hwmgr->is_kicker) result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker); else result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); @@ -793,11 +781,12 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); if (hwmgr->chip_id == CHIP_POLARIS11) { - result = smum_send_msg_to_smc(hwmgr->smumgr, + result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_EnableDpmDidt)); PP_ASSERT_WITH_CODE((0 == result), "Failed to enable DPM DIDT.", return result); } + cgs_lock_grbm_idx(hwmgr->device, false); cgs_enter_safe_mode(hwmgr->device, false); } @@ -808,10 +797,10 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) { int result; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + if (PP_CAP(PHM_PlatformCaps_SQRamping) || + PP_CAP(PHM_PlatformCaps_DBRamping) || + PP_CAP(PHM_PlatformCaps_TDRamping) || + PP_CAP(PHM_PlatformCaps_TCPRamping)) { cgs_enter_safe_mode(hwmgr->device, true); @@ -820,7 +809,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) "Post DIDT enable clock gating failed.", return result); if (hwmgr->chip_id == CHIP_POLARIS11) { - result = smum_send_msg_to_smc(hwmgr->smumgr, + result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_DisableDpmDidt)); PP_ASSERT_WITH_CODE((0 == result), "Failed to disable DPM DIDT.", return result); @@ -836,10 +825,9 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC)) { + if (PP_CAP(PHM_PlatformCaps_CAC)) { int 
smc_result; - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_EnableCac)); PP_ASSERT_WITH_CODE((0 == smc_result), "Failed to enable CAC in SMC.", result = -1); @@ -854,9 +842,8 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC) && data->cac_enabled) { - int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_DisableCac)); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable CAC in SMC.", result = -1); @@ -872,7 +859,7 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PkgPwrSetLimit, n); return 0; } @@ -880,7 +867,7 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr, uint32_t target_tdp) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); } @@ -899,11 +886,9 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) else cac_table = hwmgr->dyn_state.cac_dtp_table; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->enable_tdc_limit_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_TDCLimitEnable)); PP_ASSERT_WITH_CODE((0 == smc_result), "Failed to enable TDCLimit in SMC.", result = -1;); @@ -913,14 +898,13 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) } if (data->enable_pkg_pwr_tracking_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); PP_ASSERT_WITH_CODE((0 == smc_result), "Failed to enable PkgPwrTracking in SMC.", result = -1;); if (0 == smc_result) { uint32_t default_limit = (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); - data->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit; @@ -937,14 +921,13 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment) && - data->power_containment_features) { + if (PP_CAP(PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { int smc_result; if (data->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_TDCLimitDisable)); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable TDCLimit in SMC.", @@ -953,7 +936,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_DTE) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_DisableDTE)); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable DTE in SMC.", @@ -962,7 
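smu7_powertune.c is converted to two shorthands: PP_CAP(x), which stands in for the repeated phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, x) test against the local hwmgr, and CGS_WREG32_FIELD_IND(), which hides the read-modify-write of a single register field. The removed lines spell out exactly what the field macro replaces; side by side, for the DIDT_SQ block enable (the wrapper is illustrative only and performs the same write twice for comparison):

/* Illustrative wrapper: updates DIDT_SQ_CTRL0.DIDT_CTRL_EN twice, once per
 * idiom, so the removed and added forms can be compared directly.
 */
static void smu7_didt_sq_enable_sketch(struct pp_hwmgr *hwmgr, uint32_t en)
{
	uint32_t block_en = PP_CAP(PHM_PlatformCaps_SQRamping) ? en : 0;
	uint32_t data;

	/* Removed form: explicit read-modify-write of the enable field. */
	data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
	data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
	data |= (block_en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) &
		DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);

	/* Added form: the same field update through the CGS helper. */
	CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
			     DIDT_SQ_CTRL0, DIDT_CTRL_EN, block_en);
}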
+945,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable PkgPwrTracking in SMC.", @@ -987,16 +970,17 @@ int smu7_power_control_set_level(struct pp_hwmgr *hwmgr) cac_table = table_info->cac_dtp_table; else cac_table = hwmgr->dyn_state.cac_dtp_table; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { /* adjustment percentage has already been validated */ adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? hwmgr->platform_descriptor.TDPAdjustment : (-1 * hwmgr->platform_descriptor.TDPAdjustment); - /* SMC requested that target_tdp to be 7 bit fraction in DPM table - * but message to be 8 bit fraction for messages - */ - target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + + if (hwmgr->chip_id > CHIP_TONGA) + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + else + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usConfigurableTDP * 256)) / 100; + result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index baddb569a8b8..d7aa643cdb51 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -37,9 +37,8 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, fan_speed_info->min_percent = 0; fan_speed_info->max_percent = 100; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM) && - hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { + if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM) && + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { fan_speed_info->supports_rpm_read = true; fan_speed_info->supports_rpm_write = true; fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; @@ -87,8 +86,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) uint32_t crystal_clock_freq; if (hwmgr->thermal_controller.fanInfo.bNoFan || - (hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution == 0)) + !hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) return -ENODEV; tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -152,13 +150,11 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) { int result; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODFuzzyFanControlSupport)) { + if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) { cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); + result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM)) + if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM)) hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller. 
advanceFanControlParameters.usMaxFanRPM); @@ -169,12 +165,12 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) } else { cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE); - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); + result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl); } if (!result && hwmgr->thermal_controller. advanceFanControlParameters.ucTargetTemperature) - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanTemperatureTarget, hwmgr->thermal_controller. advanceFanControlParameters.ucTargetTemperature); @@ -187,7 +183,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) { hwmgr->fan_ctrl_enabled = false; - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl); + return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl); } /** @@ -209,8 +205,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, if (speed > 100) speed = 100; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) smu7_fan_ctrl_stop_smc_fan_control(hwmgr); duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -241,8 +236,7 @@ int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) if (hwmgr->thermal_controller.fanInfo.bNoFan) return 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); if (!result) result = smu7_fan_ctrl_start_smc_fan_control(hwmgr); @@ -270,8 +264,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) return 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) smu7_fan_ctrl_stop_smc_fan_control(hwmgr); crystal_clock_freq = smu7_get_xclk(hwmgr); @@ -367,7 +360,7 @@ static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr) * * @param hwmgr The address of the hardware manager. 
*/ -int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr) +static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr) { uint32_t alert; @@ -378,7 +371,7 @@ int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr) CG_THERMAL_INT, THERM_INT_MASK, alert); /* send message to SMU to enable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable); } /** @@ -396,7 +389,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr) CG_THERMAL_INT, THERM_INT_MASK, alert); /* send message to SMU to disable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable); } /** @@ -423,16 +416,14 @@ int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) * @param Result the last failure code * @return result from set temperature range routine */ -static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +static int smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr) { /* If the fantable setup has failed we could have disabled * PHM_PlatformCaps_MicrocodeFanControl even after * this function was included in the table. * Make sure that we still think controlling the fan is OK. */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { smu7_fan_ctrl_start_smc_fan_control(hwmgr); smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); } @@ -440,108 +431,34 @@ static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, return 0; } -/** -* Set temperature range for high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *range) { - struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + int ret = 0; if (range == NULL) return -EINVAL; - return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max); -} - -/** -* Programs one-time setting registers -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from initialize thermal controller routine -*/ -static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return smu7_thermal_initialize(hwmgr); -} - -/** -* Enable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from enable alert routine -*/ -static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return smu7_thermal_enable_alert(hwmgr); -} - -/** -* Disable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from disable alert routine -*/ -static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return smu7_thermal_disable_alert(hwmgr); -} + smu7_thermal_initialize(hwmgr); + ret = smu7_thermal_set_temperature_range(hwmgr, range->min, range->max); + if (ret) + return -EINVAL; + smu7_thermal_enable_alert(hwmgr); + ret = smum_thermal_avfs_enable(hwmgr); + if (ret) + return -EINVAL; -static const struct phm_master_table_item -phm_thermal_start_thermal_controller_master_list[] = { - { .tableFunction = tf_smu7_thermal_initialize }, - { .tableFunction = tf_smu7_thermal_set_temperature_range }, - { .tableFunction = tf_smu7_thermal_enable_alert }, - { .tableFunction = smum_thermal_avfs_enable }, /* We should restrict performance levels to low before we halt the SMC. * On the other hand we are still in boot state when we do this * so it would be pointless. * If this assumption changes we have to revisit this table. */ - { .tableFunction = smum_thermal_setup_fan_table }, - { .tableFunction = tf_smu7_thermal_start_smc_fan_control }, - { } -}; - -static const struct phm_master_table_header -phm_thermal_start_thermal_controller_master = { - 0, - PHM_MasterTableFlag_None, - phm_thermal_start_thermal_controller_master_list -}; - -static const struct phm_master_table_item -phm_thermal_set_temperature_range_master_list[] = { - { .tableFunction = tf_smu7_thermal_disable_alert }, - { .tableFunction = tf_smu7_thermal_set_temperature_range }, - { .tableFunction = tf_smu7_thermal_enable_alert }, - { } -}; - -static const struct phm_master_table_header -phm_thermal_set_temperature_range_master = { - 0, - PHM_MasterTableFlag_None, - phm_thermal_set_temperature_range_master_list -}; + smum_thermal_setup_fan_table(hwmgr); + smu7_thermal_start_smc_fan_control(hwmgr); + return 0; +} + + int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) { @@ -550,35 +467,3 @@ int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) return 0; } -/** -* Initializes the thermal controller related functions in the Hardware Manager structure. -* @param hwmgr The address of the hardware manager. -* @exception Any error code from the low-level communication. 
-*/ -int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - int result; - - result = phm_construct_table(hwmgr, - &phm_thermal_set_temperature_range_master, - &(hwmgr->set_temperature_range)); - - if (!result) { - result = phm_construct_table(hwmgr, - &phm_thermal_start_thermal_controller_master, - &(hwmgr->start_thermal_controller)); - if (result) - phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); - } - - if (!result) - hwmgr->fan_ctrl_is_in_default_mode = true; - return result; -} - -void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr) -{ - phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); - phm_destroy_table(hwmgr, &(hwmgr->start_thermal_controller)); - return; -}
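The smu7_thermal.c rework above removes the phm master-table machinery (the tf_* trampolines, phm_construct_table()/phm_destroy_table(), and pp_smu7_thermal_initialize()/pp_smu7_thermal_fini()) and sequences thermal bring-up directly inside smu7_start_thermal_controller(), exported through the new .start_thermal_controller entry in smu7_hwmgr_funcs. Making the ordering a straight-line function also drops the construct/destroy lifecycle the old tables required. A sketch of how generic code can dispatch the new hook; the wrapper name is hypothetical and the temperature range is assumed to be supplied by the caller:

/* Hypothetical dispatcher: only the .start_thermal_controller hook and its
 * signature come from the patch; the wrapper name and caller-supplied range
 * are assumptions for illustration.
 */
static int pp_start_thermal_controller_sketch(struct pp_hwmgr *hwmgr,
					      struct PP_TemperatureRange *range)
{
	if (hwmgr->hwmgr_func->start_thermal_controller == NULL)
		return 0;

	return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, range);
}

Inside the SMU7 implementation the sequence is now explicit: initialize the thermal registers, set the temperature range, enable the alerts, enable AVFS, set up the fan table, and hand fan control to the SMC.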
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h index ba71b608fa75..42c1ba0fad78 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h @@ -46,14 +46,13 @@ extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr); -extern void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr); extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); -extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr); extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr); extern int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr); +extern int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *temperature_range); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index f8f02e70b8bc..a59d282797f5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -201,9 +201,6 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_ControlVDDCI); phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnableSMU7ThermalManagement); sys_info.size = sizeof(struct cgs_system_info); @@ -381,12 +378,10 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (!data->registry_data.socclk_dpm_key_disabled) data->smu_features[GNLD_DPM_SOCCLK].supported = true; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDPM)) + if (PP_CAP(PHM_PlatformCaps_UVDDPM)) data->smu_features[GNLD_DPM_UVD].supported = true; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEDPM)) + if (PP_CAP(PHM_PlatformCaps_VCEDPM)) data->smu_features[GNLD_DPM_VCE].supported = true; if (!data->registry_data.pcie_dpm_key_disabled) @@ -395,9 +390,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (!data->registry_data.dcefclk_dpm_key_disabled) data->smu_features[GNLD_DPM_DCEFCLK].supported = true; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep) && - data->registry_data.sclk_deep_sleep_support) { + if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) && + data->registry_data.sclk_deep_sleep_support) { data->smu_features[GNLD_DS_GFXCLK].supported = true; data->smu_features[GNLD_DS_SOCCLK].supported = true; data->smu_features[GNLD_DS_LCLK].supported = true; @@ -431,8 +425,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (data->registry_data.vr0hot_enabled) data->smu_features[GNLD_VR0HOT].supported = true; - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion); - vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version)); + 
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); + vega10_read_arg_from_smc(hwmgr, &(data->smu_version)); /* ACG firmware has major version 5 */ if ((data->smu_version & 0xff000000) == 0x5000000) data->smu_features[GNLD_ACG].supported = true; @@ -497,8 +491,7 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr) if (!vega10_get_socclk_for_voltage_evv(hwmgr, table_info->vddc_lookup_table, vv_id, &sclk)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { + if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) { for (j = 1; j < socclk_table->count; j++) { if (socclk_table->entries[j].clk == sclk && socclk_table->entries[j].cks_enable == 0) { @@ -591,61 +584,37 @@ static int vega10_patch_clock_voltage_limits_with_vddc_leakage( static int vega10_patch_voltage_dependency_tables_with_lookup_table( struct pp_hwmgr *hwmgr) { - uint8_t entry_id; - uint8_t voltage_id; + uint8_t entry_id, voltage_id; + unsigned i; struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table = - table_info->vdd_dep_on_socclk; - struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table = - table_info->vdd_dep_on_dcefclk; - struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table = - table_info->vdd_dep_on_pixclk; - struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table = - table_info->vdd_dep_on_dispclk; - struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table = - table_info->vdd_dep_on_phyclk; - struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = - table_info->vdd_dep_on_mclk; struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = table_info->mm_dep_table; + struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = + table_info->vdd_dep_on_mclk; - for (entry_id = 0; entry_id < socclk_table->count; entry_id++) { - voltage_id = socclk_table->entries[entry_id].vddInd; - socclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; - } - - for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) { - voltage_id = gfxclk_table->entries[entry_id].vddInd; - gfxclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; - } - - for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) { - voltage_id = dcefclk_table->entries[entry_id].vddInd; - dcefclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; - } - - for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) { - voltage_id = pixclk_table->entries[entry_id].vddInd; - pixclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; - } + for (i = 0; i < 6; i++) { + struct phm_ppt_v1_clock_voltage_dependency_table *vdt; + switch (i) { + case 0: vdt = table_info->vdd_dep_on_socclk; break; + case 1: vdt = table_info->vdd_dep_on_sclk; break; + case 2: vdt = table_info->vdd_dep_on_dcefclk; break; + case 3: vdt = table_info->vdd_dep_on_pixclk; break; + case 4: vdt = table_info->vdd_dep_on_dispclk; break; + case 5: vdt = table_info->vdd_dep_on_phyclk; break; + } - for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) { - voltage_id = dspclk_table->entries[entry_id].vddInd; - dspclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + for (entry_id = 0; entry_id < 
vdt->count; entry_id++) { + voltage_id = vdt->entries[entry_id].vddInd; + vdt->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } } - for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) { - voltage_id = phyclk_table->entries[entry_id].vddInd; - phyclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { + voltage_id = mm_table->entries[entry_id].vddcInd; + mm_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; } for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { @@ -660,11 +629,6 @@ static int vega10_patch_voltage_dependency_tables_with_lookup_table( table_info->vddmem_lookup_table->entries[voltage_id].us_vdd; } - for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { - voltage_id = mm_table->entries[entry_id].vddcInd; - mm_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; - } return 0; @@ -838,8 +802,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) } /* VDDCI_MEM */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI)) { + if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) { if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO; @@ -959,7 +922,7 @@ static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr) { uint32_t features_enabled; - if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) { + if (!vega10_get_smc_features(hwmgr, &features_enabled)) { if (features_enabled & SMC_DPM_FEATURES) return true; } @@ -1411,10 +1374,8 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct vega10_dpm_table)); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinDCSupport)) { + if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || + PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { data->odn_dpm_table.odn_core_clock_dpm_levels. 
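The switch over table indices above is one way to walk the six vddc dependency tables; an equivalent sketch (illustration only, not part of the patch) would index an array of table pointers instead:

	struct phm_ppt_v1_clock_voltage_dependency_table *tables[] = {
		table_info->vdd_dep_on_socclk,
		table_info->vdd_dep_on_sclk,
		table_info->vdd_dep_on_dcefclk,
		table_info->vdd_dep_on_pixclk,
		table_info->vdd_dep_on_dispclk,
		table_info->vdd_dep_on_phyclk,
	};
	unsigned int i;
	uint8_t entry_id;

	for (i = 0; i < ARRAY_SIZE(tables); i++)
		for (entry_id = 0; entry_id < tables[i]->count; entry_id++)
			tables[i]->entries[entry_id].vddc =
				table_info->vddc_lookup_table->entries[
					tables[i]->entries[entry_id].vddInd].us_vdd;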
number_of_performance_levels = data->dpm_table.gfx_table.count; for (i = 0; i < data->dpm_table.gfx_table.count; i++) { @@ -2311,21 +2272,21 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr) uint32_t agc_btc_response; if (data->smu_features[GNLD_ACG].supported) { - if (0 == vega10_enable_smc_features(hwmgr->smumgr, true, + if (0 == vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap)) data->smu_features[GNLD_DPM_PREFETCHER].enabled = true; - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc); - vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc); + vega10_read_arg_from_smc(hwmgr, &agc_btc_response); if (1 == agc_btc_response) { if (1 == data->acg_loop_state) - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop); else if (2 == data->acg_loop_state) - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop); - if (0 == vega10_enable_smc_features(hwmgr->smumgr, true, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop); + if (0 == vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_ACG].smu_feature_bitmap)) data->smu_features[GNLD_ACG].enabled = true; } else { @@ -2342,13 +2303,11 @@ static int vega10_acg_disable(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); - if (data->smu_features[GNLD_ACG].supported) { - if (data->smu_features[GNLD_ACG].enabled) { - if (0 == vega10_enable_smc_features(hwmgr->smumgr, false, - data->smu_features[GNLD_ACG].smu_feature_bitmap)) + if (data->smu_features[GNLD_ACG].supported && + data->smu_features[GNLD_ACG].enabled) + if (!vega10_enable_smc_features(hwmgr, false, + data->smu_features[GNLD_ACG].smu_feature_bitmap)) data->smu_features[GNLD_ACG].enabled = false; - } - } return 0; } @@ -2363,9 +2322,8 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr) result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params); if (!result) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot) && - (data->registry_data.regulator_hot_gpio_support)) { + if (PP_CAP(PHM_PlatformCaps_RegulatorHot) && + data->registry_data.regulator_hot_gpio_support) { pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio; pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity; pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio; @@ -2377,9 +2335,8 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr) pp_table->VR1HotPolarity = 0; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition) && - (data->registry_data.ac_dc_switch_gpio_support)) { + if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) && + data->registry_data.ac_dc_switch_gpio_support) { pp_table->AcDcGpio = gpio_params.ucAcDcGpio; pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity; } else { @@ -2398,14 +2355,14 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable) if (data->smu_features[GNLD_AVFS].supported) { if (enable) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_AVFS].smu_feature_bitmap), "[avfs_control] Attempt to Enable AVFS feature Failed!", return -1); data->smu_features[GNLD_AVFS].enabled = true; } else { - 
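The send-message/read-argument pairs in the ACG hunk above (PPSMC_MSG_RunAcgBtc followed by vega10_read_arg_from_smc) follow the usual SMU request/response pattern. A hypothetical helper, not part of the driver, shows the shape:

	/* Sketch only; the helper name is illustrative. */
	static int vega10_smc_query(struct pp_hwmgr *hwmgr, uint16_t msg,
				    uint32_t *resp)
	{
		int ret = smum_send_msg_to_smc(hwmgr, msg);

		if (!ret)
			ret = vega10_read_arg_from_smc(hwmgr, resp);
		return ret;
	}
	/* e.g. vega10_smc_query(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response); */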
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_AVFS].smu_feature_id), "[avfs_control] Attempt to Disable AVFS feature Failed!", @@ -2428,11 +2385,11 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumTop32); - vega10_read_arg_from_smc(hwmgr->smumgr, &top32); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); + vega10_read_arg_from_smc(hwmgr, &top32); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumBottom32); - vega10_read_arg_from_smc(hwmgr->smumgr, &bottom32); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); + vega10_read_arg_from_smc(hwmgr, &bottom32); serial_number = ((uint64_t)bottom32 << 32) | top32; @@ -2446,7 +2403,7 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) avfs_fuse_table->VFT2_b = fuse.VFT2_b; avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1; avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2; - result = vega10_copy_table_to_smc(hwmgr->smumgr, + result = vega10_copy_table_to_smc(hwmgr, (uint8_t *)avfs_fuse_table, AVFSFUSETABLE); PP_ASSERT_WITH_CODE(!result, "Failed to upload FuseOVerride!", @@ -2585,14 +2542,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; if (0 != boot_up_values.usVddc) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFloorSocVoltage, (boot_up_values.usVddc * 4)); data->vbios_boot_state.bsoc_vddc_lock = true; } else { data->vbios_boot_state.bsoc_vddc_lock = false; } - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); } @@ -2618,7 +2575,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) vega10_populate_and_upload_avfs_fuse_override(hwmgr); - result = vega10_copy_table_to_smc(hwmgr->smumgr, + result = vega10_copy_table_to_smc(hwmgr, (uint8_t *)pp_table, PPTABLE); PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); @@ -2641,7 +2598,7 @@ static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr) pr_info("THERMAL Feature Already enabled!"); PP_ASSERT_WITH_CODE( - !vega10_enable_smc_features(hwmgr->smumgr, + !vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_THERMAL].smu_feature_bitmap), "Enable THERMAL Feature Failed!", @@ -2661,7 +2618,7 @@ static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr) pr_info("THERMAL Feature Already disabled!"); PP_ASSERT_WITH_CODE( - !vega10_enable_smc_features(hwmgr->smumgr, + !vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_THERMAL].smu_feature_bitmap), "disable THERMAL Feature Failed!", @@ -2677,11 +2634,10 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot)) { + if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) { if (data->smu_features[GNLD_VR0HOT].supported) { PP_ASSERT_WITH_CODE( - !vega10_enable_smc_features(hwmgr->smumgr, + 
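For readers unfamiliar with the PP_ASSERT_WITH_CODE() construct used heavily above and below: its definition lives in pp_debug.h and is roughly (paraphrased, not copied):

	#define PP_ASSERT_WITH_CODE(cond, msg, code)	\
		do {					\
			if (!(cond)) {			\
				pr_warn("%s\n", msg);	\
				code;			\
			}				\
		} while (0)

so the third argument is an arbitrary statement, usually a return or an assignment to the local result variable.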
!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_VR0HOT].smu_feature_bitmap), "Attempt to Enable VR0 Hot feature Failed!", @@ -2690,7 +2646,7 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr) } else { if (data->smu_features[GNLD_VR1HOT].supported) { PP_ASSERT_WITH_CODE( - !vega10_enable_smc_features(hwmgr->smumgr, + !vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_VR1HOT].smu_feature_bitmap), "Attempt to Enable VR0 Hot feature Failed!", @@ -2708,7 +2664,7 @@ static int vega10_enable_ulv(struct pp_hwmgr *hwmgr) (struct vega10_hwmgr *)(hwmgr->backend); if (data->registry_data.ulv_support) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_ULV].smu_feature_bitmap), "Enable ULV Feature Failed!", return -1); @@ -2724,7 +2680,7 @@ static int vega10_disable_ulv(struct pp_hwmgr *hwmgr) (struct vega10_hwmgr *)(hwmgr->backend); if (data->registry_data.ulv_support) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_ULV].smu_feature_bitmap), "disable ULV Feature Failed!", return -EINVAL); @@ -2740,7 +2696,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) (struct vega10_hwmgr *)(hwmgr->backend); if (data->smu_features[GNLD_DS_GFXCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap), "Attempt to Enable DS_GFXCLK Feature Failed!", return -EINVAL); @@ -2748,7 +2704,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_SOCCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap), "Attempt to Enable DS_SOCCLK Feature Failed!", return -EINVAL); @@ -2756,7 +2712,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_LCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap), "Attempt to Enable DS_LCLK Feature Failed!", return -EINVAL); @@ -2764,7 +2720,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_DCEFCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap), "Attempt to Enable DS_DCEFCLK Feature Failed!", return -EINVAL); @@ -2780,7 +2736,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) (struct vega10_hwmgr *)(hwmgr->backend); if (data->smu_features[GNLD_DS_GFXCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap), "Attempt to disable DS_GFXCLK Feature Failed!", return -EINVAL); @@ -2788,7 +2744,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_SOCCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + 
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap), "Attempt to disable DS_ Feature Failed!", return -EINVAL); @@ -2796,7 +2752,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_LCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap), "Attempt to disable DS_LCLK Feature Failed!", return -EINVAL); @@ -2804,7 +2760,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_DCEFCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap), "Attempt to disable DS_DCEFCLK Feature Failed!", return -EINVAL); @@ -2822,7 +2778,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) if(data->smu_features[GNLD_LED_DISPLAY].supported == true){ - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap), "Attempt to disable LED DPM feature failed!", return -EINVAL); data->smu_features[GNLD_LED_DISPLAY].enabled = false; @@ -2840,7 +2796,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) } } - vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask); + vega10_enable_smc_features(hwmgr, false, feature_mask); return 0; } @@ -2870,7 +2826,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) } } - if (vega10_enable_smc_features(hwmgr->smumgr, + if (vega10_enable_smc_features(hwmgr, true, feature_mask)) { for (i = 0; i < GNLD_DPM_MAX; i++) { if (data->smu_features[i].smu_feature_bitmap & @@ -2880,22 +2836,21 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) } if(data->smu_features[GNLD_LED_DISPLAY].supported == true){ - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap), "Attempt to Enable LED DPM feature Failed!", return -EINVAL); data->smu_features[GNLD_LED_DISPLAY].enabled = true; } if (data->vbios_boot_state.bsoc_vddc_lock) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFloorSocVoltage, 0); data->vbios_boot_state.bsoc_vddc_lock = false; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition)) { + if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) { if (data->smu_features[GNLD_ACDC].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_ACDC].smu_feature_bitmap), "Attempt to Enable DS_GFXCLK Feature Failed!", return -1); @@ -2912,13 +2867,13 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) (struct vega10_hwmgr *)(hwmgr->backend); int tmp_result, result = 0; - tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); PP_ASSERT_WITH_CODE(!tmp_result, "Failed to configure telemetry!", return tmp_result); - 
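The per-feature blocks in the deep-sleep enable/disable functions above are left as-is by this patch (only the first argument changes); purely as an illustration, a single helper taking a bool could fold the repeated blocks into one loop:

	/* Illustration only; GNLD_* indices as used in the hunks above. */
	static int vega10_set_deep_sleep_features(struct pp_hwmgr *hwmgr,
						  bool enable)
	{
		static const int ds[] = {
			GNLD_DS_GFXCLK, GNLD_DS_SOCCLK,
			GNLD_DS_LCLK, GNLD_DS_DCEFCLK,
		};
		struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
		int i;

		for (i = 0; i < ARRAY_SIZE(ds); i++) {
			if (!data->smu_features[ds[i]].supported)
				continue;
			if (vega10_enable_smc_features(hwmgr, enable,
					data->smu_features[ds[i]].smu_feature_bitmap))
				return -EINVAL;
			data->smu_features[ds[i]].enabled = enable;
		}
		return 0;
	}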
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0); tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1; @@ -2936,8 +2891,7 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "Failed to initialize SMC table!", result = tmp_result); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) { + if (PP_CAP(PHM_PlatformCaps_ThermalController)) { tmp_result = vega10_enable_thermal_protection(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, "Failed to enable thermal protection!", @@ -3172,8 +3126,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { + if (PP_CAP(PHM_PlatformCaps_StablePState)) { PP_ASSERT_WITH_CODE( data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 && data->registry_data.stable_pstate_sclk_dpm_percentage <= 100, @@ -3238,10 +3191,8 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, disable_mclk_switching_for_frame_lock = phm_cap_enabled( hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMclkSwitchForVR); - force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ForceMclkHigh); + disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR); + force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); disable_mclk_switching = (info.display_count > 1) || disable_mclk_switching_for_frame_lock || @@ -3292,8 +3243,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, vega10_ps->performance_levels[1].mem_clock; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { + if (PP_CAP(PHM_PlatformCaps_StablePState)) { for (i = 0; i < vega10_ps->performance_level_count; i++) { vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk; vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk; @@ -3325,10 +3275,8 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co data->need_update_dpm_table = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinDCSupport)) { + if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || + PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { for (i = 0; i < sclk_table->count; i++) { if (sclk == sclk_table->dpm_levels[i].value) break; @@ -3412,10 +3360,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( uint32_t dpm_count, clock_percent; uint32_t i; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinDCSupport)) { + if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || + PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { if (!data->need_update_dpm_table && !data->apply_optimized_settings && @@ -3480,10 +3426,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( dpm_table-> gfx_table.dpm_levels[dpm_table->gfx_table.count - 1]. 
value = sclk; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinDCSupport)) { + if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) || + PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) { /* Need to do calculation based on the golden DPM table * as the Heatmap GPU Clock axis is also based on * the default values @@ -3537,10 +3481,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( mem_table.dpm_levels[dpm_table->mem_table.count - 1]. value = mclk; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinDCSupport)) { + if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) || + PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) { PP_ASSERT_WITH_CODE( golden_dpm_table->mem_table.dpm_levels @@ -3732,7 +3674,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) if (data->smc_state_table.gfx_boot_level != data->dpm_table.gfx_table.dpm_state.soft_min_level) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMinGfxclkByIndex, data->smc_state_table.gfx_boot_level), "Failed to set soft min sclk index!", @@ -3748,14 +3690,14 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) { socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr); PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMinSocclkByIndex, socclk_idx), "Failed to set soft min uclk index!", return -EINVAL); } else { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, data->smc_state_table.mem_boot_level), "Failed to set soft min uclk index!", @@ -3780,7 +3722,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) if (data->smc_state_table.gfx_max_level != data->dpm_table.gfx_table.dpm_state.soft_max_level) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMaxGfxclkByIndex, data->smc_state_table.gfx_max_level), "Failed to set soft max sclk index!", @@ -3794,7 +3736,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) if (data->smc_state_table.mem_max_level != data->dpm_table.mem_table.dpm_state.soft_max_level) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMaxUclkByIndex, data->smc_state_table.mem_max_level), "Failed to set soft max mclk index!", @@ -3853,7 +3795,7 @@ int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) (struct vega10_hwmgr *)(hwmgr->backend); if (data->smu_features[GNLD_DPM_VCE].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, enable, data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap), "Attempt to Enable/Disable DPM VCE Failed!", @@ -3871,9 +3813,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr) int result = 0; uint32_t low_sclk_interrupt_threshold = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification) - && (hwmgr->gfx_arbiter.sclk_threshold != + if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) && + (hwmgr->gfx_arbiter.sclk_threshold != 
data->low_sclk_interrupt_threshold)) { data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold; @@ -3884,7 +3825,7 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr) cpu_to_le32(low_sclk_interrupt_threshold); /* This message will also enable SmcToHost Interrupt */ - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetLowGfxclkInterruptThreshold, (uint32_t)low_sclk_interrupt_threshold); } @@ -3920,7 +3861,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, "Failed to update SCLK threshold!", result = tmp_result); - result = vega10_copy_table_to_smc(hwmgr->smumgr, + result = vega10_copy_table_to_smc(hwmgr, (uint8_t *)pp_table, PPTABLE); PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); @@ -3931,7 +3872,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, return 0; } -static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) { struct pp_power_state *ps; struct vega10_power_state *vega10_ps; @@ -3953,7 +3894,7 @@ static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) [vega10_ps->performance_level_count - 1].gfx_clock; } -static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) { struct pp_power_state *ps; struct vega10_power_state *vega10_ps; @@ -3980,12 +3921,12 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, { uint32_t value; - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr), "Failed to get current package power!", return -EINVAL); - vega10_read_arg_from_smc(hwmgr->smumgr, &value); + vega10_read_arg_from_smc(hwmgr, &value); /* power value is an integer */ query->average_gpu_power = value << 8; @@ -4002,25 +3943,25 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); if (!ret) { - vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx); + vega10_read_arg_from_smc(hwmgr, &sclk_idx); *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value; *size = 4; } break; case AMDGPU_PP_SENSOR_GFX_MCLK: - ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex); if (!ret) { - vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx); + vega10_read_arg_from_smc(hwmgr, &mclk_idx); *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value; *size = 4; } break; case AMDGPU_PP_SENSOR_GPU_LOAD: - ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0); + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0); if (!ret) { - vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent); + vega10_read_arg_from_smc(hwmgr, &activity_percent); *((uint32_t *)value) = activity_percent > 100 ? 
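On the `value << 8` in vega10_get_gpu_power() above: the SMC reports package power as an integer number of watts, and the shift stores it as 24.8 fixed point for the query interface (the fractional byte is always zero here), so for example:

	/* 42 W reported by the SMC -> 42 << 8 = 0x2A00 (10752) */
	query->average_gpu_power = value << 8;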
100 : activity_percent; *size = 4; } @@ -4055,7 +3996,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_disp) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetUclkFastSwitch, has_disp ? 0 : 1); } @@ -4090,7 +4031,7 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, if (!result) { clk_request = (clk_freq << 16) | clk_select; - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_RequestDisplayClockByFreq, clk_request); } @@ -4160,7 +4101,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value; if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk, + hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, min_clocks.dcefClockInSR /100), "Attempt to set divider for DCEFCLK Failed!",); } else { @@ -4172,7 +4113,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( if (min_clocks.memoryClock != 0) { idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx); data->dpm_table.mem_table.dpm_state.soft_min_level= idx; } @@ -4275,28 +4216,23 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo return 0; } -static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { - int result = 0; - switch (mode) { case AMD_FAN_CTRL_NONE: - result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100); + vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100); break; case AMD_FAN_CTRL_MANUAL: - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) + vega10_fan_ctrl_stop_smc_fan_control(hwmgr); break; case AMD_FAN_CTRL_AUTO: - result = vega10_fan_ctrl_set_static_mode(hwmgr, mode); - if (!result) - result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); + if (!vega10_fan_ctrl_set_static_mode(hwmgr, mode)) + vega10_fan_ctrl_start_smc_fan_control(hwmgr); break; default: break; } - return result; } static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, @@ -4306,51 +4242,16 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, uint32_t sclk_mask = 0; uint32_t mclk_mask = 0; uint32_t soc_mask = 0; - uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; - - if (level == hwmgr->dpm_level) - return ret; - - if (!(hwmgr->dpm_level & profile_mode_mask)) { - /* enter profile mode, save current level, disable gfx cg*/ - if (level & profile_mode_mask) { - hwmgr->saved_dpm_level = hwmgr->dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); - } - } else { - /* exit profile mode, restore level, enable gfx cg*/ - if (!(level & profile_mode_mask)) { - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) - level = hwmgr->saved_dpm_level; 
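The request word built in vega10_display_clock_voltage_request() above packs the frequency value into the upper 16 bits and the clock selector into the lower 16:

	/* e.g. clk_select = 2, clk_freq = 600 -> clk_request = 0x02580002
	 * (selector value shown is arbitrary, for illustration only) */
	clk_request = (clk_freq << 16) | clk_select;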
- cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_GATE); - } - } switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: ret = vega10_force_dpm_highest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_LOW: ret = vega10_force_dpm_lowest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_AUTO: ret = vega10_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: @@ -4359,27 +4260,25 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); if (ret) return ret; - hwmgr->dpm_level = level; vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); break; case AMD_DPM_FORCED_LEVEL_MANUAL: - hwmgr->dpm_level = level; - break; case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: break; } - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); - else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); - - return 0; + if (!ret) { + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); + else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); + } + return ret; } -static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr) +static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); @@ -4624,7 +4523,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); int i; - if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | + if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | AMD_DPM_FORCED_LEVEL_LOW | AMD_DPM_FORCED_LEVEL_HIGH)) return -EINVAL; @@ -4697,11 +4596,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, if (data->registry_data.sclk_dpm_key_disabled) break; - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex), "Attempt to get current sclk index Failed!", return -1); - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr, &now), "Attempt to read sclk index Failed!", return -1); @@ -4715,11 +4614,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, if (data->registry_data.mclk_dpm_key_disabled) break; - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex), "Attempt to get current mclk index Failed!", return -1); - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr, &now), "Attempt to read mclk index Failed!", return -1); @@ -4730,11 +4629,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, (i == now) ? 
"*" : ""); break; case PP_PCIE: - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex), "Attempt to get current mclk index Failed!", return -1); - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr, &now), "Attempt to read mclk index Failed!", return -1); @@ -4762,7 +4661,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) if ((data->water_marks_bitmap & WaterMarksExist) && !(data->water_marks_bitmap & WaterMarksLoaded)) { - result = vega10_copy_table_to_smc(hwmgr->smumgr, + result = vega10_copy_table_to_smc(hwmgr, (uint8_t *)wm_table, WMTABLE); PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL); data->water_marks_bitmap |= WaterMarksLoaded; @@ -4771,7 +4670,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) if (data->water_marks_bitmap & WaterMarksLoaded) { cgs_get_active_displays_info(hwmgr->device, &info); num_turned_on_displays = info.display_count; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, num_turned_on_displays); } @@ -4784,7 +4683,7 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) (struct vega10_hwmgr *)(hwmgr->backend); if (data->smu_features[GNLD_DPM_UVD].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, enable, data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap), "Attempt to Enable/Disable DPM UVD Failed!", @@ -4794,20 +4693,20 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) return 0; } -static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) +static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); data->vce_power_gated = bgate; - return vega10_enable_disable_vce_dpm(hwmgr, !bgate); + vega10_enable_disable_vce_dpm(hwmgr, !bgate); } -static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); data->uvd_power_gated = bgate; - return vega10_enable_disable_uvd_dpm(hwmgr, !bgate); + vega10_enable_disable_uvd_dpm(hwmgr, !bgate); } static inline bool vega10_are_power_levels_equal( @@ -4866,7 +4765,7 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg if (data->display_timing.num_existing_displays != info.display_count) is_update_required = true; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) { if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr) is_update_required = true; } @@ -4883,8 +4782,7 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) "DPM is not running right now, no need to disable DPM!", return 0); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) + if (PP_CAP(PHM_PlatformCaps_ThermalController)) vega10_disable_thermal_protection(hwmgr); tmp_result = vega10_disable_power_containment(hwmgr); @@ -4972,7 +4870,7 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr, if (!data->registry_data.sclk_dpm_key_disabled) PP_ASSERT_WITH_CODE( 
!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMinGfxclkByIndex, sclk_idx), "Failed to set soft min sclk index!", @@ -4983,7 +4881,7 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr, if (!data->registry_data.mclk_dpm_key_disabled) PP_ASSERT_WITH_CODE( !smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, mclk_idx), "Failed to set soft min mclk index!", @@ -5096,6 +4994,38 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) return 0; } +static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr, + const void *info) +{ + struct cgs_irq_src_funcs *irq_src = + (struct cgs_irq_src_funcs *)info; + + if (hwmgr->thermal_controller.ucType == + ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10 || + hwmgr->thermal_controller.ucType == + ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { + PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, + 0xf, /* AMDGPU_IH_CLIENTID_THM */ + 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr), + "Failed to register high thermal interrupt!", + return -EINVAL); + PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, + 0xf, /* AMDGPU_IH_CLIENTID_THM */ + 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr), + "Failed to register low thermal interrupt!", + return -EINVAL); + } + + /* Register CTF(GPIO_19) interrupt */ + PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, + 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */ + 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr), + "Failed to register CTF thermal interrupt!", + return -EINVAL); + + return 0; +} + static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .backend_init = vega10_hwmgr_backend_init, .backend_fini = vega10_hwmgr_backend_fini, @@ -5149,12 +5079,13 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .get_mclk_od = vega10_get_mclk_od, .set_mclk_od = vega10_set_mclk_od, .avfs_control = vega10_avfs_enable, + .register_internal_thermal_interrupt = vega10_register_thermal_interrupt, }; int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) { hwmgr->hwmgr_func = &vega10_hwmgr_funcs; hwmgr->pptable_func = &vega10_pptable_funcs; - pp_vega10_thermal_initialize(hwmgr); + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index e7fa67063cdc..d2f695692f77 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -854,99 +854,79 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) uint32_t en = (enable ? 
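The new vega10_register_thermal_interrupt() above expects the caller to pass an array of three cgs_irq_src_funcs entries; from the indexing and the client/source IDs, the layout is:

	/* irq_src[0] - high temperature threshold  (THM client 0xf, src 0)
	 * irq_src[1] - low temperature threshold   (THM client 0xf, src 1)
	 * irq_src[2] - CTF / GPIO_19 critical fault (ROM_SMUIO client 0x16,
	 *              src 83) */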
1 : 0); uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); - data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + if (PP_CAP(PHM_PlatformCaps_SQRamping)) { + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_SQ_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~SQ_Enable_MASK; didt_block_info |= en << SQ_Enable_SHIFT; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); - data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + if (PP_CAP(PHM_PlatformCaps_DBRamping)) { + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_DB_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~DB_Enable_MASK; didt_block_info |= en << DB_Enable_SHIFT; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); - data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + if (PP_CAP(PHM_PlatformCaps_TDRamping)) { + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_TD_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~TD_Enable_MASK; didt_block_info |= en << TD_Enable_SHIFT; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); - data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + if (PP_CAP(PHM_PlatformCaps_TCPRamping)) { + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_TCP_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~TCP_Enable_MASK; didt_block_info |= en << TCP_Enable_SHIFT; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0); - data &= ~DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0, data); + if (PP_CAP(PHM_PlatformCaps_DBRRamping)) { + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_DBR_CTRL0, DIDT_CTRL_EN, en); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) { + if (PP_CAP(PHM_PlatformCaps_SQRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL); - data &= ~DIDT_SQ_EDC_CTRL__EDC_EN_MASK; - data |= ((en << DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT) & 
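The CGS field helpers that replace the open-coded mask/shift sequences above are not shown in this diff; inferred from the code they replace, their behaviour is roughly:

	/* Rough equivalents (actual macros live in the CGS headers):
	 *
	 * CGS_REG_GET_FIELD(val, REG, FIELD):
	 *	(val & REG##__##FIELD##_MASK) >> REG##__##FIELD##__SHIFT
	 *
	 * CGS_REG_SET_FIELD(val, REG, FIELD, x):
	 *	(val & ~REG##__##FIELD##_MASK) |
	 *	((x << REG##__##FIELD##__SHIFT) & REG##__##FIELD##_MASK)
	 *
	 * CGS_WREG32_FIELD_IND(dev, space, REG, FIELD, x):
	 *	read ix##REG with cgs_read_ind_register(), update FIELD as
	 *	above, write it back with cgs_write_ind_register(). */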
DIDT_SQ_EDC_CTRL__EDC_EN_MASK); - data &= ~DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK; - data |= ((~en << DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK); + data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en); + data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + if (PP_CAP(PHM_PlatformCaps_DBRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL); - data &= ~DIDT_DB_EDC_CTRL__EDC_EN_MASK; - data |= ((en << DIDT_DB_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DB_EDC_CTRL__EDC_EN_MASK); - data &= ~DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK; - data |= ((~en << DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK); + data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en); + data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + if (PP_CAP(PHM_PlatformCaps_TDRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL); - data &= ~DIDT_TD_EDC_CTRL__EDC_EN_MASK; - data |= ((en << DIDT_TD_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TD_EDC_CTRL__EDC_EN_MASK); - data &= ~DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK; - data |= ((~en << DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK); + data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en); + data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + if (PP_CAP(PHM_PlatformCaps_TCPRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL); - data &= ~DIDT_TCP_EDC_CTRL__EDC_EN_MASK; - data |= ((en << DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_EN_MASK); - data &= ~DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK; - data |= ((~en << DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK); + data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en); + data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) { + if (PP_CAP(PHM_PlatformCaps_DBRRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL); - data &= ~DIDT_DBR_EDC_CTRL__EDC_EN_MASK; - data |= ((en << DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_EN_MASK); - data &= ~DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK; - data |= ((~en << DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK); + data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en); + data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data); } } if (enable) { /* For Vega10, SMC does not support any mask yet. 
*/ - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info); + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info); PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!"); } } @@ -1040,10 +1020,10 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) cgs_enter_safe_mode(hwmgr->device, false); vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) + if (PP_CAP(PHM_PlatformCaps_GCEDC)) vega10_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega10); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + if (PP_CAP(PHM_PlatformCaps_PSM)) vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10); return 0; @@ -1059,12 +1039,12 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) cgs_enter_safe_mode(hwmgr->device, false); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + if (PP_CAP(PHM_PlatformCaps_GCEDC)) { data = 0x00000000; cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + if (PP_CAP(PHM_PlatformCaps_PSM)) vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); return 0; @@ -1159,12 +1139,12 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + if (PP_CAP(PHM_PlatformCaps_GCEDC)) { vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega10); vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega10); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + if (PP_CAP(PHM_PlatformCaps_PSM)) vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10); return 0; @@ -1180,12 +1160,12 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) cgs_enter_safe_mode(hwmgr->device, false); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + if (PP_CAP(PHM_PlatformCaps_GCEDC)) { data = 0x00000000; cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + if (PP_CAP(PHM_PlatformCaps_PSM)) vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); return 0; @@ -1263,7 +1243,7 @@ int vega10_enable_didt_config(struct pp_hwmgr *hwmgr) } if (0 == result) { - PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), + PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result); data->smu_features[GNLD_DIDT].enabled = true; } @@ -1310,7 +1290,7 @@ int vega10_disable_didt_config(struct pp_hwmgr *hwmgr) } if (0 == result) { - PP_ASSERT_WITH_CODE((0 != vega10_enable_smc_features(hwmgr->smumgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), + PP_ASSERT_WITH_CODE((0 != vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", 
return result); data->smu_features[GNLD_DIDT].enabled = false; } @@ -1364,7 +1344,7 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) (struct vega10_hwmgr *)(hwmgr->backend); if (data->registry_data.enable_pkg_pwr_tracking_feature) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetPptLimit, n); return 0; @@ -1381,16 +1361,15 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr) (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit); int result = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->smu_features[GNLD_PPT].supported) - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_PPT].smu_feature_bitmap), "Attempt to enable PPT feature Failed!", data->smu_features[GNLD_PPT].supported = false); if (data->smu_features[GNLD_TDC].supported) - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_TDC].smu_feature_bitmap), "Attempt to enable PPT feature Failed!", data->smu_features[GNLD_TDC].supported = false); @@ -1409,16 +1388,15 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->smu_features[GNLD_PPT].supported) - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_PPT].smu_feature_bitmap), "Attempt to disable PPT feature Failed!", data->smu_features[GNLD_PPT].supported = false); if (data->smu_features[GNLD_TDC].supported) - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_TDC].smu_feature_bitmap), "Attempt to disable PPT feature Failed!", data->smu_features[GNLD_TDC].supported = false); @@ -1430,7 +1408,7 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr) static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr, uint32_t adjust_percent) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_OverDriveSetPercentage, adjust_percent); } @@ -1438,8 +1416,7 @@ int vega10_power_control_set_level(struct pp_hwmgr *hwmgr) { int adjust_percent, result = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? 
hwmgr->platform_descriptor.TDPAdjustment : diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index d44243441d28..1feefac49ea9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -31,11 +31,11 @@ static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) { - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm), "Attempt to get current RPM from SMC Failed!", return -1); - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr, current_rpm), "Attempt to read current RPM from SMC Failed!", return -1); @@ -54,8 +54,7 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, fan_speed_info->min_percent = 0; fan_speed_info->max_percent = 100; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM) && + if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM) && hwmgr->thermal_controller.fanInfo. ucTachometerPulsesPerRevolution) { fan_speed_info->supports_rpm_read = true; @@ -105,14 +104,15 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) if (hwmgr->thermal_controller.fanInfo.bNoFan) return -1; - if (data->smu_features[GNLD_FAN_CONTROL].supported) + if (data->smu_features[GNLD_FAN_CONTROL].supported) { result = vega10_get_current_rpm(hwmgr, speed); - else { + } else { uint32_t reg = soc15_get_register_offset(THM_HWID, 0, mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); - tach_period = (cgs_read_register(hwmgr->device, - reg) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> - CG_TACH_STATUS__TACH_PERIOD__SHIFT; + tach_period = + CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_TACH_STATUS, + TACH_PERIOD); if (tach_period == 0) return -EINVAL; @@ -141,23 +141,20 @@ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) if (hwmgr->fan_ctrl_is_in_default_mode) { hwmgr->fan_ctrl_default_mode = - (cgs_read_register(hwmgr->device, reg) & - CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> - CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; - hwmgr->tmin = (cgs_read_register(hwmgr->device, reg) & - CG_FDO_CTRL2__TMIN_MASK) >> - CG_FDO_CTRL2__TMIN__SHIFT; + CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, FDO_PWM_MODE); + hwmgr->tmin = + CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, TMIN); hwmgr->fan_ctrl_is_in_default_mode = false; } cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL2__TMIN_MASK) | - (0 << CG_FDO_CTRL2__TMIN__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, TMIN, 0)); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) | - (mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, FDO_PWM_MODE, mode)); return 0; } @@ -176,14 +173,13 @@ int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) if (!hwmgr->fan_ctrl_is_in_default_mode) { cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) | - (hwmgr->fan_ctrl_default_mode << - CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, FDO_PWM_MODE, + hwmgr->fan_ctrl_default_mode)); 
cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL2__TMIN_MASK) | - (hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, TMIN, + hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT)); hwmgr->fan_ctrl_is_in_default_mode = true; } @@ -203,7 +199,7 @@ static int vega10_enable_fan_control_feature(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_FAN_CONTROL].supported) { PP_ASSERT_WITH_CODE(!vega10_enable_smc_features( - hwmgr->smumgr, true, + hwmgr, true, data->smu_features[GNLD_FAN_CONTROL]. smu_feature_bitmap), "Attempt to Enable FAN CONTROL feature Failed!", @@ -220,7 +216,7 @@ static int vega10_disable_fan_control_feature(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_FAN_CONTROL].supported) { PP_ASSERT_WITH_CODE(!vega10_enable_smc_features( - hwmgr->smumgr, false, + hwmgr, false, data->smu_features[GNLD_FAN_CONTROL]. smu_feature_bitmap), "Attempt to Enable FAN CONTROL feature Failed!", @@ -279,16 +275,14 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, if (speed > 100) speed = 100; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) vega10_fan_ctrl_stop_smc_fan_control(hwmgr); reg = soc15_get_register_offset(THM_HWID, 0, mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1); - duty100 = (cgs_read_register(hwmgr->device, reg) & - CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> - CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; + duty100 = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL1, FMAX_DUTY100); if (duty100 == 0) return -EINVAL; @@ -300,9 +294,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, reg = soc15_get_register_offset(THM_HWID, 0, mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK) | - (duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); } @@ -314,18 +307,13 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, */ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) { - int result; - if (hwmgr->thermal_controller.fanInfo.bNoFan) return 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { - result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); - } else - result = vega10_fan_ctrl_set_default_mode(hwmgr); - - return result; + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) + return vega10_fan_ctrl_start_smc_fan_control(hwmgr); + else + return vega10_fan_ctrl_set_default_mode(hwmgr); } /** @@ -342,12 +330,11 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) uint32_t reg; if (hwmgr->thermal_controller.fanInfo.bNoFan || - (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || - (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) + (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || + (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) return -1; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); if (!result) { @@ -356,9 +343,9 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, 
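The duty value written to FDO_STATIC_DUTY in vega10_fan_ctrl_set_fan_speed_percent() above is computed just outside the visible hunk; the assumed arithmetic scales the requested percentage against the hardware's 100% duty reading:

	/* Assumed, not shown in the hunk: duty = speed * duty100 / 100,
	 * e.g. duty100 = 255 and speed = 40 (percent) -> duty = 102. */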
uint32_t speed) reg = soc15_get_register_offset(THM_HWID, 0, mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_TACH_STATUS__TACH_PERIOD_MASK) | - (tach_period << CG_TACH_STATUS__TACH_PERIOD__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_TACH_STATUS, TACH_PERIOD, + tach_period)); } return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM); } @@ -374,7 +361,7 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) uint32_t reg; reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_TACH_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS); + mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS); temp = cgs_read_register(hwmgr->device, reg); @@ -418,20 +405,10 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = cgs_read_register(hwmgr->device, reg); - val &= (~THM_THERMAL_INT_CTRL__MAX_IH_CREDIT_MASK); - val |= (5 << THM_THERMAL_INT_CTRL__MAX_IH_CREDIT__SHIFT); - - val &= (~THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA_MASK); - val |= (1 << THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA__SHIFT); - - val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK); - val |= ((high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) - << THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT); - - val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); - val |= ((low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) - << THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); - + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); cgs_write_register(hwmgr->device, reg, val); @@ -452,19 +429,16 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr) reg = soc15_get_register_offset(THM_HWID, 0, mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_TACH_CTRL__EDGE_PER_REV_MASK) | - ((hwmgr->thermal_controller.fanInfo. 
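For the interrupt-threshold programming just above, a quick worked example helps; it assumes PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (i.e. the range arrives in millidegrees Celsius), a value that is not visible in this diff:

/*
 * Hypothetical range: low = 0, high = 90000 (90 C)
 *   MAX_IH_CREDIT   = 5
 *   THERM_IH_HW_ENA = 1
 *   DIG_THERM_INTH  = 90000 / 1000 = 90
 *   DIG_THERM_INTL  =     0 / 1000 =  0
 * THERM_TRIGGER_MASK is then cleared before val is written back.
 */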
- ucTachometerPulsesPerRevolution - 1) << - CG_TACH_CTRL__EDGE_PER_REV__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_TACH_CTRL, EDGE_PER_REV, + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1)); } reg = soc15_get_register_offset(THM_HWID, 0, mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK) | - (0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28)); return 0; } @@ -484,7 +458,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_FW_CTF].enabled) printk("[Thermal_EnableAlert] FW CTF Already Enabled!\n"); - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_FW_CTF].smu_feature_bitmap), "Attempt to Enable FW CTF feature Failed!", @@ -516,7 +490,7 @@ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr) printk("[Thermal_EnableAlert] FW CTF Already disabled!\n"); - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_FW_CTF].smu_feature_bitmap), "Attempt to disable FW CTF feature Failed!", @@ -554,8 +528,7 @@ int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) * @param Result the last failure code * @return result from set temperature range routine */ -int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { int ret; struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); @@ -573,7 +546,7 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, table->FanTargetTemperature = hwmgr->thermal_controller. advanceFanControlParameters.usTMax; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanTemperatureTarget, (uint32_t)table->FanTargetTemperature); @@ -602,7 +575,7 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, table->FanStartTemp = hwmgr->thermal_controller. advanceFanControlParameters.usZeroRPMStartTemperature; - ret = vega10_copy_table_to_smc(hwmgr->smumgr, + ret = vega10_copy_table_to_smc(hwmgr, (uint8_t *)(&(data->smc_state_table.pp_table)), PPTABLE); if (ret) pr_info("Failed to update Fan Control Table in PPTable!"); @@ -619,123 +592,50 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, * @param Result the last failure code * @return result from set temperature range routine */ -int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +int vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr) { /* If the fantable setup has failed we could have disabled * PHM_PlatformCaps_MicrocodeFanControl even after * this function was included in the table. * Make sure that we still think controlling the fan is OK. */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) vega10_fan_ctrl_start_smc_fan_control(hwmgr); - } return 0; } -/** -* Set temperature range for high and low alerts -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) + +int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *range) { - struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + int ret = 0; if (range == NULL) return -EINVAL; - return vega10_thermal_set_temperature_range(hwmgr, range); -} - -/** -* Programs one-time setting registers -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from initialize thermal controller routine -*/ -int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return vega10_thermal_initialize(hwmgr); -} - -/** -* Enable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from enable alert routine -*/ -int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return vega10_thermal_enable_alert(hwmgr); -} - -/** -* Disable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from disable alert routine -*/ -static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return vega10_thermal_disable_alert(hwmgr); -} + vega10_thermal_initialize(hwmgr); + ret = vega10_thermal_set_temperature_range(hwmgr, range); + if (ret) + return -EINVAL; -static struct phm_master_table_item -vega10_thermal_start_thermal_controller_master_list[] = { - { .tableFunction = tf_vega10_thermal_initialize }, - { .tableFunction = tf_vega10_thermal_set_temperature_range }, - { .tableFunction = tf_vega10_thermal_enable_alert }, + vega10_thermal_enable_alert(hwmgr); /* We should restrict performance levels to low before we halt the SMC. * On the other hand we are still in boot state when we do this * so it would be pointless. * If this assumption changes we have to revisit this table. 
*/ - { .tableFunction = tf_vega10_thermal_setup_fan_table }, - { .tableFunction = tf_vega10_thermal_start_smc_fan_control }, - { } -}; + ret = vega10_thermal_setup_fan_table(hwmgr); + if (ret) + return -EINVAL; -static struct phm_master_table_header -vega10_thermal_start_thermal_controller_master = { - 0, - PHM_MasterTableFlag_None, - vega10_thermal_start_thermal_controller_master_list -}; + vega10_thermal_start_smc_fan_control(hwmgr); -static struct phm_master_table_item -vega10_thermal_set_temperature_range_master_list[] = { - { .tableFunction = tf_vega10_thermal_disable_alert }, - { .tableFunction = tf_vega10_thermal_set_temperature_range }, - { .tableFunction = tf_vega10_thermal_enable_alert }, - { } + return 0; }; -struct phm_master_table_header -vega10_thermal_set_temperature_range_master = { - 0, - PHM_MasterTableFlag_None, - vega10_thermal_set_temperature_range_master_list -}; + + int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) { @@ -745,32 +645,3 @@ int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) } return 0; } - -/** -* Initializes the thermal controller related functions -* in the Hardware Manager structure. -* @param hwmgr The address of the hardware manager. -* @exception Any error code from the low-level communication. -*/ -int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - int result; - - result = phm_construct_table(hwmgr, - &vega10_thermal_set_temperature_range_master, - &(hwmgr->set_temperature_range)); - - if (!result) { - result = phm_construct_table(hwmgr, - &vega10_thermal_start_thermal_controller_master, - &(hwmgr->start_thermal_controller)); - if (result) - phm_destroy_table(hwmgr, - &(hwmgr->set_temperature_range)); - } - - if (!result) - hwmgr->fan_ctrl_is_in_default_mode = true; - return result; -} - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h index 776f3a2effc0..f34ce04cfd89 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h @@ -50,13 +50,6 @@ struct vega10_temperature { #define FDO_PWM_MODE_STATIC_RPM 5 -extern int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result); -extern int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result); -extern int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result); - extern int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr); extern int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); extern int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, @@ -69,7 +62,6 @@ extern int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, extern int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); extern int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr); extern int vega10_thermal_ctrl_uninitialize_thermal_controller( struct pp_hwmgr *hwmgr); extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, @@ -77,9 +69,10 @@ extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); -extern uint32_t smu7_get_xclk(struct 
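With the phm master tables gone, vega10_start_thermal_controller() above simply runs the individual steps in order. It is meant to be reached through the new start_thermal_controller hook added to struct pp_hwmgr_func later in this patch; the wiring itself lives in vega10_hwmgr.c and is not shown here, so the snippet below is only a sketch of that assumption:

/* Hypothetical excerpt from the vega10 hwmgr function table. */
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
	/* ... */
	.start_thermal_controller = vega10_start_thermal_controller,
};

/* Generic caller replacing the old phm_dispatch_table() path. */
static int phm_start_thermal_controller_sketch(struct pp_hwmgr *hwmgr,
					       struct PP_TemperatureRange *range)
{
	if (hwmgr->hwmgr_func->start_thermal_controller != NULL)
		return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, range);

	return 0;
}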
pp_hwmgr *hwmgr); extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr); -int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr); +extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr); + +extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 07e9c0b5915d..435da2647727 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -31,7 +31,7 @@ #include "dm_pp_interface.h" extern const struct amd_ip_funcs pp_ip_funcs; -extern const struct amd_powerplay_funcs pp_dpm_funcs; +extern const struct amd_pm_funcs pp_dpm_funcs; #define PP_DPM_DISABLED 0xCCCC @@ -50,94 +50,12 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_GPU_POWER, }; -enum amd_pp_event { - AMD_PP_EVENT_INITIALIZE = 0, - AMD_PP_EVENT_UNINITIALIZE, - AMD_PP_EVENT_POWER_SOURCE_CHANGE, - AMD_PP_EVENT_SUSPEND, - AMD_PP_EVENT_RESUME, - AMD_PP_EVENT_ENTER_REST_STATE, - AMD_PP_EVENT_EXIT_REST_STATE, - AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, - AMD_PP_EVENT_THERMAL_NOTIFICATION, - AMD_PP_EVENT_VBIOS_NOTIFICATION, - AMD_PP_EVENT_ENTER_THERMAL_STATE, - AMD_PP_EVENT_EXIT_THERMAL_STATE, - AMD_PP_EVENT_ENTER_FORCED_STATE, - AMD_PP_EVENT_EXIT_FORCED_STATE, - AMD_PP_EVENT_ENTER_EXCLUSIVE_MODE, - AMD_PP_EVENT_EXIT_EXCLUSIVE_MODE, - AMD_PP_EVENT_ENTER_SCREEN_SAVER, - AMD_PP_EVENT_EXIT_SCREEN_SAVER, - AMD_PP_EVENT_VPU_RECOVERY_BEGIN, - AMD_PP_EVENT_VPU_RECOVERY_END, - AMD_PP_EVENT_ENABLE_POWER_PLAY, - AMD_PP_EVENT_DISABLE_POWER_PLAY, - AMD_PP_EVENT_CHANGE_POWER_SOURCE_UI_LABEL, - AMD_PP_EVENT_ENABLE_USER2D_PERFORMANCE, - AMD_PP_EVENT_DISABLE_USER2D_PERFORMANCE, - AMD_PP_EVENT_ENABLE_USER3D_PERFORMANCE, - AMD_PP_EVENT_DISABLE_USER3D_PERFORMANCE, - AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST, - AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST, - AMD_PP_EVENT_ENABLE_REDUCED_REFRESH_RATE, - AMD_PP_EVENT_DISABLE_REDUCED_REFRESH_RATE, - AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING, - AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING, - AMD_PP_EVENT_ENABLE_CGPG, - AMD_PP_EVENT_DISABLE_CGPG, - AMD_PP_EVENT_ENTER_TEXT_MODE, - AMD_PP_EVENT_EXIT_TEXT_MODE, - AMD_PP_EVENT_VIDEO_START, - AMD_PP_EVENT_VIDEO_STOP, - AMD_PP_EVENT_ENABLE_USER_STATE, - AMD_PP_EVENT_DISABLE_USER_STATE, - AMD_PP_EVENT_READJUST_POWER_STATE, - AMD_PP_EVENT_START_INACTIVITY, - AMD_PP_EVENT_STOP_INACTIVITY, - AMD_PP_EVENT_LINKED_ADAPTERS_READY, - AMD_PP_EVENT_ADAPTER_SAFE_TO_DISABLE, - AMD_PP_EVENT_COMPLETE_INIT, - AMD_PP_EVENT_CRITICAL_THERMAL_FAULT, - AMD_PP_EVENT_BACKLIGHT_CHANGED, - AMD_PP_EVENT_ENABLE_VARI_BRIGHT, - AMD_PP_EVENT_DISABLE_VARI_BRIGHT, - AMD_PP_EVENT_ENABLE_VARI_BRIGHT_ON_POWER_XPRESS, - AMD_PP_EVENT_DISABLE_VARI_BRIGHT_ON_POWER_XPRESS, - AMD_PP_EVENT_SET_VARI_BRIGHT_LEVEL, - AMD_PP_EVENT_VARI_BRIGHT_MONITOR_MEASUREMENT, - AMD_PP_EVENT_SCREEN_ON, - AMD_PP_EVENT_SCREEN_OFF, - AMD_PP_EVENT_PRE_DISPLAY_CONFIG_CHANGE, - AMD_PP_EVENT_ENTER_ULP_STATE, - AMD_PP_EVENT_EXIT_ULP_STATE, - AMD_PP_EVENT_REGISTER_IP_STATE, - AMD_PP_EVENT_UNREGISTER_IP_STATE, - AMD_PP_EVENT_ENTER_MGPU_MODE, - AMD_PP_EVENT_EXIT_MGPU_MODE, - AMD_PP_EVENT_ENTER_MULTI_GPU_MODE, - AMD_PP_EVENT_PRE_SUSPEND, - AMD_PP_EVENT_PRE_RESUME, - AMD_PP_EVENT_ENTER_BACOS, - AMD_PP_EVENT_EXIT_BACOS, - AMD_PP_EVENT_RESUME_BACO, - AMD_PP_EVENT_RESET_BACO, - AMD_PP_EVENT_PRE_DISPLAY_PHY_ACCESS, - AMD_PP_EVENT_POST_DISPLAY_PHY_CCESS, - AMD_PP_EVENT_START_COMPUTE_APPLICATION, - AMD_PP_EVENT_STOP_COMPUTE_APPLICATION, - 
AMD_PP_EVENT_REDUCE_POWER_LIMIT, - AMD_PP_EVENT_ENTER_FRAME_LOCK, - AMD_PP_EVENT_EXIT_FRAME_LOOCK, - AMD_PP_EVENT_LONG_IDLE_REQUEST_BACO, - AMD_PP_EVENT_LONG_IDLE_ENTER_BACO, - AMD_PP_EVENT_LONG_IDLE_EXIT_BACO, - AMD_PP_EVENT_HIBERNATE, - AMD_PP_EVENT_CONNECTED_STANDBY, - AMD_PP_EVENT_ENTER_SELF_REFRESH, - AMD_PP_EVENT_EXIT_SELF_REFRESH, - AMD_PP_EVENT_START_AVFS_BTC, - AMD_PP_EVENT_MAX +enum amd_pp_task { + AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, + AMD_PP_TASK_ENABLE_USER_STATE, + AMD_PP_TASK_READJUST_POWER_STATE, + AMD_PP_TASK_COMPLETE_INIT, + AMD_PP_TASK_MAX }; struct amd_pp_init { @@ -295,12 +213,6 @@ enum { PP_GROUP_MAX }; -enum pp_clock_type { - PP_SCLK, - PP_MCLK, - PP_PCIE, -}; - struct pp_states_info { uint32_t nums; uint32_t states[16]; @@ -355,49 +267,10 @@ struct pp_display_clock_request { support << PP_STATE_SUPPORT_SHIFT |\ state << PP_STATE_SHIFT) -struct amd_powerplay_funcs { - int (*get_temperature)(void *handle); - int (*load_firmware)(void *handle); - int (*wait_for_fw_loading_complete)(void *handle); - int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level); - enum amd_dpm_forced_level (*get_performance_level)(void *handle); - enum amd_pm_state_type (*get_current_power_state)(void *handle); - int (*get_sclk)(void *handle, bool low); - int (*get_mclk)(void *handle, bool low); - int (*powergate_vce)(void *handle, bool gate); - int (*powergate_uvd)(void *handle, bool gate); - int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id, - void *input, void *output); - int (*set_fan_control_mode)(void *handle, uint32_t mode); - int (*get_fan_control_mode)(void *handle); - int (*set_fan_speed_percent)(void *handle, uint32_t percent); - int (*get_fan_speed_percent)(void *handle, uint32_t *speed); - int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm); - int (*get_pp_num_states)(void *handle, struct pp_states_info *data); - int (*get_pp_table)(void *handle, char **table); - int (*set_pp_table)(void *handle, const char *buf, size_t size); - int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask); - int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf); - int (*get_sclk_od)(void *handle); - int (*set_sclk_od)(void *handle, uint32_t value); - int (*get_mclk_od)(void *handle); - int (*set_mclk_od)(void *handle, uint32_t value); - int (*read_sensor)(void *handle, int idx, void *value, int *size); - struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx); - int (*reset_power_profile_state)(void *handle, - struct amd_pp_profile *request); - int (*get_power_profile_state)(void *handle, - struct amd_pp_profile *query); - int (*set_power_profile_state)(void *handle, - struct amd_pp_profile *request); - int (*switch_power_profile)(void *handle, - enum amd_pp_profile_type type); -}; - struct amd_powerplay { void *pp_handle; const struct amd_ip_funcs *ip_funcs; - const struct amd_powerplay_funcs *pp_funcs; + const struct amd_pm_funcs *pp_funcs; }; int amd_powerplay_create(struct amd_pp_init *pp_init, diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h b/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h deleted file mode 100644 index b9d84de8a44d..000000000000 --- a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
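The sprawling amd_pp_event list collapses into the four-entry amd_pp_task enum above, and event dispatch becomes a plain task handler. A minimal sketch of a caller, using the hwmgr_handle_task() prototype that this patch adds to hwmgr.h further down (the amdgpu-side call sites are outside this diff and only assumed here):

static int pp_readjust_power_state_sketch(struct pp_instance *handle)
{
	/* input/output are unused for this particular task in the sketch. */
	return hwmgr_handle_task(handle, AMD_PP_TASK_READJUST_POWER_STATE,
				 NULL, NULL);
}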
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef _EVENT_MANAGER_H_ -#define _EVENT_MANAGER_H_ - -#include "power_state.h" -#include "pp_power_source.h" -#include "hardwaremanager.h" -#include "pp_asicblocks.h" - -struct pp_eventmgr; -enum amd_pp_event; - -enum PEM_EventDataValid { - PEM_EventDataValid_RequestedStateID = 0, - PEM_EventDataValid_RequestedUILabel, - PEM_EventDataValid_NewPowerState, - PEM_EventDataValid_RequestedPowerSource, - PEM_EventDataValid_RequestedClocks, - PEM_EventDataValid_CurrentTemperature, - PEM_EventDataValid_AsicBlocks, - PEM_EventDataValid_ODParameters, - PEM_EventDataValid_PXAdapterPrefs, - PEM_EventDataValid_PXUserPrefs, - PEM_EventDataValid_PXSwitchReason, - PEM_EventDataValid_PXSwitchPhase, - PEM_EventDataValid_HdVideo, - PEM_EventDataValid_BacklightLevel, - PEM_EventDatavalid_VariBrightParams, - PEM_EventDataValid_VariBrightLevel, - PEM_EventDataValid_VariBrightImmediateChange, - PEM_EventDataValid_PercentWhite, - PEM_EventDataValid_SdVideo, - PEM_EventDataValid_HTLinkChangeReason, - PEM_EventDataValid_HWBlocks, - PEM_EventDataValid_RequestedThermalState, - PEM_EventDataValid_MvcVideo, - PEM_EventDataValid_Max -}; - -typedef enum PEM_EventDataValid PEM_EventDataValid; - -/* Number of bits in ULONG variable */ -#define PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD (sizeof(unsigned long)*8) - -/* Number of ULONG entries used by event data valid bits */ -#define PEM_MAX_NUM_EVENTDATAVALID_ULONG_ENTRIES \ - ((PEM_EventDataValid_Max + PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD - 1) / \ - PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD) - -static inline void pem_set_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field) -{ - fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] |= - (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)); -} - -static inline void pem_unset_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field) -{ - fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &= - ~(1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)); -} - -static inline unsigned long pem_is_event_data_valid(const unsigned long *fields, PEM_EventDataValid valid_field) -{ - return fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] & - (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)); -} - -struct pem_event_data { - unsigned long valid_fields[100]; - unsigned long requested_state_id; - enum PP_StateUILabel requested_ui_label; - 
struct pp_power_state *pnew_power_state; - enum pp_power_source requested_power_source; - struct PP_Clocks requested_clocks; - bool skip_state_adjust_rules; - struct phm_asic_blocks asic_blocks; - /* to doPP_ThermalState requestedThermalState; - enum ThermalStateRequestSrc requestThermalStateSrc; - PP_Temperature currentTemperature;*/ - -}; - -int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, - struct pem_event_data *event_data); - -bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr); - -#endif /* _EVENT_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h deleted file mode 100644 index 7bd8a7e57080..000000000000 --- a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef _EVENTMGR_H_ -#define _EVENTMGR_H_ - -#include <linux/mutex.h> -#include "pp_instance.h" -#include "hardwaremanager.h" -#include "eventmanager.h" -#include "pp_feature.h" -#include "pp_power_source.h" -#include "power_state.h" - -typedef int (*pem_event_action)(struct pp_eventmgr *eventmgr, - struct pem_event_data *event_data); - -struct action_chain { - const char *description; /* action chain description for debugging purpose */ - const pem_event_action * const *action_chain; /* pointer to chain of event actions */ -}; - -struct pem_power_source_ui_state_info { - enum PP_StateUILabel current_ui_label; - enum PP_StateUILabel default_ui_lable; - unsigned long configurable_ui_mapping; -}; - -struct pp_clock_range { - uint32_t min_sclk_khz; - uint32_t max_sclk_khz; - - uint32_t min_mclk_khz; - uint32_t max_mclk_khz; - - uint32_t min_vclk_khz; - uint32_t max_vclk_khz; - - uint32_t min_dclk_khz; - uint32_t max_dclk_khz; - - uint32_t min_aclk_khz; - uint32_t max_aclk_khz; - - uint32_t min_eclk_khz; - uint32_t max_eclk_khz; -}; - -enum pp_state { - UNINITIALIZED, - INACTIVE, - ACTIVE -}; - -enum pp_ring_index { - PP_RING_TYPE_GFX_INDEX = 0, - PP_RING_TYPE_DMA_INDEX, - PP_RING_TYPE_DMA1_INDEX, - PP_RING_TYPE_UVD_INDEX, - PP_RING_TYPE_VCE0_INDEX, - PP_RING_TYPE_VCE1_INDEX, - PP_RING_TYPE_CP1_INDEX, - PP_RING_TYPE_CP2_INDEX, - PP_NUM_RINGS, -}; - -struct pp_request { - uint32_t flags; - uint32_t sclk; - uint32_t sclk_throttle; - uint32_t mclk; - uint32_t vclk; - uint32_t dclk; - uint32_t eclk; - uint32_t aclk; - uint32_t iclk; - uint32_t vp8clk; - uint32_t rsv[32]; -}; - -struct pp_eventmgr { - struct pp_hwmgr *hwmgr; - struct pp_smumgr *smumgr; - - struct pp_feature_info features[PP_Feature_Max]; - const struct action_chain *event_chain[AMD_PP_EVENT_MAX]; - struct phm_platform_descriptor *platform_descriptor; - struct pp_clock_range clock_range; - enum pp_power_source current_power_source; - struct pem_power_source_ui_state_info ui_state_info[PP_PowerSource_Max]; - enum pp_state states[PP_NUM_RINGS]; - struct pp_request hi_req; - struct list_head context_list; - struct mutex lock; - bool block_adjust_power_state; - bool enable_cg; - bool enable_gfx_cgpg; - int (*pp_eventmgr_init)(struct pp_eventmgr *eventmgr); - void (*pp_eventmgr_fini)(struct pp_eventmgr *eventmgr); -}; - -int eventmgr_early_init(struct pp_instance *handle); - -#endif /* _EVENTMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index a4c8b09b6f14..57a0467b7267 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -283,6 +283,8 @@ static inline bool phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps (1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))))); } +#define PP_CAP(c) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c)) + #define PP_PCIEGenInvalid 0xffff enum PP_PCIEGen { PP_PCIEGen1 = 0, /* PCIE 1.0 - Transfer rate of 2.5 GT/s */ @@ -295,7 +297,7 @@ typedef enum PP_PCIEGen PP_PCIEGen; #define PP_Min_PCIEGen PP_PCIEGen1 #define PP_Max_PCIEGen PP_PCIEGen3 #define PP_Min_PCIELane 1 -#define PP_Max_PCIELane 32 +#define PP_Max_PCIELane 16 enum phm_clock_Type { PHM_DispClock = 1, @@ -373,8 +375,6 @@ struct phm_odn_clock_levels { extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr); extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr); -extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate); -extern int 
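The PP_CAP() helper introduced above is purely shorthand: it expands to the phm_cap_enabled() call and assumes a local hwmgr pointer is in scope. The two spellings below are therefore equivalent, matching the conversions made in the vega10 thermal code earlier in this patch:

/* Before */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_MicrocodeFanControl))
	vega10_fan_ctrl_stop_smc_fan_control(hwmgr);

/* After */
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
	vega10_fan_ctrl_stop_smc_fan_control(hwmgr);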
phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate); extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); extern int phm_setup_asic(struct pp_hwmgr *hwmgr); extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 91b0105e8240..126b44d47a99 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -32,6 +32,7 @@ #include "ppatomctrl.h" #include "hwmgr_ppt.h" #include "power_state.h" +#include "cgs_linux.h" struct pp_instance; struct pp_hwmgr; @@ -61,10 +62,6 @@ struct vi_dpm_table { struct vi_dpm_level dpm_level[1]; }; -enum PP_Result { - PP_Result_TableImmediateExit = 0x13, -}; - #define PCIE_PERF_REQ_REMOVE_REGISTRY 0 #define PCIE_PERF_REQ_FORCE_LOWPOWER 1 #define PCIE_PERF_REQ_GEN1 2 @@ -103,17 +100,6 @@ enum PHM_BackEnd_Magic { PHM_Rv_Magic = 0x20161121 }; - -#define PHM_PCIE_POWERGATING_TARGET_GFX 0 -#define PHM_PCIE_POWERGATING_TARGET_DDI 1 -#define PHM_PCIE_POWERGATING_TARGET_PLLCASCADE 2 -#define PHM_PCIE_POWERGATING_TARGET_PHY 3 - -typedef int (*phm_table_function)(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result); - -typedef bool (*phm_check_function)(struct pp_hwmgr *hwmgr); - struct phm_set_power_state_input { const struct pp_hw_power_state *pcurrent_state; const struct pp_hw_power_state *pnew_state; @@ -149,30 +135,6 @@ struct phm_gfx_arbiter { uint32_t fclk; }; -/* Entries in the master tables */ -struct phm_master_table_item { - phm_check_function isFunctionNeededInRuntimeTable; - phm_table_function tableFunction; -}; - -enum phm_master_table_flag { - PHM_MasterTableFlag_None = 0, - PHM_MasterTableFlag_ExitOnError = 1, -}; - -/* The header of the master tables */ -struct phm_master_table_header { - uint32_t storage_size; - uint32_t flags; - const struct phm_master_table_item *master_list; -}; - -struct phm_runtime_table_header { - uint32_t storage_size; - bool exit_error; - phm_table_function *function_list; -}; - struct phm_clock_array { uint32_t count; uint32_t values[1]; @@ -216,19 +178,6 @@ struct phm_phase_shedding_limits_record { uint32_t Mclk; }; - -extern int phm_dispatch_table(struct pp_hwmgr *hwmgr, - struct phm_runtime_table_header *rt_table, - void *input, void *output); - -extern int phm_construct_table(struct pp_hwmgr *hwmgr, - const struct phm_master_table_header *master_table, - struct phm_runtime_table_header *rt_table); - -extern int phm_destroy_table(struct pp_hwmgr *hwmgr, - struct phm_runtime_table_header *rt_table); - - struct phm_uvd_clock_voltage_dependency_record { uint32_t vclk; uint32_t dclk; @@ -286,6 +235,39 @@ struct phm_vce_clock_voltage_dependency_table { struct phm_vce_clock_voltage_dependency_record entries[1]; }; +struct pp_smumgr_func { + int (*smu_init)(struct pp_hwmgr *hwmgr); + int (*smu_fini)(struct pp_hwmgr *hwmgr); + int (*start_smu)(struct pp_hwmgr *hwmgr); + int (*check_fw_load_finish)(struct pp_hwmgr *hwmgr, + uint32_t firmware); + int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr); + int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr, + uint32_t firmware); + int (*get_argument)(struct pp_hwmgr *hwmgr); + int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg); + int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter); + int (*download_pptable_settings)(struct pp_hwmgr *hwmgr, + void **table); + int (*upload_pptable_settings)(struct pp_hwmgr *hwmgr); + int 
(*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type); + int (*process_firmware_header)(struct pp_hwmgr *hwmgr); + int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr); + int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr); + int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr); + int (*init_smc_table)(struct pp_hwmgr *hwmgr); + int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr); + int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr); + int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr); + uint32_t (*get_offsetof)(uint32_t type, uint32_t member); + uint32_t (*get_mac_definition)(uint32_t value); + bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); + int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request); + bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr); +}; + struct pp_hwmgr_func { int (*backend_init)(struct pp_hwmgr *hw_mgr); int (*backend_fini)(struct pp_hwmgr *hw_mgr); @@ -311,10 +293,10 @@ struct pp_hwmgr_func { unsigned long, struct pp_power_state *); int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr); int (*powerdown_uvd)(struct pp_hwmgr *hwmgr); - int (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate); - int (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate); - int (*get_mclk)(struct pp_hwmgr *hwmgr, bool low); - int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); + void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate); + void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate); + uint32_t (*get_mclk)(struct pp_hwmgr *hwmgr, bool low); + uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); int (*power_state_set)(struct pp_hwmgr *hwmgr, const void *state); int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr); @@ -328,8 +310,8 @@ struct pp_hwmgr_func { int (*get_temperature)(struct pp_hwmgr *hwmgr); int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr); int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); - int (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode); - int (*get_fan_control_mode)(struct pp_hwmgr *hwmgr); + void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode); + uint32_t (*get_fan_control_mode)(struct pp_hwmgr *hwmgr); int (*set_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t percent); int (*get_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t *speed); int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t percent); @@ -378,6 +360,9 @@ struct pp_hwmgr_func { struct amd_pp_profile *request); int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable); int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr); + int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count); + int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock); + int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range); }; struct pp_table_func { @@ -745,7 +730,7 @@ struct pp_hwmgr { enum amd_dpm_forced_level dpm_level; enum amd_dpm_forced_level saved_dpm_level; - bool block_hw_access; + enum amd_dpm_forced_level request_dpm_level; struct phm_gfx_arbiter gfx_arbiter; struct phm_acp_arbiter acp_arbiter; struct phm_uvd_arbiter uvd_arbiter; @@ -754,19 +739,17 @@ struct pp_hwmgr { void *pptable; struct phm_platform_descriptor platform_descriptor; void *backend; + + void *smu_backend; + const struct pp_smumgr_func *smumgr_funcs; + bool is_kicker; + bool reload_fw; + enum PP_DAL_POWERLEVEL dal_power_level; struct phm_dynamic_state_info dyn_state; - struct phm_runtime_table_header setup_asic; - 
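struct pp_smumgr_func now takes struct pp_hwmgr directly, and the hwmgr itself carries smu_backend and smumgr_funcs, so the smum_* wrappers can dispatch without a separate pp_smumgr object. Their bodies live in smumgr.c and are not part of this hunk; the following is only a sketch of the expected shape, built around the smum_send_msg_to_smc() prototype declared later in this diff:

/* Sketch -- the real smumgr.c body is not shown in this patch. */
int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL ||
	    hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
		return -EINVAL;

	return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
}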
struct phm_runtime_table_header power_down_asic; - struct phm_runtime_table_header disable_dynamic_state_management; - struct phm_runtime_table_header enable_dynamic_state_management; - struct phm_runtime_table_header set_power_state; - struct phm_runtime_table_header enable_clock_power_gatings; - struct phm_runtime_table_header display_configuration_changed; - struct phm_runtime_table_header start_thermal_controller; - struct phm_runtime_table_header set_temperature_range; const struct pp_hwmgr_func *hwmgr_func; const struct pp_table_func *pptable_func; + struct pp_power_state *ps; enum pp_power_source power_source; uint32_t num_ps; @@ -784,26 +767,44 @@ struct pp_hwmgr { struct amd_pp_display_configuration display_config; uint32_t feature_mask; - /* power profile */ + /* UMD Pstate */ struct amd_pp_profile gfx_power_profile; struct amd_pp_profile compute_power_profile; struct amd_pp_profile default_gfx_power_profile; struct amd_pp_profile default_compute_power_profile; enum amd_pp_profile_type current_power_profile; + bool en_umd_pstate; +}; + +struct cgs_irq_src_funcs { + cgs_irq_source_set_func_t set; + cgs_irq_handler_func_t handler; }; extern int hwmgr_early_init(struct pp_instance *handle); extern int hwmgr_hw_init(struct pp_instance *handle); extern int hwmgr_hw_fini(struct pp_instance *handle); +extern int hwmgr_hw_suspend(struct pp_instance *handle); +extern int hwmgr_hw_resume(struct pp_instance *handle); +extern int hwmgr_handle_task(struct pp_instance *handle, + enum amd_pp_task task_id, + void *input, void *output); extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); -extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, +extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, uint32_t index, uint32_t value, uint32_t mask); +extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, + uint32_t index, + uint32_t value, uint32_t mask); +extern int phm_wait_for_indirect_register_unequal( + struct pp_hwmgr *hwmgr, + uint32_t indirect_port, uint32_t index, + uint32_t value, uint32_t mask); extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr); @@ -888,5 +889,58 @@ extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_t PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) +#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + phm_wait_for_indirect_register_unequal(hwmgr, \ + mm##port##_INDEX, index, value, mask) + +#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ + PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + +#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \ + (fieldval) << PHM_FIELD_SHIFT(reg, field), \ + PHM_FIELD_MASK(reg, field) ) + + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ + port, index, value, mask) \ + phm_wait_for_indirect_register_unequal(hwmgr, \ + mm##port##_INDEX_11, index, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \ + (fieldval) << PHM_FIELD_SHIFT(reg, field), \ + 
PHM_FIELD_MASK(reg, field)) + + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \ + port, index, value, mask) \ + phm_wait_on_indirect_register(hwmgr, \ + mm##port##_INDEX_11, index, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \ + (fieldval) << PHM_FIELD_SHIFT(reg, field), \ + PHM_FIELD_MASK(reg, field)) + +#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ + index, value, mask) \ + phm_wait_for_register_unequal(hwmgr, \ + index, value, mask) + +#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \ + PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ + mm##reg, value, mask) + +#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \ + PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \ + (fieldval) << PHM_FIELD_SHIFT(reg, field), \ + PHM_FIELD_MASK(reg, field)) #endif /* _HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h index 4c3b537a714f..25fb1460a194 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h @@ -23,9 +23,7 @@ #ifndef _PP_INSTANCE_H_ #define _PP_INSTANCE_H_ -#include "smumgr.h" #include "hwmgr.h" -#include "eventmgr.h" #define PP_VALID 0x1F1F1F1F @@ -36,9 +34,7 @@ struct pp_instance { bool pm_en; uint32_t feature_mask; void *device; - struct pp_smumgr *smu_mgr; struct pp_hwmgr *hwmgr; - struct pp_eventmgr *eventmgr; struct mutex pp_lock; }; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 5d61cc9d4554..7c9aba81cd6a 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -23,23 +23,13 @@ #ifndef _SMUMGR_H_ #define _SMUMGR_H_ #include <linux/types.h> -#include "pp_instance.h" #include "amd_powerplay.h" - -struct pp_smumgr; -struct pp_instance; -struct pp_hwmgr; +#include "hwmgr.h" #define smu_lower_32_bits(n) ((uint32_t)(n)) #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16)) -extern const struct pp_smumgr_func cz_smu_funcs; -extern const struct pp_smumgr_func iceland_smu_funcs; -extern const struct pp_smumgr_func tonga_smu_funcs; -extern const struct pp_smumgr_func fiji_smu_funcs; -extern const struct pp_smumgr_func polaris10_smu_funcs; -extern const struct pp_smumgr_func vega10_smu_funcs; -extern const struct pp_smumgr_func rv_smu_funcs; + enum AVFS_BTC_STATUS { AVFS_BTC_BOOT = 0, @@ -100,216 +90,44 @@ enum SMU_MAC_DEFINITION { SMU_UVD_MCLK_HANDSHAKE_DISABLE, }; +extern int smum_get_argument(struct pp_hwmgr *hwmgr); -struct pp_smumgr_func { - int (*smu_init)(struct pp_smumgr *smumgr); - int (*smu_fini)(struct pp_smumgr *smumgr); - int (*start_smu)(struct pp_smumgr *smumgr); - int (*check_fw_load_finish)(struct pp_smumgr *smumgr, - uint32_t firmware); - int (*request_smu_load_fw)(struct pp_smumgr *smumgr); - int (*request_smu_load_specific_fw)(struct pp_smumgr *smumgr, - uint32_t firmware); - int (*get_argument)(struct pp_smumgr *smumgr); - int (*send_msg_to_smc)(struct pp_smumgr *smumgr, uint16_t msg); - int (*send_msg_to_smc_with_parameter)(struct pp_smumgr *smumgr, - uint16_t msg, uint32_t parameter); - int (*download_pptable_settings)(struct pp_smumgr *smumgr, - void **table); - int (*upload_pptable_settings)(struct pp_smumgr *smumgr); - int 
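The new PHM_WAIT_*_UNEQUAL macros layer onto phm_wait_for_register_unequal()/phm_wait_for_indirect_register_unequal() exactly as the SMUM_* macros they replace did. Tracing one expansion, as used later in this patch by ci_send_msg_to_smc() (the SMC_RESP_0__SMC_RESP_MASK/__SHIFT names follow the usual register-header convention and are assumed here):

/*
 * PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0)
 *   -> PHM_WAIT_REGISTER_UNEQUAL(hwmgr, SMC_RESP_0,
 *		0 << PHM_FIELD_SHIFT(SMC_RESP_0, SMC_RESP),
 *		PHM_FIELD_MASK(SMC_RESP_0, SMC_RESP))
 *   -> phm_wait_for_register_unequal(hwmgr, mmSMC_RESP_0,
 *		0, SMC_RESP_0__SMC_RESP_MASK)
 *
 * i.e. poll until the SMC_RESP field of SMC_RESP_0 becomes non-zero.
 */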
(*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type); - int (*process_firmware_header)(struct pp_hwmgr *hwmgr); - int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr); - int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr); - int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr); - int (*init_smc_table)(struct pp_hwmgr *hwmgr); - int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr); - int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr); - int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr); - uint32_t (*get_offsetof)(uint32_t type, uint32_t member); - uint32_t (*get_mac_definition)(uint32_t value); - bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); - int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr, - struct amd_pp_profile *request); - bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr); -}; - -struct pp_smumgr { - uint32_t chip_family; - uint32_t chip_id; - void *device; - void *backend; - uint32_t usec_timeout; - bool reload_fw; - const struct pp_smumgr_func *smumgr_funcs; - bool is_kicker; -}; - -extern int smum_early_init(struct pp_instance *handle); +extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); -extern int smum_get_argument(struct pp_smumgr *smumgr); +extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr); -extern int smum_download_powerplay_table(struct pp_smumgr *smumgr, void **table); +extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); -extern int smum_upload_powerplay_table(struct pp_smumgr *smumgr); - -extern int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg); - -extern int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -extern int smum_wait_on_register(struct pp_smumgr *smumgr, - uint32_t index, uint32_t value, uint32_t mask); - -extern int smum_wait_for_register_unequal(struct pp_smumgr *smumgr, - uint32_t index, uint32_t value, uint32_t mask); - -extern int smum_wait_on_indirect_register(struct pp_smumgr *smumgr, - uint32_t indirect_port, uint32_t index, - uint32_t value, uint32_t mask); - - -extern void smum_wait_for_indirect_register_unequal( - struct pp_smumgr *smumgr, - uint32_t indirect_port, uint32_t index, - uint32_t value, uint32_t mask); - extern int smu_allocate_memory(void *device, uint32_t size, enum cgs_gpu_mem_type type, uint32_t byte_align, uint64_t *mc_addr, void **kptr, void *handle); extern int smu_free_memory(void *device, void *handle); -extern int vega10_smum_init(struct pp_smumgr *smumgr); extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr); -extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result); -extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result); +extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr); +extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); extern int smum_init_smc_table(struct pp_hwmgr *hwmgr); extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr); extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); -extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, +extern uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, 
uint32_t type, uint32_t member); -extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value); +extern uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value); extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr); extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request); -extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr); - -#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT - -#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK - -#define SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \ - port, index, value, mask) \ - smum_wait_on_indirect_register(smumgr, \ - mm##port##_INDEX, index, value, mask) - -#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \ - SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) - -#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ - SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ - SMUM_FIELD_MASK(reg, field) ) - -#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ - index, value, mask) \ - smum_wait_for_register_unequal(smumgr, \ - index, value, mask) - -#define SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, value, mask) \ - SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ - mm##reg, value, mask) - -#define SMUM_WAIT_FIELD_UNEQUAL(smumgr, reg, field, fieldval) \ - SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, \ - (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ - SMUM_FIELD_MASK(reg, field)) - -#define SMUM_GET_FIELD(value, reg, field) \ - (((value) & SMUM_FIELD_MASK(reg, field)) \ - >> SMUM_FIELD_SHIFT(reg, field)) - -#define SMUM_READ_FIELD(device, reg, field) \ - SMUM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field) - -#define SMUM_SET_FIELD(value, reg, field, field_val) \ - (((value) & ~SMUM_FIELD_MASK(reg, field)) | \ - (SMUM_FIELD_MASK(reg, field) & ((field_val) << \ - SMUM_FIELD_SHIFT(reg, field)))) - -#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \ - SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ - reg, field) - -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \ - port, index, value, mask) \ - smum_wait_on_indirect_register(smumgr, \ - mm##port##_INDEX_0, index, value, mask) - -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ - port, index, value, mask) \ - smum_wait_for_indirect_register_unequal(smumgr, \ - mm##port##_INDEX_0, index, value, mask) - - -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) - -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) - - -/*Operations on named fields.*/ - -#define SMUM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \ - SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ - reg, field) - -#define SMUM_WRITE_FIELD(device, reg, field, fieldval) \ - cgs_write_register(device, mm##reg, \ - SMUM_SET_FIELD(cgs_read_register(device, mm##reg), reg, field, fieldval)) - -#define SMUM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \ - cgs_write_ind_register(device, port, ix##reg, \ - SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ - reg, field, fieldval)) - - -#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \ - cgs_write_ind_register(device, port, 
ix##reg, \ - SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ - reg, field, fieldval)) - - -#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \ - (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ - SMUM_FIELD_MASK(reg, field)) - -#define SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \ - (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ - SMUM_FIELD_MASK(reg, field)) - -#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \ - smum_wait_for_indirect_register_unequal(smumgr, \ - mm##port##_INDEX, index, value, mask) - -#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \ - SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) +extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); -#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \ - SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ - SMUM_FIELD_MASK(reg, field) ) #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 1703bbefbfd5..a423c0a85129 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -4,7 +4,7 @@ SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \ polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \ - smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o + smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o ci_smc.o ci_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c new file mode 100644 index 000000000000..9ee14315dce7 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -0,0 +1,2753 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include "linux/delay.h" +#include <linux/types.h> + +#include "smumgr.h" +#include "pp_debug.h" +#include "ci_smc.h" +#include "ci_smumgr.h" +#include "ppsmc.h" +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "cgs_common.h" +#include "atombios.h" +#include "pppcielanes.h" + +#include "smu/smu_7_0_1_d.h" +#include "smu/smu_7_0_1_sh_mask.h" + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "gca/gfx_7_2_d.h" +#include "gca/gfx_7_2_sh_mask.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +#include "processpptables.h" + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define SMC_RAM_END 0x40000 + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define CISLAND_MINIMUM_ENGINE_CLOCK 800 +#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5 + +static const struct ci_pt_defaults defaults_hawaii_xt = { + 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, + { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, + { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } +}; + +static const struct ci_pt_defaults defaults_hawaii_pro = { + 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, + { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, + { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } +}; + +static const struct ci_pt_defaults defaults_bonaire_xt = { + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } +}; + + +static const struct ci_pt_defaults defaults_saturn_xt = { + 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000, + { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D }, + { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } +}; + + +static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr, + uint32_t smc_addr, uint32_t limit) +{ + if ((0 != (3 & smc_addr)) + || ((smc_addr + 3) >= limit)) { + pr_err("smc_addr invalid \n"); + return -EINVAL; + } + + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + return 0; +} + +static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, + const uint8_t *src, uint32_t byte_count, uint32_t limit) +{ + int result; + uint32_t data = 0; + uint32_t original_data; + uint32_t addr = 0; + uint32_t extra_shift; + + if ((3 & smc_start_address) + || ((smc_start_address + byte_count) >= limit)) { + pr_err("smc_start_address invalid \n"); + return -EINVAL; + } + + addr = smc_start_address; + + while (byte_count >= 4) { + /* Bytes are written into the SMC address space with the MSB first. 
*/ + data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; + + result = ci_set_smc_sram_address(hwmgr, addr, limit); + + if (0 != result) + return result; + + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + if (0 != byte_count) { + + data = 0; + + result = ci_set_smc_sram_address(hwmgr, addr, limit); + + if (0 != result) + return result; + + + original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0); + + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + /* Bytes are written into the SMC addres space with the MSB first. */ + data = (0x100 * data) + *src++; + byte_count--; + } + + data <<= extra_shift; + + data |= (original_data & ~((~0UL) << extra_shift)); + + result = ci_set_smc_sram_address(hwmgr, addr, limit); + + if (0 != result) + return result; + + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); + } + + return 0; +} + + +static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr) +{ + static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; + + ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1); + + return 0; +} + +bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr) +{ + return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) + && (0x20100 <= cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixSMC_PC_C))); +} + +static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, + uint32_t *value, uint32_t limit) +{ + int result; + + result = ci_set_smc_sram_address(hwmgr, smc_addr, limit); + + if (result) + return result; + + *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0); + return 0; +} + +int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) +{ + int ret; + + if (!ci_is_smc_ram_running(hwmgr)) + return -EINVAL; + + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); + + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); + + ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + pr_info("\n failed to send message %x ret is %d\n", msg, ret); + + return 0; +} + +int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter) +{ + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); + return ci_send_msg_to_smc(hwmgr, msg); +} + +static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + switch (dev_id) { + case 0x67BA: + case 0x66B1: + smu_data->power_tune_defaults = &defaults_hawaii_pro; + break; + case 0x67B8: + case 0x66B0: + smu_data->power_tune_defaults = &defaults_hawaii_xt; + break; + case 0x6640: + case 0x6641: + case 0x6646: + case 0x6647: + smu_data->power_tune_defaults = &defaults_saturn_xt; + break; + case 0x6649: + case 0x6650: + case 0x6651: + case 0x6658: + case 0x665C: + case 0x665D: + case 0x67A0: + case 0x67A1: + case 0x67A2: + case 0x67A8: + case 0x67A9: + case 0x67AA: + case 0x67B9: + case 0x67BE: + default: + smu_data->power_tune_defaults = &defaults_bonaire_xt; + break; + } +} + +static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, + uint32_t 
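ci_copy_bytes_to_smc() packs source bytes MSB-first before each 32-bit SMC_IND_DATA_0 write; the multiply-and-add form above is just an open-coded big-endian pack. An equivalent, purely illustrative helper:

static inline uint32_t ci_pack_be32_sketch(const uint8_t *src)
{
	/* Same value as src[0]*0x1000000 + src[1]*0x10000 + src[2]*0x100 + src[3]. */
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8) | (uint32_t)src[3];
}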
clock, uint32_t *vol) +{ + uint32_t i = 0; + + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + *vol = allowed_clock_voltage_table->entries[i].v; + return 0; + } + } + + *vol = allowed_clock_voltage_table->entries[i - 1].v; + return 0; +} + +static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t ref_clock; + uint32_t ref_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */ + ref_clock = atomctrl_get_reference_clock(hwmgr); + ref_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider */ + fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup */ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + struct pp_atomctrl_internal_ss_info ss_info; + uint32_t vco_freq = clock * dividers.uc_pll_post_div; + + if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr, + vco_freq, &ss_info)) { + uint32_t clk_s = ref_clock * 5 / + (ref_divider * ss_info.speed_spectrum_rate); + uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage * + fbdiv / (clk_s * 10000); + + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s); + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2, + CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v); + } + } + + sclk->SclkFrequency = clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, + const struct phm_phase_shedding_limits_table *pl, + uint32_t sclk, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + for (i = 0; i < pl->count; i++) { + if (sclk < 
pl->entries[i].Sclk) { + *p_shed = i; + break; + } + } +} + +static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock, + uint32_t clock_insr) +{ + uint8_t i; + uint32_t temp; + uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK); + + if (clock < min) { + pr_info("Engine clock can't satisfy stutter requirement!\n"); + return 0; + } + for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { + temp = clock >> i; + + if (temp >= min || i == 0) + break; + } + return i; +} + +static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint16_t sclk_al_threshold, + struct SMU7_Discrete_GraphicsLevel *level) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + + result = ci_calculate_sclk_params(hwmgr, clock, level); + + /* populate graphics levels */ + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_sclk, clock, + (uint32_t *)(&level->MinVddc)); + if (result) { + pr_err("vdd_dep_on_sclk table is NULL\n"); + return result; + } + + level->SclkFrequency = clock; + level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) + ci_populate_phase_value_based_on_sclk(hwmgr, + hwmgr->dyn_state.vddc_phase_shed_limits_table, + clock, + &level->MinVddcPhases); + + level->ActivityLevel = sclk_al_threshold; + level->CcPwrDynRm = 0; + level->CcPwrDynRm1 = 0; + level->EnabledForActivity = 0; + /* this level can be used for throttling.*/ + level->EnabledForThrottle = 1; + level->UpH = 0; + level->DownH = 0; + level->VoltageDownH = 0; + level->PowerThrottle = 0; + + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) + level->DeepSleepDivId = + ci_get_sleep_divider_id_from_clock(clock, + CISLAND_MINIMUM_ENGINE_CLOCK); + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (0 == result) { + level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases); + CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); + } + + return result; +} + +int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result = 0; + uint32_t array = smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) * + SMU7_MAX_LEVELS_GRAPHICS; + struct SMU7_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t i; + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = ci_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &levels[i]); + if (result) + return result; + if (i > 1) + smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + if (i == (dpm_table->sclk_table.count - 1)) + smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark = 
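/* only the top sclk level gets the high display watermark; every other level keeps the PPSMC_DISPLAY_WATERMARK_LOW default assigned in ci_populate_single_graphic_level() */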
+ PPSMC_DISPLAY_WATERMARK_HIGH; + } + + smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; + + smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + result = ci_copy_bytes_to_smc(hwmgr, array, + (u8 *)levels, array_size, + SMC_RAM_END); + + return result; + +} + +static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + + tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (ci_read_smc_sram_dword(hwmgr, + fuse_table_offset + + offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else + smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + uint16_t tmp; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + + if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) + || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity) + tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity; + else + tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity; + + smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp); + + return 0; +} + +static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) +{ + int i; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; + uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; + uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2; + + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table, + "The CAC Leakage table does not exist!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8, + "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count, + "CACLeakageTable->count and 
VddcDependencyOnSCLk->count not equal", return -EINVAL); + + for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { + lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1); + hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2); + hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3); + } else { + lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc); + hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage); + } + } + + return 0; +} + +static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr) +{ + int i; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint8_t *vid = smu_data->power_tune_table.VddCVid; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8, + "There should never be more than 8 entries for VddcVid!!!", + return -EINVAL); + + for (i = 0; i < (int)data->vddc_voltage_table.count; i++) + vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value); + + return 0; +} + +static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; + u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; + int i, min, max; + + min = max = hi_vid[0]; + for (i = 0; i < 8; i++) { + if (0 != hi_vid[i]) { + if (min > hi_vid[i]) + min = hi_vid[i]; + if (max < hi_vid[i]) + max = hi_vid[i]; + } + + if (0 != lo_vid[i]) { + if (min > lo_vid[i]) + min = lo_vid[i]; + if (max < lo_vid[i]) + max = lo_vid[i]; + } + } + + if ((min == 0) || (max == 0)) + return -EINVAL; + smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max; + smu_data->power_tune_table.GnbLPMLMinVid = (u8)min; + + return 0; +} + +static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint32_t pm_fuse_table_offset; + int ret = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) { + pr_err("Attempt to get pm_fuse_table_offset Failed!\n"); + return -EINVAL; + } + + /* DW0 - DW3 */ + ret = ci_populate_bapm_vddc_vid_sidd(hwmgr); + /* DW4 - DW5 */ + ret |= ci_populate_vddc_vid(hwmgr); + /* DW6 */ + ret |= ci_populate_svi_load_line(hwmgr); + /* DW7 */ + ret |= ci_populate_tdc_limit(hwmgr); + /* DW8 */ + ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset); + + ret |= 
ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset); + + ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr); + + ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr); + if (ret) + return ret; + + ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END); + } + return ret; +} + +static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); + struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; + struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table; + const uint16_t *def1, *def2; + int i, j, k; + + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + dpm_table->DTETjOffset = 0; + dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + if (ppm) { + dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000; + dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256; + } else { + dpm_table->PPM_PkgPwrLimit = 0; + dpm_table->PPM_TemperatureLimit = 0; + } + + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); + + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient); + def1 = defaults->bapmti_r; + def2 = defaults->bapmti_rc; + + for (i = 0; i < SMU7_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU7_DTE_SOURCES; j++) { + for (k = 0; k < SMU7_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1); + dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2); + def1++; + def2++; + } + } + } + + return 0; +} + +static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, uint16_t *hi, + uint16_t *lo) +{ + uint16_t v_index; + bool vol_found = false; + *hi = tab->value * VOLTAGE_SCALE; + *lo = tab->value * VOLTAGE_SCALE; + + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk, + "The SCLK/VDDC Dependency Table does not exist.\n", + return -EINVAL); + + if (NULL == hwmgr->dyn_state.cac_leakage_table) { + pr_warn("CAC Leakage Table does not exist, using vddc.\n"); + return 0; + } + + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); + } else { + pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n"); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); 
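/* clamp to the last CAC leakage entry when the SCLK/VDDC dependency table has more entries than the leakage table; the fallback pass below applies the same clamp */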
+ } + break; + } + } + + if (!vol_found) { + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; + } else { + pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table."); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + if (!vol_found) + pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n"); + } + + return 0; +} + +static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, + SMU7_Discrete_VoltageLevel *smc_voltage_tab) +{ + int result; + + result = ci_get_std_voltage_value_sidd(hwmgr, tab, + &smc_voltage_tab->StdVoltageHiSidd, + &smc_voltage_tab->StdVoltageLoSidd); + if (result) { + smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE; + smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE; + } + + smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd); + + return 0; +} + +static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + unsigned int count; + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->VddcLevelCount = data->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + result = ci_populate_smc_voltage_table(hwmgr, + &(data->vddc_voltage_table.entries[count]), + &(table->VddcLevel[count])); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) + table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; + else + table->VddcLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + + return 0; +} + +static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + int result; + + table->VddciLevelCount = data->vddci_voltage_table.count; + + for (count = 0; count < table->VddciLevelCount; count++) { + result = ci_populate_smc_voltage_table(hwmgr, + &(data->vddci_voltage_table.entries[count]), + &(table->VddciLevel[count])); + PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL); + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; + else + table->VddciLevel[count].Smio |= 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); + + return 0; +} + +static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr 
*)(hwmgr->backend); + uint32_t count; + int result; + + table->MvddLevelCount = data->mvdd_voltage_table.count; + + for (count = 0; count < table->MvddLevelCount; count++) { + result = ci_populate_smc_voltage_table(hwmgr, + &(data->mvdd_voltage_table.entries[count]), + &table->MvddLevel[count]); + PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL); + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) + table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; + else + table->MvddLevel[count].Smio |= 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); + + return 0; +} + + +static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result; + + result = ci_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDC voltage table to SMC", return -EINVAL); + + result = ci_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDCI voltage table to SMC", return -EINVAL); + + result = ci_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate MVDD voltage table to SMC", return -EINVAL); + + return 0; +} + +static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU7_Discrete_Ulv *state) +{ + uint32_t voltage_response_time, ulv_voltage; + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage); + PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;); + + if (ulv_voltage == 0) { + data->ulv_supported = false; + return 0; + } + + if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) + state->VddcOffset = 0; + else + /* used in SMIO Mode. not implemented for now. this is backup only for CI. 
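The offset here stays a raw voltage difference; the SVI2 branch below instead converts the same difference into VID steps, e.g. an offset of 50 becomes 50 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 = 50 * 100 / 625 = 8.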
*/ + state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage); + } else { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) + state->VddcOffsetVid = 0; + else /* used in SVI2 Mode */ + state->VddcOffsetVid = (uint8_t)( + (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) + * VOLTAGE_VID_OFFSET_SCALE2 + / VOLTAGE_VID_OFFSET_SCALE1); + } + state->VddcPhase = 1; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr, + SMU7_Discrete_Ulv *ulv_level) +{ + return ci_populate_ulv_level(hwmgr, ulv_level); +} + +static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint32_t i; + +/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +static int ci_calculate_mclk_params( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU7_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dllStateOn + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; + uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; + uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; + uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; + uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; + uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; + uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; + + pp_atomctrl_memory_clock_param mpll_param; + int result; + + result = atomctrl_get_memory_pll_dividers_si(hwmgr, + memory_clock, &mpll_param, strobe_mode); + PP_ASSERT_WITH_CODE(0 == result, + "Error retrieving Memory Clock Parameters from VBIOS.", return result); + + mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); + + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); + + mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, + MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, 
mpll_param.mpll_post_divider); + + if (data->is_memory_gddr5) { + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/ + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + + if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { + uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; + uint32_t clkv = + (uint32_t)((((131 * ss_info.speed_spectrum_percentage * + ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); + + mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); + mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); + } + } + + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); + + + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) + mc_para_index = 0x00; + else if (memory_clock > 47500) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } else { + if (memory_clock < 65000) + mc_para_index = 0x00; + else if (memory_clock > 135000) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + + return mc_para_index; +} + +static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) + mc_para_index = 0; + else if (memory_clock >= 80000) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + + return mc_para_index; +} + +static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t memory_clock, uint32_t *p_shed) +{ + unsigned int i; + + *p_shed = 1; + + for (i = 0; i < pl->count; i++) { + if (memory_clock < pl->entries[i].Mclk) { + *p_shed = i; + break; + } + } + + return 0; +} + +static int ci_populate_single_memory_level( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU7_Discrete_MemoryLevel *memory_level + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int result = 0; + bool dll_state_on; + struct cgs_display_info info = {0}; + uint32_t 
mclk_edc_wr_enable_threshold = 40000; + uint32_t mclk_edc_enable_threshold = 40000; + uint32_t mclk_strobe_mode_threshold = 40000; + + if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) { + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); + } + + if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddci_dependency_on_mclk, + memory_clock, + &memory_level->MinVddci); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result); + } + + if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) { + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.mvdd_dependency_on_mclk, + memory_clock, + &memory_level->MinMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result); + } + + memory_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) { + ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table, + memory_clock, &memory_level->MinVddcPhases); + } + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 1; + memory_level->UpH = 0; + memory_level->DownH = 100; + memory_level->VoltageDownH = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + /* stutter mode not support on ci */ + + /* decide strobe mode*/ + memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) && + (memory_clock <= mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_gddr5) { + memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((mclk_edc_enable_threshold != 0) && + (memory_clock > mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((mclk_edc_wr_enable_threshold != 0) && + (memory_clock > mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (ci_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + else + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } else + dll_state_on = data->dll_default_on; + } else { + memory_level->StrobeRatio = + ci_get_ddr3_mclk_frequency_ratio(memory_clock); + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
1 : 0; + } + + result = ci_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); + + if (0 == result) { + memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases); + memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE); + memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result; + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + + uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel); + uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY; + SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", return -EINVAL); + result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value, + &(smu_data->smc_state_table.MemoryLevel[i])); + if (0 != result) + return result; + } + + smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + if ((dpm_table->mclk_table.count >= 2) + && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) { + smu_data->smc_state_table.MemoryLevel[1].MinVddci = + smu_data->smc_state_table.MemoryLevel[0].MinVddci; + smu_data->smc_state_table.MemoryLevel[1].MinMvdd = + smu_data->smc_state_table.MemoryLevel[0].MinMvdd; + } + smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + + result = ci_copy_bytes_to_smc(hwmgr, + level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + return result; +} + +static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, 
uint32_t mclk, + SMU7_Discrete_VoltageLevel *voltage) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) { + if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { + /* Always round to higher voltage. */ + voltage->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + + PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count, + "MVDD Voltage is outside the supported range.", return -EINVAL); + + } else { + return -EINVAL; + } + + return 0; +} + +static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = 0; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + + SMU7_Discrete_VoltageLevel voltage_level; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + + + /* The ACPI state should not do DPM on DC (or ever).*/ + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (data->acpi_vddc) + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); + else + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE); + + table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1; + /* assign zero for now*/ + table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* divider ID for required SCLK*/ + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, + CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + /* For various features to be enabled/disabled while this level is active.*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + /* SCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + 
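/*
 * The CONVERT_FROM_HOST_TO_SMC_* calls in this block byte-swap each field
 * before it is copied into SMC SRAM, consistent with the MSB-first note in
 * ci_copy_bytes_to_smc(). A minimal sketch of the 32-bit case, assuming a
 * little-endian host and that PP_HOST_TO_SMC_UL reduces to a plain byte
 * swap (the helper name is illustrative only):
 */
static inline uint32_t example_host_to_smc_ul(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}
/* e.g. example_host_to_smc_ul(0x12345678) == 0x78563412 */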
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + + /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/ + table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; + table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc; + else { + if (data->acpi_vddci != 0) + table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE); + } + + if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level)) + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinMvdd = 0; + + /* Force reset on DLL*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); + + /* Disable DLL in ACPIState*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); + + /* Enable DLL bypass signal*/ + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK0_BYPASS, 0); + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK1_BYPASS, 0); + + table->MemoryACPILevel.DllCntl = + PP_HOST_TO_SMC_UL(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = + PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); + table->MemoryACPILevel.MpllDqFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl_1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); + table->MemoryACPILevel.MpllSs1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); + table->MemoryACPILevel.MpllSs2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpH = 0; + table->MemoryACPILevel.DownH = 100; + table->MemoryACPILevel.VoltageDownH = 0; + /* Indicates maximum activity level for this performance level.*/ + table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = 0; + table->MemoryACPILevel.StrobeEnable = 0; + table->MemoryACPILevel.EdcReadEnable = 0; + table->MemoryACPILevel.EdcWriteEnable = 0; + table->MemoryACPILevel.RttEnable = 0; + + return result; +} + +static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = 0; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_uvd_clock_voltage_dependency_table *uvd_table = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + + table->UvdLevelCount = (uint8_t)(uvd_table->count); + + for (count = 0; count < table->UvdLevelCount; count++) { + 
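/* each UVD level records its VCLK/DCLK from the pptable, the matching minimum VDDC (scaled by VOLTAGE_SCALE) and PLL divider IDs queried from the VBIOS; the VCE, ACP and SAMU tables below follow the same shape */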
table->UvdLevel[count].VclkFrequency = + uvd_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = + uvd_table->entries[count].dclk; + table->UvdLevel[count].MinVddc = + uvd_table->entries[count].v * VOLTAGE_SCALE; + table->UvdLevel[count].MinVddcPhases = 1; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].VclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Vclk clock", return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].DclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Dclk clock", return result); + + table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc); + } + + return result; +} + +static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_vce_clock_voltage_dependency_table *vce_table = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + + table->VceLevelCount = (uint8_t)(vce_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = vce_table->entries[count].evclk; + table->VceLevel[count].MinVoltage = + vce_table->entries[count].v * VOLTAGE_SCALE; + table->VceLevel[count].MinPhases = 1; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage); + } + return result; +} + +static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_acp_clock_voltage_dependency_table *acp_table = + hwmgr->dyn_state.acp_clock_voltage_dependency_table; + + table->AcpLevelCount = (uint8_t)(acp_table->count); + table->AcpBootLevel = 0; + + for (count = 0; count < table->AcpLevelCount; count++) { + table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk; + table->AcpLevel[count].MinVoltage = acp_table->entries[count].v; + table->AcpLevel[count].MinPhases = 1; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->AcpLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for engine clock", return result); + + table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage); + } + return result; +} + +static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_samu_clock_voltage_dependency_table *samu_table = + hwmgr->dyn_state.samu_clock_voltage_dependency_table; + + table->SamuBootLevel = 0; + table->SamuLevelCount = 
(uint8_t)(samu_table->count); + + for (count = 0; count < table->SamuLevelCount; count++) { + table->SamuLevel[count].Frequency = samu_table->entries[count].samclk; + table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE; + table->SamuLevel[count].MinPhases = 1; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->SamuLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for samu clock", return result); + + table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage); + } + return result; +} + +static int ci_populate_memory_timing_parameters( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock, + struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs + ) +{ + uint32_t dramTiming; + uint32_t dramTiming2; + uint32_t burstTime; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + engine_clock, memory_clock); + + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + + return 0; +} + +static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + int result = 0; + SMU7_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = ci_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (0 != result) + break; + } + } + + if (0 == result) { + result = ci_copy_bytes_to_smc( + hwmgr, + smu_data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU7_Discrete_MCArbDramTimingTable), + SMC_RAM_END + ); + } + + return result; +} + +static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table*/ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.GraphicsBootLevel = 0; + pr_err("VBIOS did not find boot engine clock value \ + in dependency table. 
Using Graphics DPM level 0!"); + result = 0; + } + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.MemoryBootLevel = 0; + pr_err("VBIOS did not find boot engine clock value \ + in dependency table. Using Memory DPM level 0!"); + result = 0; + } + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value; + table->BootVddci = data->vbios_boot_state.vddci_bootup_value; + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; + + return result; +} + +static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr, + SMU7_Discrete_MCRegisters *mc_reg_table) +{ + const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend; + + uint32_t i, j; + + for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) { + if (smu_data->mc_reg_table.validflag & 1<<j) { + PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE, + "Index of mc_reg_table->address[] array out of boundary", return -EINVAL); + mc_reg_table->address[i].s0 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (uint8_t)i; + + return 0; +} + +static void ci_convert_mc_registers( + const struct ci_mc_reg_entry *entry, + SMU7_Discrete_MCRegisterSet *data, + uint32_t num_entries, uint32_t valid_flag) +{ + uint32_t i, j; + + for (i = 0, j = 0; j < num_entries; j++) { + if (valid_flag & 1<<j) { + data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]); + i++; + } + } +} + +static int ci_convert_mc_reg_table_entry_to_smc( + struct pp_hwmgr *hwmgr, + const uint32_t memory_clock, + SMU7_Discrete_MCRegisterSet *mc_reg_table_data + ) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint32_t i = 0; + + for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { + if (memory_clock <= + smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) { + break; + } + } + + if ((i == smu_data->mc_reg_table.num_entries) && (i > 0)) + --i; + + ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, smu_data->mc_reg_table.last, + smu_data->mc_reg_table.validflag); + + return 0; +} + +static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, + SMU7_Discrete_MCRegisters *mc_regs) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int res; + uint32_t i; + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + res = ci_convert_mc_reg_table_entry_to_smc( + hwmgr, + data->dpm_table.mclk_table.dpm_levels[i].value, + &mc_regs->data[i] + ); + + if (0 != res) + result = res; + } + + return result; +} + +static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t address; + int32_t result; + + if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + + memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters)); + + result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs)); + + if (result != 0) + return result; + + address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]); + + return ci_copy_bytes_to_smc(hwmgr, address, + (uint8_t *)&smu_data->mc_regs.data[0], + 
sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, + SMC_RAM_END); +} + +static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + + memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters)); + result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for the MC register addresses!", return result;); + + result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for driver state!", return result;); + + return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start, + (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END); +} + +static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint8_t count, level; + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk + >= data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk + >= data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + table->SVI2Enable = 1; + else + table->SVI2Enable = 0; + return 0; +} + +static int ci_start_smc(struct pp_hwmgr *hwmgr) +{ + /* set smc instruct start point at 0x0 */ + ci_program_jump_on_start(hwmgr); + + /* enable smc clock */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, + INTERRUPTS_ENABLED, 1); + + return 0; +} + +int ci_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct pp_atomctrl_gpio_pin_assignment gpio_pin; + u32 i; + + ci_initialize_power_tune_defaults(hwmgr); + memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table)); + + if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) + ci_populate_smc_voltage_tables(hwmgr, table); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (data->ulv_supported) { + result = ci_populate_ulv_state(hwmgr, &(table->Ulv)); + PP_ASSERT_WITH_CODE(0 == result, + 
"Failed to initialize ULV state!", return result); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, 0x40035); + } + + result = ci_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result); + + result = ci_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result); + + result = ci_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result); + + result = ci_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result); + + result = ci_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result); + + result = ci_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result); + + result = ci_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result); + + /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ + /* need to populate the ARB settings for the initial state. */ + result = ci_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result); + + result = ci_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result); + + table->UvdBootLevel = 0; + table->VceBootLevel = 0; + table->AcpBootLevel = 0; + table->SamuBootLevel = 0; + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + result = ci_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result); + + result = ci_populate_smc_initial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result); + + result = ci_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result); + + table->UVDInterval = 1; + table->VCEInterval = 1; + table->ACPInterval = 1; + table->SAMUInterval = 1; + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + + table->TemperatureLimitHigh = + (data->thermal_temp_setting.temperature_high * + SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->TemperatureLimitLow = + (data->thermal_temp_setting.temperature_low * + SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->VddcVddciDelta = 4000; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + + PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + + table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count; + table->PCIeGenInterval = 1; + + ci_populate_smc_svi2_config(hwmgr, table); + + for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++) + CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + if 
(atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { + table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); + table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE); + table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController), + SMC_RAM_END); + + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result;); + + result = ci_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate initialize MC Reg table!", return result); + + result = ci_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate PM fuses to SMC memory!", return result); + + ci_start_smc(hwmgr); + + return 0; +} + +int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend); + SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + if (0 == ci_data->fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - 
hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); + + return 0; +} + +static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return ci_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = ci_copy_bytes_to_smc( + hwmgr, + smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, + LowSclkInterruptT), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + + result = ci_update_and_upload_mc_reg_table(hwmgr); + + PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result); + + result = ci_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + + return result; +} + +uint32_t ci_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU7_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU7_SoftRegisters, AverageGraphicsA); + case PreVBlankGap: + return offsetof(SMU7_SoftRegisters, PreVBlankGap); + case VBlankTimeout: 
+ return offsetof(SMU7_SoftRegisters, VBlankTimeout); + } + case SMU_Discrete_DpmTable: + switch (member) { + case LowSclkInterruptThreshold: + return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT); + } + } + pr_debug("can't get the offset of type %x member %x\n", type, member); + return 0; +} + +uint32_t ci_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU7_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU7_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU7_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU7_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU7_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDCI: + return SMU7_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU7_MAX_LEVELS_MVDD; + } + + pr_debug("can't get the mac of %x\n", value); + return 0; +} + +static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr) +{ + uint32_t byte_count, start_addr; + uint8_t *src; + uint32_t data; + + struct cgs_firmware_info info = {0}; + + cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info); + + hwmgr->is_kicker = info.is_kicker; + byte_count = info.image_size; + src = (uint8_t *)info.kptr; + start_addr = info.ucode_start_address; + + if (byte_count > SMC_RAM_END) { + pr_err("SMC address is beyond the SMC RAM area.\n"); + return -EINVAL; + } + + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + + for (; byte_count >= 4; byte_count -= 4) { + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); + src += 4; + } + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + + if (0 != byte_count) { + pr_err("SMC size must be dividable by 4\n"); + return -EINVAL; + } + + return 0; +} + +static int ci_upload_firmware(struct pp_hwmgr *hwmgr) +{ + if (ci_is_smc_ram_running(hwmgr)) { + pr_info("smc is running, no need to load smc firmware\n"); + return 0; + } + PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, + boot_seq_done, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL, + pre_fetcher_en, 1); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + return ci_load_smc_ucode(hwmgr); +} + +int ci_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend); + + uint32_t tmp = 0; + int result; + bool error = false; + + if (ci_upload_firmware(hwmgr)) + return -EINVAL; + + result = ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->dpm_table_start = tmp; + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (0 == result) { + data->soft_regs_start = tmp; + ci_data->soft_regs_start = tmp; + } + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->mc_reg_table_start = tmp; + + result = ci_read_smc_sram_dword(hwmgr, + 
SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->fan_table_start = tmp; + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->arb_table_start = tmp; + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (0 == result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? 1 : 0; +} + +static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg) +{ + bool result = true; + + switch (in_reg) { + case mmMC_SEQ_RAS_TIMING: + *out_reg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *out_reg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *out_reg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *out_reg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *out_reg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *out_reg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *out_reg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *out_reg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *out_reg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *out_reg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *out_reg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *out_reg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *out_reg = mmMC_SEQ_PMG_TIMING_LP; + break; + + case mmMC_PMG_CMD_MRS2: + *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *out_reg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = false; + break; + } + + return result; +} + +static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) + ? 
address : table->mc_reg_address[i].s1; + } + return 0; +} + +static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, + struct ci_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -EINVAL); + + for (i = 0; i < table->last; i++) + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr, + struct ci_mc_reg_table *table) +{ + uint8_t i, j, k; + uint32_t temp_reg; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + for (i = 0, j = table->last; i < table->last; i++) { + PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + switch (table->mc_reg_address[i].s1) { + + case mmMC_SEQ_MISC1: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + } + j++; + PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + + if (!data->is_memory_gddr5) + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) { + table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + } + + break; + + case mmMC_SEQ_RESERVE_M: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + break; + + default: + break; + } + + } + + table->last = j; + + return 0; +} + +static int ci_set_valid_flag(struct ci_mc_reg_table *table) +{ + uint8_t i, j; + + for (i = 0; i < 
table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != + table->mc_reg_table_entry[j].mc_data[i]) { + table->validflag |= (1 << i); + break; + } + } + } + + return 0; +} + +int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + pp_atomctrl_mc_reg_table *table; + struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table; + uint8_t module_index = ci_get_memory_modile_index(hwmgr); + + table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + /* Program additional LP registers that are no longer programmed by VBIOS */ + cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); + + memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); + + result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); + + if (0 == result) + result = ci_copy_vbios_smc_reg_table(table, ni_table); + + if (0 == result) { + ci_set_s0_mc_reg_index(ni_table); + result = ci_set_mc_special_registers(hwmgr, ni_table); + } + + if (0 == result) + ci_set_valid_flag(ni_table); + + kfree(table); + + return result; +} + +bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return ci_is_smc_ram_running(hwmgr); 
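Note on the MC-register path just above: ci_set_valid_flag sets a bit for every register column whose value differs between at least two MCLK entries, and ci_populate_mc_reg_address / ci_convert_mc_registers then copy only the flagged columns, in the same packed order, so registers whose value never changes across memory-clock levels are left out of the table uploaded to the SMC. The following is a minimal standalone sketch of that compaction idiom; the types, sizes and function names are hypothetical and are not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define NUM_REGS	4
#define NUM_LEVELS	3

/* Set bit i for every register column whose value differs between
 * at least two levels; constant columns stay unflagged. */
static uint16_t mark_valid(const uint32_t data[NUM_LEVELS][NUM_REGS])
{
	uint16_t validflag = 0;
	int i, j;

	for (i = 0; i < NUM_REGS; i++) {
		for (j = 1; j < NUM_LEVELS; j++) {
			if (data[j - 1][i] != data[j][i]) {
				validflag |= 1 << i;
				break;
			}
		}
	}
	return validflag;
}

/* Copy one level's values into a packed array, keeping only the flagged
 * columns, in the same order the address table was packed. */
static int pack_level(const uint32_t *level, uint16_t validflag, uint32_t *out)
{
	int i = 0, j;

	for (j = 0; j < NUM_REGS; j++) {
		if (validflag & (1 << j))
			out[i++] = level[j];
	}
	return i;	/* number of packed registers */
}

int main(void)
{
	const uint32_t data[NUM_LEVELS][NUM_REGS] = {
		{ 0x10, 0x20, 0x30, 0x40 },
		{ 0x10, 0x21, 0x30, 0x41 },
		{ 0x10, 0x22, 0x30, 0x42 },
	};
	uint32_t packed[NUM_REGS];
	uint16_t flags = mark_valid(data);
	int n = pack_level(data[2], flags, packed);

	/* Columns 1 and 3 vary, columns 0 and 2 are constant,
	 * so this prints: validflag=0x0a, 2 of 4 registers uploaded */
	printf("validflag=0x%02x, %d of %d registers uploaded\n",
	       flags, n, NUM_REGS);
	return 0;
}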
+} + +int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *) + (hwmgr->smu_backend); + struct SMU7_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t array = smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) * + SMU7_MAX_LEVELS_GRAPHICS; + uint32_t i; + + for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { + levels[i].ActivityLevel = + cpu_to_be16(request->activity_threshold); + levels[i].EnabledForActivity = 1; + levels[i].UpH = request->up_hyst; + levels[i].DownH = request->down_hyst; + } + + return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, + array_size, SMC_RAM_END); +} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h index fbdff3e02aa3..cc4176d2d25f 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h @@ -1,5 +1,5 @@ /* - * Copyright 2015 Advanced Micro Devices, Inc. + * Copyright 2017 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,19 +20,33 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ -#include "eventmgr.h" -#include "eventinit.h" -#include "eventmanagement.h" -#include "eventmanager.h" -#include "power_state.h" -#include "hardwaremanager.h" +#ifndef CI_SMC_H +#define CI_SMC_H -int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id); +#include <linux/types.h> -int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id); -int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id); +struct pp_smumgr; +struct pp_hwmgr; +struct amd_pp_profile; -int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip); +int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter); +int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); +int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +int ci_init_smc_table(struct pp_hwmgr *hwmgr); +int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); +int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr); +uint32_t ci_get_offsetof(uint32_t type, uint32_t member); +uint32_t ci_get_mac_definition(uint32_t value); +int ci_process_firmware_header(struct pp_hwmgr *hwmgr); +int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); +bool ci_is_dpm_running(struct pp_hwmgr *hwmgr); +int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request); + + +#endif -int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c new file mode 100644 index 000000000000..f265f42a7ed3 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -0,0 +1,86 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
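Everything handed to ci_copy_bytes_to_smc in ci_populate_requested_graphic_levels above is converted with cpu_to_be16/cpu_to_be32 (or PP_HOST_TO_SMC_US/UL) first, and ci_load_smc_ucode packs the firmware image into words MSB-first for the same reason: the SMC-side tables are laid out big-endian regardless of host byte order. A standalone sketch of that packing step follows; the buffer contents and helper name are hypothetical and not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Pack a byte stream into 32-bit words MSB-first, as the SMC-side
 * layout expects. */
static uint32_t pack_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8)  |  (uint32_t)src[3];
}

int main(void)
{
	const uint8_t image[8] = { 0xDE, 0xAD, 0xBE, 0xEF, 0x01, 0x02, 0x03, 0x04 };
	unsigned int i;

	/* On a little-endian host a plain uint32_t load of image[0..3] would
	 * give 0xEFBEADDE; the explicit shifts always give 0xDEADBEEF. */
	for (i = 0; i + 4 <= sizeof(image); i += 4)
		printf("word %u: 0x%08X\n", i / 4,
		       (unsigned int)pack_be32(image + i));
	return 0;
}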
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include "linux/delay.h" + +#include "smumgr.h" +#include "ci_smumgr.h" +#include "cgs_common.h" +#include "ci_smc.h" + +static int ci_smu_init(struct pp_hwmgr *hwmgr) +{ + int i; + struct ci_smumgr *ci_priv = NULL; + + ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL); + + if (ci_priv == NULL) + return -ENOMEM; + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) + ci_priv->activity_target[i] = 30; + + hwmgr->smu_backend = ci_priv; + + return 0; +} + +static int ci_smu_fini(struct pp_hwmgr *hwmgr) +{ + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; + cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); + return 0; +} + +static int ci_start_smu(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +const struct pp_smumgr_func ci_smu_funcs = { + .smu_init = ci_smu_init, + .smu_fini = ci_smu_fini, + .start_smu = ci_start_smu, + .check_fw_load_finish = NULL, + .request_smu_load_fw = NULL, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = ci_send_msg_to_smc, + .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, + .get_offsetof = ci_get_offsetof, + .process_firmware_header = ci_process_firmware_header, + .init_smc_table = ci_init_smc_table, + .update_sclk_threshold = ci_update_sclk_threshold, + .thermal_setup_fan_table = ci_thermal_setup_fan_table, + .populate_all_graphic_levels = ci_populate_all_graphic_levels, + .populate_all_memory_levels = ci_populate_all_memory_levels, + .get_mac_definition = ci_get_mac_definition, + .initialize_mc_reg_table = ci_initialize_mc_reg_table, + .is_dpm_running = ci_is_dpm_running, + .populate_requested_graphic_levels = ci_populate_requested_graphic_levels, +}; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h new file mode 100644 index 000000000000..8189cfa17c46 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h @@ -0,0 +1,78 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. 
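ci_smumgr.c above follows the per-ASIC pattern used throughout this series: smu_init allocates a private backend struct and hangs it off the manager handle as smu_backend, smu_fini releases it, and a const ops table exposes the entry points the common code dispatches through. Below is a minimal standalone sketch of that pattern; the simplified types and names are hypothetical stand-ins, independent of the real pp_hwmgr and pp_smumgr_func definitions.

#include <stdlib.h>

/* Hypothetical, simplified stand-ins for the real powerplay types. */
struct mgr;

struct smu_ops {
	int (*smu_init)(struct mgr *m);
	int (*smu_fini)(struct mgr *m);
};

struct mgr {
	const struct smu_ops *ops;
	void *smu_backend;		/* per-ASIC private data */
};

struct ci_backend {
	unsigned int activity_target[8];
};

static int ci_init(struct mgr *m)
{
	struct ci_backend *priv = calloc(1, sizeof(*priv));
	unsigned int i;

	if (!priv)
		return -1;
	for (i = 0; i < 8; i++)
		priv->activity_target[i] = 30;	/* default activity target */
	m->smu_backend = priv;
	return 0;
}

static int ci_fini(struct mgr *m)
{
	free(m->smu_backend);
	m->smu_backend = NULL;
	return 0;
}

static const struct smu_ops ci_ops = {
	.smu_init = ci_init,
	.smu_fini = ci_fini,
};

int main(void)
{
	struct mgr m = { .ops = &ci_ops };

	/* Common code only ever goes through the ops table. */
	if (m.ops->smu_init(&m) == 0)
		m.ops->smu_fini(&m);
	return 0;
}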
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _CI_SMUMANAGER_H_ +#define _CI_SMUMANAGER_H_ + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 6 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 8 + +#include "smu7_discrete.h" +#include <pp_endian.h> +#include "ppatomctrl.h" + +struct ci_pt_defaults { + u8 svi_load_line_en; + u8 svi_load_line_vddc; + u8 tdc_vddc_throttle_release_limit_perc; + u8 tdc_mawt; + u8 tdc_waterfall_ctl; + u8 dte_ambient_temp_base; + u32 display_cac; + u32 bapm_temp_gradient; + u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; + u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; +}; + +struct ci_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ci_mc_reg_table { + uint8_t last; + uint8_t num_entries; + uint16_t validflag; + struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ci_smumgr { + uint32_t soft_regs_start; + uint32_t dpm_table_start; + uint32_t mc_reg_table_start; + uint32_t fan_table_start; + uint32_t arb_table_start; + uint32_t ulv_setting_starts; + struct SMU7_Discrete_DpmTable smc_state_table; + struct SMU7_Discrete_PmFuses power_tune_table; + const struct ci_pt_defaults *power_tune_defaults; + SMU7_Discrete_MCRegisters mc_regs; + struct ci_mc_reg_table mc_reg_table; + uint32_t activity_target[SMU7_MAX_LEVELS_GRAPHICS]; + +}; + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 652aaa43e95c..78ab0556e48f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -52,53 +52,52 @@ static const enum cz_scratch_entry firmware_list[] = { CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, }; -static int cz_smum_get_argument(struct pp_smumgr *smumgr) +static int cz_smum_get_argument(struct pp_hwmgr *hwmgr) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - return cgs_read_register(smumgr->device, + return cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); } -static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr, - uint16_t msg) +static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) { int result = 0; - if (smumgr == NULL || smumgr->device == NULL) + 
if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - result = SMUM_WAIT_FIELD_UNEQUAL(smumgr, + result = PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); if (result != 0) { pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg); return result; } - cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0); - cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg); + cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0); + cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg); return 0; } /* Send a message to the SMC, and wait for its response.*/ -static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int result = 0; - result = cz_send_msg_to_smc_async(smumgr, msg); + result = cz_send_msg_to_smc_async(hwmgr, msg); if (result != 0) return result; - return SMUM_WAIT_FIELD_UNEQUAL(smumgr, + return PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); } -static int cz_set_smc_sram_address(struct pp_smumgr *smumgr, +static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_address, uint32_t limit) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; if (0 != (3 & smc_address)) { @@ -111,39 +110,39 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr, return -EINVAL; } - cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0, + cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address); return 0; } -static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr, +static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_address, uint32_t value, uint32_t limit) { int result; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - result = cz_set_smc_sram_address(smumgr, smc_address, limit); + result = cz_set_smc_sram_address(hwmgr, smc_address, limit); if (!result) - cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value); + cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value); return result; } -static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); + cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); - return cz_send_msg_to_smc(smumgr, msg); + return cz_send_msg_to_smc(hwmgr, msg); } -static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, +static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t firmware) { int i; @@ -151,19 +150,19 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, SMU8_FIRMWARE_HEADER_LOCATION + offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index); + cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index); - for (i = 0; i < smumgr->usec_timeout; i++) { + for (i = 0; i < hwmgr->usec_timeout; i++) { if (firmware == - (cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware)) + (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware)) break; udelay(1); } - if (i >= smumgr->usec_timeout) 
{ + if (i >= hwmgr->usec_timeout) { pr_err("SMU check loaded firmware failed.\n"); return -EINVAL; } @@ -171,7 +170,7 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, return 0; } -static int cz_load_mec_firmware(struct pp_smumgr *smumgr) +static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr) { uint32_t reg_data; uint32_t tmp; @@ -179,44 +178,44 @@ static int cz_load_mec_firmware(struct pp_smumgr *smumgr) struct cgs_firmware_info info = {0}; struct cz_smumgr *cz_smu; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cz_smu = (struct cz_smumgr *)smumgr->backend; - ret = cgs_get_firmware_info(smumgr->device, + cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; + ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_CP_MEC, &info); if (ret) return -EINVAL; /* Disable MEC parsing/prefetching */ - tmp = cgs_read_register(smumgr->device, + tmp = cgs_read_register(hwmgr->device, mmCP_MEC_CNTL); - tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); - tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); - cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp); + tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); + tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); + cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp); - tmp = cgs_read_register(smumgr->device, + tmp = cgs_read_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); - cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); + cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp); reg_data = smu_lower_32_bits(info.mc_addr) & - SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); - cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data); + PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); + cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data); reg_data = smu_upper_32_bits(info.mc_addr) & - SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); - cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data); + PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); + cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data); return 0; } -static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, +static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr, enum cz_scratch_entry firmware_enum) { uint8_t ret = 0; @@ -226,7 +225,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, ret = UCODE_ID_SDMA0; break; case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: - if (smumgr->chip_id == CHIP_STONEY) + if (hwmgr->chip_id == CHIP_STONEY) ret = UCODE_ID_SDMA0; else ret = UCODE_ID_SDMA1; @@ -244,7 +243,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, ret = UCODE_ID_CP_MEC_JT1; break; case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: - if (smumgr->chip_id == CHIP_STONEY) + if (hwmgr->chip_id == CHIP_STONEY) ret = UCODE_ID_CP_MEC_JT1; else ret = UCODE_ID_CP_MEC_JT2; @@ -326,17 +325,17 @@ static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type) } static int 
cz_smu_populate_single_scratch_task( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, enum cz_scratch_entry fw_enum, uint8_t type, bool is_last) { uint8_t i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; task->type = type; - task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum); + task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum); task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; for (i = 0; i < cz_smu->scratch_buffer_length; i++) @@ -363,17 +362,17 @@ static int cz_smu_populate_single_scratch_task( } static int cz_smu_populate_single_ucode_load_task( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, enum cz_scratch_entry fw_enum, bool is_last) { uint8_t i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; task->type = TASK_TYPE_UCODE_LOAD; - task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum); + task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum); task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; for (i = 0; i < cz_smu->driver_buffer_length; i++) @@ -392,22 +391,22 @@ static int cz_smu_populate_single_ucode_load_task( return 0; } -static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, TASK_TYPE_UCODE_SAVE, true); return 0; } -static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr) +static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr) { int i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) @@ -416,17 +415,17 @@ static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr) return 0; } -static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, TASK_TYPE_UCODE_SAVE, false); - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, TASK_TYPE_UCODE_SAVE, true); @@ -434,121 +433,120 @@ static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr) } -static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_vddgfx_exit(struct 
pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count; - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (smumgr->chip_id == CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, + if (hwmgr->chip_id == CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); else - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); /* populate scratch */ - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, TASK_TYPE_UCODE_LOAD, false); - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, TASK_TYPE_UCODE_LOAD, false); - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, TASK_TYPE_UCODE_LOAD, true); return 0; } -static int cz_smu_construct_toc_for_power_profiling( - struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, TASK_TYPE_INITIALIZE, true); return 0; } -static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count; - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); - if (smumgr->chip_id != CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, + if (hwmgr->chip_id != CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); - 
cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (smumgr->chip_id != CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, + if (hwmgr->chip_id != CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); return 0; } -static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, TASK_TYPE_INITIALIZE, true); return 0; } -static int cz_smu_construct_toc(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_used_count = 0; - cz_smu_initialize_toc_empty_job_list(smumgr); - cz_smu_construct_toc_for_rlc_aram_save(smumgr); - cz_smu_construct_toc_for_vddgfx_enter(smumgr); - cz_smu_construct_toc_for_vddgfx_exit(smumgr); - cz_smu_construct_toc_for_power_profiling(smumgr); - cz_smu_construct_toc_for_bootup(smumgr); - cz_smu_construct_toc_for_clock_table(smumgr); + cz_smu_initialize_toc_empty_job_list(hwmgr); + cz_smu_construct_toc_for_rlc_aram_save(hwmgr); + cz_smu_construct_toc_for_vddgfx_enter(hwmgr); + cz_smu_construct_toc_for_vddgfx_exit(hwmgr); + cz_smu_construct_toc_for_power_profiling(hwmgr); + cz_smu_construct_toc_for_bootup(hwmgr); + cz_smu_construct_toc_for_clock_table(hwmgr); return 0; } -static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) +static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; uint32_t firmware_type; uint32_t i; int ret; @@ -559,12 +557,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) for (i = 0; i < ARRAY_SIZE(firmware_list); i++) { - firmware_type = cz_translate_firmware_enum_to_arg(smumgr, + firmware_type = cz_translate_firmware_enum_to_arg(hwmgr, firmware_list[i]); ucode_id = cz_convert_fw_type_to_cgs(firmware_type); - ret = cgs_get_firmware_info(smumgr->device, + ret = cgs_get_firmware_info(hwmgr->device, ucode_id, &info); if (ret == 0) { @@ -585,12 +583,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) } static int cz_smu_populate_single_scratch_entry( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, enum cz_scratch_entry scratch_type, uint32_t ulsize_byte, struct cz_buffer_entry *entry) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; long long mc_addr = ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32) | cz_smu->smu_buffer.mc_addr_low; @@ -611,9 +609,9 @@ static int cz_smu_populate_single_scratch_entry( return 0; } -static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table) +static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table) { - 
struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; unsigned long i; for (i = 0; i < cz_smu->scratch_buffer_length; i++) { @@ -624,25 +622,25 @@ static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table) *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr; - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrHi, cz_smu->scratch_buffer[i].mc_addr_high); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrLo, cz_smu->scratch_buffer[i].mc_addr_low); - cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_clock_table); - cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram); + cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram); return 0; } -static int cz_upload_pptable_settings(struct pp_smumgr *smumgr) +static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; unsigned long i; for (i = 0; i < cz_smu->scratch_buffer_length; i++) { @@ -651,63 +649,63 @@ static int cz_upload_pptable_settings(struct pp_smumgr *smumgr) break; } - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrHi, cz_smu->scratch_buffer[i].mc_addr_high); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrLo, cz_smu->scratch_buffer[i].mc_addr_low); - cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_clock_table); - cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu); + cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu); return 0; } -static int cz_request_smu_load_fw(struct pp_smumgr *smumgr) +static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend); + struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend); uint32_t smc_address; - if (!smumgr->reload_fw) { + if (!hwmgr->reload_fw) { pr_info("skip reloading...\n"); return 0; } - cz_smu_populate_firmware_entries(smumgr); + cz_smu_populate_firmware_entries(hwmgr); - cz_smu_construct_toc(smumgr); + cz_smu_construct_toc(hwmgr); smc_address = SMU8_FIRMWARE_HEADER_LOCATION + offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4); + cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DriverDramAddrHi, cz_smu->toc_buffer.mc_addr_high); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DriverDramAddrLo, cz_smu->toc_buffer.mc_addr_low); - cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs); + cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_aram); - cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_power_profiling_index); - return cz_send_msg_to_smc_with_parameter(smumgr, + return 
cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_initialize_index); } -static int cz_start_smu(struct pp_smumgr *smumgr) +static int cz_start_smu(struct pp_hwmgr *hwmgr) { int ret = 0; uint32_t fw_to_check = 0; @@ -721,23 +719,23 @@ static int cz_start_smu(struct pp_smumgr *smumgr) UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; - if (smumgr->chip_id == CHIP_STONEY) + if (hwmgr->chip_id == CHIP_STONEY) fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); - ret = cz_request_smu_load_fw(smumgr); + ret = cz_request_smu_load_fw(hwmgr); if (ret) pr_err("SMU firmware load failed\n"); - cz_check_fw_load_finish(smumgr, fw_to_check); + cz_check_fw_load_finish(hwmgr, fw_to_check); - ret = cz_load_mec_firmware(smumgr); + ret = cz_load_mec_firmware(hwmgr); if (ret) pr_err("Mec Firmware load failed\n"); return ret; } -static int cz_smu_init(struct pp_smumgr *smumgr) +static int cz_smu_init(struct pp_hwmgr *hwmgr) { uint64_t mc_addr = 0; int ret = 0; @@ -747,7 +745,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) if (cz_smu == NULL) return -ENOMEM; - smumgr->backend = cz_smu; + hwmgr->smu_backend = cz_smu; cz_smu->toc_buffer.data_size = 4096; cz_smu->smu_buffer.data_size = @@ -757,7 +755,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) + ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); - ret = smu_allocate_memory(smumgr->device, + ret = smu_allocate_memory(hwmgr->device, cz_smu->toc_buffer.data_size, CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -770,7 +768,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - ret = smu_allocate_memory(smumgr->device, + ret = smu_allocate_memory(hwmgr->device, cz_smu->smu_buffer.data_size, CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -783,7 +781,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, UCODE_ID_RLC_SCRATCH_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -791,14 +789,14 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { pr_err("Error when Populate Firmware Entry.\n"); return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -806,7 +804,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, sizeof(struct SMU8_MultimediaPowerLogData), &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -814,7 +812,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, 
CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, sizeof(struct SMU8_Fusion_ClkTable), &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -825,18 +823,18 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return 0; } -static int cz_smu_fini(struct pp_smumgr *smumgr) +static int cz_smu_fini(struct pp_hwmgr *hwmgr) { struct cz_smumgr *cz_smu; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cz_smu = (struct cz_smumgr *)smumgr->backend; + cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; if (cz_smu) { - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, cz_smu->toc_buffer.handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, cz_smu->smu_buffer.handle); kfree(cz_smu); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 8712f093d6d9..b1a66b5ada4a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -198,7 +198,7 @@ static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda) static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -216,7 +216,7 @@ static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); @@ -299,7 +299,7 @@ static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; @@ -314,7 +314,7 @@ static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; @@ -334,11 +334,11 @@ static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -359,7 +359,7 @@ static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, 
uint32_t fuse_table_offset) static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) { int i; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -370,7 +370,7 @@ static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); if ((hwmgr->thermal_controller.advanceFanControlParameters. usFanOutputSensitivity & (1 << 15)) || @@ -389,7 +389,7 @@ static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -398,14 +398,9 @@ static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; @@ -426,11 +421,11 @@ static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) { uint32_t pm_fuse_table_offset; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -472,19 +467,13 @@ static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed!", return -EINVAL); - /* DW19 */ - if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - /* DW20 */ if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE(false, "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -586,7 +575,7 @@ static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr, { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); int i; /* Index (dpm_table->pcie_speed_table.count) @@ -774,7 +763,7 @@ static int 
fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; struct phm_ppt_v1_information *table_info = @@ -859,7 +848,7 @@ int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) levels[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1000,7 +989,7 @@ static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; /* populate MCLK dpm table to SMU7 */ @@ -1043,7 +1032,7 @@ int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1352,7 +1341,7 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct SMU73_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; int result = 0; @@ -1370,7 +1359,7 @@ static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) if (!result) result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU73_Discrete_MCArbDramTimingTable), @@ -1460,7 +1449,7 @@ static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr, static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint8_t count, level; @@ -1491,7 +1480,7 @@ static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, volt_with_cks, value; uint16_t clock_freq_u16; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = @@ -1694,9 +1683,9 @@ static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, return 0; } -static int 
fiji_init_arb_table_index(struct pp_smumgr *smumgr) +static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -1708,7 +1697,7 @@ static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. */ - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result) @@ -1717,13 +1706,13 @@ static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return smu7_write_smc_sram_dword(smumgr, + return smu7_write_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct SMU73_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; unsigned min_level = 1; @@ -1782,7 +1771,7 @@ static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr) } } if (mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LedConfig, mask); return 0; @@ -1799,7 +1788,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) { int result; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -1985,7 +1974,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), @@ -1994,7 +1983,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to upload dpm data to SMC memory!", return result); - result = fiji_init_arb_table_index(hwmgr->smumgr); + result = fiji_init_arb_table_index(hwmgr); PP_ASSERT_WITH_CODE(0 == result, "Failed to upload arb data to SMC memory!", return result); @@ -2022,7 +2011,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) */ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; @@ -2104,20 +2093,20 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start, + res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); if (!res && hwmgr->thermal_controller. 
advanceFanControlParameters.ucMinimumPWMLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanMinPwm, hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ulMinFanSCLKAcousticLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanSclkTarget, hwmgr->thermal_controller. advanceFanControlParameters.ulMinFanSCLKAcousticLimit); @@ -2133,13 +2122,12 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; - struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) return 0; - ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); if (!ret) /* If this param is not changed, this function could fire unnecessarily */ @@ -2162,7 +2150,7 @@ static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2179,7 +2167,7 @@ int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2256,7 +2244,7 @@ uint32_t fiji_get_mac_definition(uint32_t value) static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2280,7 +2268,7 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDDPM) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); return 0; @@ -2288,7 +2276,7 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2312,7 +2300,7 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + 
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); return 0; @@ -2320,7 +2308,7 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; @@ -2339,7 +2327,7 @@ static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SAMUDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); return 0; @@ -2373,12 +2361,12 @@ int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2388,7 +2376,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2400,7 +2388,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2408,7 +2396,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) if (!result) smu_data->smu7_data.mc_reg_table_start = tmp; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2418,7 +2406,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2428,7 +2416,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2476,7 +2464,7 @@ int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { struct fiji_smumgr *smu_data = (struct fiji_smumgr *) - (hwmgr->smumgr->backend); + (hwmgr->smu_backend); struct SMU73_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; uint32_t array = smu_data->smu7_data.dpm_table_start + @@ -2493,6 +2481,6 @@ int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, levels[i].DownHyst = request->down_hyst; } - return smu7_copy_bytes_to_smc(hwmgr->smumgr, 
array, (uint8_t *)levels, + return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, array_size, SMC_RAM_END); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 6ae948fc524f..5b25e067b2f1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -58,122 +58,122 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = { { 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 } }; -static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) +static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* Wait for smc boot up */ - /* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + /* PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result) return result; /* Clear status */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for ROM firmware to initialize interrupt hendler */ - /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND, + /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, SMC_IND, SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */ /* Set SMU Auto Start */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_INPUT_DATA, AUTO_START, 1); /* Clear firmware interrupt enable flag */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); /* Wait for done bit to be set */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ - if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS) != 1) { PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); } /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int 
fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) +static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* Clear firmware interrupt enable flag */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result) return result; /* Set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); /* Enable clock */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) +static int fiji_setup_pwr_virus(struct pp_hwmgr *hwmgr) { int i; int result = -EINVAL; uint32_t reg, data; const PWR_Command_Table *pvirus = PwrVirusTable; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { switch (pvirus->command) { case PwrCmdWrite: reg = pvirus->reg; data = pvirus->data; - cgs_write_register(smumgr->device, reg, data); + cgs_write_register(hwmgr->device, reg, data); break; case PwrCmdEnd: @@ -192,13 +192,13 @@ static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) return result; } -static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) +static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (0 != smu_data->avfs.avfs_btc_param) { - if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, + if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed"); result = -EINVAL; @@ -206,23 +206,23 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) } /* Soft-Reset to reset the engine before loading uCode */ /* halt */ - cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000); + cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000); /* reset everything */ - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff); /* clear reset */ - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0); return result; } -static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) +static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr) { int32_t 
vr_config; uint32_t table_start; uint32_t level_addr, vr_config_addr; uint32_t level_size = sizeof(avfs_graphics_level); - PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, DpmTable), &table_start, 0x40000), @@ -237,7 +237,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) vr_config_addr = table_start + offsetof(SMU73_Discrete_DpmTable, VRConfig); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_addr, (uint8_t *)&vr_config, sizeof(int32_t), 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] Problems copying " "vr_config value over to SMC", @@ -245,7 +245,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, level_addr, (uint8_t *)(&avfs_graphics_level), level_size, 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!", return -1;); @@ -253,9 +253,9 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) return 0; } -static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) +static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: @@ -265,17 +265,17 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) if (!smu_started) break; smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr), + PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr), "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level" " table over to SMU", return -EINVAL;); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; - PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr), + PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(hwmgr), "[AVFS][fiji_avfs_event_mgr] Could not setup " "Pwr Virus for AVFS ", return -EINVAL;); smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr), + PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr), "[AVFS][fiji_avfs_event_mgr] Failure at " "fiji_start_avfs_btc. 
AVFS Disabled", return -EINVAL;); @@ -293,64 +293,64 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) return 0; } -static int fiji_start_smu(struct pp_smumgr *smumgr) +static int fiji_start_smu(struct pp_hwmgr *hwmgr) { int result = 0; - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Only start SMC if SMC RAM is not running */ - if (!(smu7_is_smc_ram_running(smumgr) - || cgs_is_virtualization_enabled(smumgr->device))) { - fiji_avfs_event_mgr(smumgr, false); + if (!(smu7_is_smc_ram_running(hwmgr) + || cgs_is_virtualization_enabled(hwmgr->device))) { + fiji_avfs_event_mgr(hwmgr, false); /* Check if SMU is running in protected mode */ - if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, + if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { - result = fiji_start_smu_in_non_protection_mode(smumgr); + result = fiji_start_smu_in_non_protection_mode(hwmgr); if (result) return result; } else { - result = fiji_start_smu_in_protection_mode(smumgr); + result = fiji_start_smu_in_protection_mode(hwmgr); if (result) return result; } - fiji_avfs_event_mgr(smumgr, true); + fiji_avfs_event_mgr(hwmgr, true); } /* To initialize all clock gating before RLC loaded and running.*/ - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE); /* Setup SoftRegsStart here for register lookup in case * DummyBackEnd is used and ProcessFirmwareHeader is not executed */ - smu7_read_smc_sram_dword(smumgr, + smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, SoftRegisters), &(priv->smu7_data.soft_regs_start), 0x40000); - result = smu7_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(hwmgr); return result; } -static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr) +static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr) { uint32_t efuse = 0; uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1; - if (cgs_is_virtualization_enabled(smumgr->device)) + if (cgs_is_virtualization_enabled(hwmgr->device)) return 0; - if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB, + if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB, mask, &efuse)) { if (efuse) return true; @@ -365,7 +365,7 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr) * @param smc_addr the address in the SMC RAM to access. * @param value to write to the SMC SRAM. 
*/ -static int fiji_smu_init(struct pp_smumgr *smumgr) +static int fiji_smu_init(struct pp_hwmgr *hwmgr) { int i; struct fiji_smumgr *fiji_priv = NULL; @@ -375,9 +375,9 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) if (fiji_priv == NULL) return -ENOMEM; - smumgr->backend = fiji_priv; + hwmgr->smu_backend = fiji_priv; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index 51adf04ab4b3..efb0fc033274 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -101,7 +101,7 @@ static const struct iceland_pt_defaults defaults_icelandpro = { static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct cgs_system_info sys_info = {0}; uint32_t dev_id; @@ -130,7 +130,7 @@ static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; @@ -144,7 +144,7 @@ static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); @@ -159,11 +159,11 @@ static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -184,7 +184,7 @@ static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. 
*/ for (i = 0; i < 8; i++) @@ -193,14 +193,9 @@ static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; @@ -219,7 +214,7 @@ static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) { int i; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; @@ -245,7 +240,7 @@ static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) { int i; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint8_t *vid = smu_data->power_tune_table.VddCVid; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -264,12 +259,12 @@ static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint32_t pm_fuse_table_offset; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -317,19 +312,13 @@ static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed!", return -EINVAL); - /* DW17 */ - if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - /* DW18 */ if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE(false, "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -339,7 +328,7 @@ static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) return 0; } -static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, +static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, uint32_t clock, uint32_t *vol) { @@ -601,7 +590,7 @@ static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discret { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct smu7_dpm_table *dpm_table = 
&data->dpm_table; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint32_t i; /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */ @@ -749,7 +738,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level); /* populate graphics levels*/ - result = iceland_get_dependecy_volt_by_clk(hwmgr, + result = iceland_get_dependency_volt_by_clk(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc); PP_ASSERT_WITH_CODE((0 == result), @@ -816,7 +805,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, GraphicsLevel); @@ -892,7 +881,7 @@ int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1104,7 +1093,7 @@ static int iceland_populate_single_memory_level( uint32_t mclk_strobe_mode_threshold = 40000; if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) { - result = iceland_get_dependecy_volt_by_clk(hwmgr, + result = iceland_get_dependency_volt_by_clk(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); PP_ASSERT_WITH_CODE((0 == result), "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); @@ -1113,7 +1102,7 @@ static int iceland_populate_single_memory_level( if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) { memory_level->MinVddci = memory_level->MinVddc; } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { - result = iceland_get_dependecy_volt_by_clk(hwmgr, + result = iceland_get_dependency_volt_by_clk(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk, memory_clock, &memory_level->MinVddci); @@ -1218,7 +1207,7 @@ static int iceland_populate_single_memory_level( int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; @@ -1257,7 +1246,7 @@ int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1496,7 +1485,7 @@ static int 
iceland_populate_memory_timing_parameters( static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); int result = 0; SMU71_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; @@ -1518,7 +1507,7 @@ static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) if (0 == result) { result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU71_Discrete_MCArbDramTimingTable), @@ -1534,7 +1523,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, { int result = 0; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); table->GraphicsBootLevel = 0; table->MemoryBootLevel = 0; @@ -1572,10 +1561,10 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, return result; } -static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr, +static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table) { - const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend; + const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend; uint32_t i, j; @@ -1612,13 +1601,12 @@ static void iceland_convert_mc_registers( } } -static int iceland_convert_mc_reg_table_entry_to_smc( - struct pp_smumgr *smumgr, +static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, SMU71_Discrete_MCRegisterSet *mc_reg_table_data ) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint32_t i = 0; for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { @@ -1648,7 +1636,7 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, for (i = 0; i < data->dpm_table.mclk_table.count; i++) { res = iceland_convert_mc_reg_table_entry_to_smc( - hwmgr->smumgr, + hwmgr, data->dpm_table.mclk_table.dpm_levels[i].value, &mc_regs->data[i] ); @@ -1662,8 +1650,7 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t address; int32_t result; @@ -1682,7 +1669,7 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]); - return smu7_copy_bytes_to_smc(hwmgr->smumgr, address, + return smu7_copy_bytes_to_smc(hwmgr, address, (uint8_t *)&smu_data->mc_regs.data[0], sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, SMC_RAM_END); @@ -1691,11 +1678,10 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct pp_smumgr *smumgr 
= hwmgr->smumgr; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters)); - result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize MCRegTable for the MC register addresses!", return result;); @@ -1703,14 +1689,14 @@ static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize MCRegTable for driver state!", return result;); - return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start, + return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start, (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END); } static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint8_t count, level; count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); @@ -1739,7 +1725,7 @@ static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; @@ -1776,7 +1762,7 @@ static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); - dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient); def1 = defaults->bapmti_r; def2 = defaults->bapmti_rc; @@ -1827,7 +1813,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) { int result; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -1955,7 +1941,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start + + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController), @@ -1965,7 +1951,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to upload dpm data to SMC memory!", return result;); /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */ - result 
= smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.ulv_setting_starts, (uint8_t *)&(smu_data->ulv_setting), sizeof(SMU71_Discrete_Ulv), @@ -1994,7 +1980,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) */ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend); SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; @@ -2064,7 +2050,7 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) /* fan_table.FanControl_GL_Flag = 1; */ - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); + res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); return 0; } @@ -2084,7 +2070,7 @@ static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2101,7 +2087,7 @@ int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2182,13 +2168,13 @@ uint32_t iceland_get_mac_definition(uint32_t value) int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2199,7 +2185,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2212,7 +2198,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2221,7 +2207,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) smu7_data->mc_reg_table_start = tmp; } - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2232,7 +2218,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, 
mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2244,7 +2230,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2255,7 +2241,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, UlvSettings), &tmp, SMC_RAM_END); @@ -2522,7 +2508,7 @@ static int iceland_set_valid_flag(struct iceland_mc_reg_table *table) int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); pp_atomctrl_mc_reg_table *table; struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table; uint8_t module_index = iceland_get_memory_modile_index(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 0bf2def3b659..78aa1122eacc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -39,55 +39,55 @@ #define ICELAND_SMC_SIZE 0x20000 -static int iceland_start_smc(struct pp_smumgr *smumgr) +static int iceland_start_smc(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); return 0; } -static void iceland_reset_smc(struct pp_smumgr *smumgr) +static void iceland_reset_smc(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); } -static void iceland_stop_smc_clock(struct pp_smumgr *smumgr) +static void iceland_stop_smc_clock(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); } -static void iceland_start_smc_clock(struct pp_smumgr *smumgr) +static void iceland_start_smc_clock(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); } -static int iceland_smu_start_smc(struct pp_smumgr *smumgr) +static int iceland_smu_start_smc(struct pp_hwmgr *hwmgr) { /* set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); /* enable smc clock */ - iceland_start_smc_clock(smumgr); + iceland_start_smc_clock(hwmgr); /* de-assert reset */ - iceland_start_smc(smumgr); + iceland_start_smc(hwmgr); - SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, + PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; } -static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, +static int iceland_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, const uint8_t *src, uint32_t limit, uint32_t start_addr) { @@ -96,17 +96,17 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM 
area.", return -EINVAL); - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); while (byte_count >= 4) { data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); src += 4; byte_count -= 4; } - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); @@ -114,16 +114,16 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, } -static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) +static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr) { uint32_t val; struct cgs_firmware_info info = {0}; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; /* load SMC firmware */ - cgs_get_firmware_info(smumgr->device, + cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); if (info.image_size & 3) { @@ -137,56 +137,56 @@ static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) } /* wait for smc boot up */ - SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* clear firmware interrupt enable flag */ - val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, + val = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_SYSCON_MISC_CNTL); - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_SYSCON_MISC_CNTL, val | 1); /* stop smc clock */ - iceland_stop_smc_clock(smumgr); + iceland_stop_smc_clock(hwmgr); /* reset smc */ - iceland_reset_smc(smumgr); - iceland_upload_smc_firmware_data(smumgr, info.image_size, + iceland_reset_smc(hwmgr); + iceland_upload_smc_firmware_data(hwmgr, info.image_size, (uint8_t *)info.kptr, ICELAND_SMC_SIZE, info.ucode_start_address); return 0; } -static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr, +static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr, uint32_t firmwareType) { return 0; } -static int iceland_start_smu(struct pp_smumgr *smumgr) +static int iceland_start_smu(struct pp_hwmgr *hwmgr) { int result; - result = iceland_smu_upload_firmware_image(smumgr); + result = iceland_smu_upload_firmware_image(hwmgr); if (result) return result; - result = iceland_smu_start_smc(smumgr); + result = iceland_smu_start_smc(hwmgr); if (result) return result; - if (!smu7_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(hwmgr)) { pr_info("smu not running, upload firmware again \n"); - result = iceland_smu_upload_firmware_image(smumgr); + result = iceland_smu_upload_firmware_image(hwmgr); if (result) return result; - result = iceland_smu_start_smc(smumgr); + result = iceland_smu_start_smc(hwmgr); if (result) return result; } - result = smu7_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(hwmgr); return result; } @@ -198,7 +198,7 @@ static int iceland_start_smu(struct pp_smumgr *smumgr) * @param smcAddress the address in the SMC RAM to access. 
* @param value to write to the SMC SRAM. */ -static int iceland_smu_init(struct pp_smumgr *smumgr) +static int iceland_smu_init(struct pp_hwmgr *hwmgr) { int i; struct iceland_smumgr *iceland_priv = NULL; @@ -208,9 +208,9 @@ static int iceland_smu_init(struct pp_smumgr *smumgr) if (iceland_priv == NULL) return -ENOMEM; - smumgr->backend = iceland_priv; + hwmgr->smu_backend = iceland_priv; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h index 8eae01b37c40..802472530d34 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h @@ -39,7 +39,7 @@ struct iceland_pt_defaults { uint8_t tdc_waterfall_ctl; uint8_t dte_ambient_temp_base; uint32_t display_cac; - uint32_t bamp_temp_gradient; + uint32_t bapm_temp_gradient; uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 99a00bd39256..c92ea38d2e15 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -148,7 +148,7 @@ static uint16_t scale_fan_gain_settings(uint16_t raw_setting) static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -196,7 +196,7 @@ static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmg static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; @@ -210,7 +210,7 @@ static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr) static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; @@ -227,11 +227,11 @@ static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, 
SMC_RAM_END)) @@ -252,7 +252,7 @@ static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_of static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr) { int i; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -263,7 +263,7 @@ static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* TO DO move to hwmgr */ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) @@ -279,7 +279,7 @@ static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -288,14 +288,9 @@ static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; @@ -315,12 +310,12 @@ static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t pm_fuse_table_offset; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -358,17 +353,12 @@ static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed!", return -EINVAL); - if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE(false, "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -494,7 +484,6 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *table_info = (struct 
phm_ppt_v1_information *)(hwmgr->pptable); - struct pp_smumgr *smumgr = hwmgr->smumgr; state->CcPwrDynRm = 0; state->CcPwrDynRm1 = 0; @@ -503,7 +492,7 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); - if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker) + if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) state->VddcPhase = data->vddc_phase_shed_control ^ 0x3; else state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; @@ -525,7 +514,7 @@ static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, struct SMU74_Discrete_DpmTable *table) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int i; @@ -556,8 +545,7 @@ static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr, SMU74_Discrete_DpmTable *table) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t i, ref_clk; struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; @@ -607,8 +595,7 @@ static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr, static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr, uint32_t clock, SMU_SclkSetting *sclk_setting) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); struct pp_atomctrl_clock_dividers_ai dividers; uint32_t ref_clock; @@ -751,9 +738,8 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, */ int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -838,7 +824,7 @@ int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) levels[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -880,7 +866,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, if (mclk_stutter_mode_threshold && (clock <= mclk_stutter_mode_threshold) && - (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, + (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)) mem_level->StutterEnable = true; @@ -900,9 +886,8 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, */ int 
polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; int result; /* populate MCLK dpm table to SMU7 */ @@ -943,7 +928,7 @@ int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1201,9 +1186,8 @@ static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct SMU74_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; int result = 0; @@ -1222,7 +1206,7 @@ static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) } result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU74_Discrete_MCArbDramTimingTable), @@ -1321,9 +1305,8 @@ static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr, static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint8_t count, level; @@ -1354,8 +1337,7 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) { uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = @@ -1438,7 +1420,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, struct SMU74_Discrete_DpmTable *table) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint16_t config; config = VR_MERGED_WITH_VDDC; @@ -1482,8 +1464,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct 
polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); int result = 0; @@ -1534,20 +1515,20 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); } - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), &tmp, SMC_RAM_END); - smu7_copy_bytes_to_smc(smumgr, + smu7_copy_bytes_to_smc(hwmgr, tmp, (uint8_t *)&AVFS_meanNsigma, sizeof(AVFS_meanNsigma_t), SMC_RAM_END); - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), &tmp, SMC_RAM_END); - smu7_copy_bytes_to_smc(smumgr, + smu7_copy_bytes_to_smc(hwmgr, tmp, (uint8_t *)&AVFS_SclkOffset, sizeof(AVFS_Sclk_Offset_t), @@ -1569,9 +1550,9 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) * @param hwmgr the address of the powerplay hardware manager. * @return always 0 */ -static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) +static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -1583,7 +1564,7 @@ static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. */ - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result) @@ -1592,13 +1573,13 @@ static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return smu7_write_smc_sram_dword(smumgr, + return smu7_write_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -1615,7 +1596,7 @@ static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct SMU74_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; unsigned min_level = 1; @@ -1658,9 +1639,9 @@ static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr) int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) { int result; - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct 
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -1852,7 +1833,7 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), @@ -1861,7 +1842,7 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to upload dpm data to SMC memory!", return result); - result = polaris10_init_arb_table_index(hwmgr->smumgr); + result = polaris10_init_arb_table_index(hwmgr); PP_ASSERT_WITH_CODE(0 == result, "Failed to upload arb data to SMC memory!", return result); @@ -1888,17 +1869,16 @@ static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; - struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) return 0; - ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); - ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? + ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ? 0 : -1; if (!ret) @@ -1919,7 +1899,7 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) */ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; @@ -2000,20 +1980,20 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start, + res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanMinPwm, hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ulMinFanSCLKAcousticLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanSclkTarget, hwmgr->thermal_controller. 
advanceFanControlParameters.ulMinFanSCLKAcousticLimit); @@ -2027,7 +2007,7 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2051,7 +2031,7 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDDPM) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); return 0; @@ -2059,7 +2039,7 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2083,7 +2063,7 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); return 0; @@ -2091,7 +2071,7 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; @@ -2110,7 +2090,7 @@ static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SAMUDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); return 0; @@ -2119,7 +2099,7 @@ static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; @@ -2157,7 +2137,7 @@ int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t 
low_sclk_interrupt_threshold = 0; @@ -2174,7 +2154,7 @@ int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2262,13 +2242,13 @@ uint32_t polaris10_get_mac_definition(uint32_t value) */ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t tmp; int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2278,7 +2258,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2290,7 +2270,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2298,7 +2278,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) if (!result) smu_data->smu7_data.mc_reg_table_start = tmp; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2308,7 +2288,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2318,7 +2298,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2342,7 +2322,7 @@ int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *) - (hwmgr->smumgr->backend); + (hwmgr->smu_backend); struct SMU74_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; uint32_t array = smu_data->smu7_data.dpm_table_start + @@ -2359,6 +2339,6 @@ int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, levels[i].DownHyst = request->down_hyst; } - return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, array_size, SMC_RAM_END); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 75f43dadc56b..22b8ecbf7fce 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -60,21 +60,21 @@ static const SMU74_Discrete_GraphicsLevel 
avfs_graphics_level_polaris10[8] = { static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = { 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; -static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) +static int polaris10_setup_pwr_virus(struct pp_hwmgr *hwmgr) { int i; int result = -EINVAL; uint32_t reg, data; const PWR_Command_Table *pvirus = pwr_virus_table; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { switch (pvirus->command) { case PwrCmdWrite: reg = pvirus->reg; data = pvirus->data; - cgs_write_register(smumgr->device, reg, data); + cgs_write_register(hwmgr->device, reg, data); break; case PwrCmdEnd: @@ -93,13 +93,13 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) return result; } -static int polaris10_perform_btc(struct pp_smumgr *smumgr) +static int polaris10_perform_btc(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (0 != smu_data->avfs.avfs_btc_param) { - if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { + if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); result = -1; } @@ -107,16 +107,16 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr) if (smu_data->avfs.avfs_btc_param > 1) { /* Soft-Reset to reset the engine before loading uCode */ /* halt */ - cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000); + cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000); /* reset everything */ - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff); - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0); } return result; } -static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) +static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr) { uint32_t vr_config; uint32_t dpm_table_start; @@ -127,7 +127,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_size = sizeof(avfs_graphics_level_polaris10); u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE); - PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable), &dpm_table_start, 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table", @@ -138,14 +138,14 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_address, (uint8_t *)&vr_config, sizeof(uint32_t), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC", return -1); graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); - PP_ASSERT_WITH_CODE(0 == 
smu7_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address, (uint8_t *)(&avfs_graphics_level_polaris10), graphics_level_size, 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!", @@ -153,7 +153,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address, (uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!", return -1); @@ -162,7 +162,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address, (uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!", return -1); @@ -172,9 +172,9 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) static int -polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) +polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: @@ -183,20 +183,20 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */ smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED; - PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr), "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU", return -EINVAL); if (smu_data->avfs.avfs_btc_param > 1) { pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting."); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; - PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(hwmgr), "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ", return -EINVAL); } smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr), "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. 
AVFS Disabled", return -EINVAL); smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS; @@ -215,146 +215,146 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) return 0; } -static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr) +static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* Wait for smc boot up */ - /* SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */ + /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */ /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result != 0) return result; /* Clear status */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); /* Call Test SMU message with 0x20000 offset to trigger SMU start */ - smu7_send_msg_to_smc_offset(smumgr); + smu7_send_msg_to_smc_offset(hwmgr); /* Wait done bit to be set */ /* Check pass/failed indicator */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); - if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) +static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* Clear firmware interrupt enable flag */ - /* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + /* 
PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int polaris10_start_smu(struct pp_smumgr *smumgr) +static int polaris10_start_smu(struct pp_hwmgr *hwmgr) { int result = 0; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); bool SMU_VFT_INTACT; /* Only start SMC if SMC RAM is not running */ - if (!smu7_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(hwmgr)) { SMU_VFT_INTACT = false; - smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); - smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); + smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); + smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); /* Check if SMU is running in protected mode */ if (smu_data->protected_mode == 0) { - result = polaris10_start_smu_in_non_protection_mode(smumgr); + result = polaris10_start_smu_in_non_protection_mode(hwmgr); } else { - result = polaris10_start_smu_in_protection_mode(smumgr); + result = polaris10_start_smu_in_protection_mode(hwmgr); /* If failed, try with different security Key. 
*/ if (result != 0) { smu_data->smu7_data.security_hard_key ^= 1; - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); - result = polaris10_start_smu_in_protection_mode(smumgr); + cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); + result = polaris10_start_smu_in_protection_mode(hwmgr); } } if (result != 0) PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result); - polaris10_avfs_event_mgr(smumgr, true); + polaris10_avfs_event_mgr(hwmgr, true); } else SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */ - polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT); + polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT); /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */ - smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), + smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), &(smu_data->smu7_data.soft_regs_start), 0x40000); - result = smu7_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(hwmgr); return result; } -static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) +static bool polaris10_is_hw_avfs_present(struct pp_hwmgr *hwmgr) { uint32_t efuse; - efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); efuse &= 0x00000001; if (efuse) return true; @@ -362,7 +362,7 @@ static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) return false; } -static int polaris10_smu_init(struct pp_smumgr *smumgr) +static int polaris10_smu_init(struct pp_hwmgr *hwmgr) { struct polaris10_smumgr *smu_data; int i; @@ -371,9 +371,9 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) if (smu_data == NULL) return -ENOMEM; - smumgr->backend = smu_data; + hwmgr->smu_backend = smu_data; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c index ce0a30388ea1..b98ade676d12 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c @@ -48,20 +48,20 @@ #define smnMP1_FIRMWARE_FLAGS 0x3010028 -bool rv_is_smc_ram_running(struct pp_smumgr *smumgr) +bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr) { uint32_t mp1_fw_flags, reg; reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); - cgs_write_register(smumgr->device, reg, + cgs_write_register(hwmgr->device, reg, (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); - mp1_fw_flags = cgs_read_register(smumgr->device, reg); + mp1_fw_flags = cgs_read_register(hwmgr->device, reg); if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) return true; @@ -69,97 +69,97 @@ bool rv_is_smc_ram_running(struct pp_smumgr *smumgr) return false; } -static uint32_t rv_wait_for_response(struct pp_smumgr *smumgr) +static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr) { uint32_t reg; - if (!rv_is_smc_ram_running(smumgr)) + if (!rv_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - smum_wait_for_register_unequal(smumgr, reg, + 
phm_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); - return cgs_read_register(smumgr->device, reg); + return cgs_read_register(hwmgr->device, reg); } -int rv_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, +int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - if (!rv_is_smc_ram_running(smumgr)) + if (!rv_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(smumgr->device, reg, msg); + cgs_write_register(hwmgr->device, reg, msg); return 0; } -int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg) +int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg) { uint32_t reg; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - *arg = cgs_read_register(smumgr->device, reg); + *arg = cgs_read_register(hwmgr->device, reg); return 0; } -int rv_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - rv_wait_for_response(smumgr); + rv_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); - rv_send_msg_to_smc_without_waiting(smumgr, msg); + rv_send_msg_to_smc_without_waiting(hwmgr, msg); - if (rv_wait_for_response(smumgr) == 0) + if (rv_wait_for_response(hwmgr) == 0) printk("Failed to send Message %x.\n", msg); return 0; } -int rv_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { uint32_t reg; - rv_wait_for_response(smumgr); + rv_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(smumgr->device, reg, parameter); + cgs_write_register(hwmgr->device, reg, parameter); - rv_send_msg_to_smc_without_waiting(smumgr, msg); + rv_send_msg_to_smc_without_waiting(hwmgr, msg); - if (rv_wait_for_response(smumgr) == 0) + if (rv_wait_for_response(hwmgr) == 0) printk("Failed to send Message %x.\n", msg); return 0; } -int rv_copy_table_from_smc(struct pp_smumgr *smumgr, +int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct rv_smumgr *priv = - (struct rv_smumgr *)(smumgr->backend); + (struct rv_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL;); @@ -167,16 +167,16 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr, "Invalid SMU Table version!", return -EINVAL;); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, 
priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", @@ -188,11 +188,11 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr, return 0; } -int rv_copy_table_to_smc(struct pp_smumgr *smumgr, +int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct rv_smumgr *priv = - (struct rv_smumgr *)(smumgr->backend); + (struct rv_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL;); @@ -204,17 +204,17 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableDram2Smu, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", @@ -223,15 +223,15 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr, return 0; } -static int rv_verify_smc_interface(struct pp_smumgr *smumgr) +static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr) { uint32_t smc_driver_if_version; - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion), "Attempt to get SMC IF Version Number Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr, &smc_driver_if_version), "Attempt to read SMC IF Version Number Failed!", return -EINVAL); @@ -243,9 +243,9 @@ static int rv_verify_smc_interface(struct pp_smumgr *smumgr) } /* sdma is disabled by default in vbios, need to re-enable in driver */ -static int rv_smc_enable_sdma(struct pp_smumgr *smumgr) +static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma), "Attempt to power up sdma Failed!", return -EINVAL); @@ -253,9 +253,9 @@ static int rv_smc_enable_sdma(struct pp_smumgr *smumgr) return 0; } -static int rv_smc_disable_sdma(struct pp_smumgr *smumgr) +static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma), "Attempt to power down sdma Failed!", return -EINVAL); @@ -264,9 +264,9 @@ static int rv_smc_disable_sdma(struct pp_smumgr *smumgr) } /* vcn is disabled by default in vbios, need to re-enable in driver */ -static int rv_smc_enable_vcn(struct pp_smumgr *smumgr) +static int rv_smc_enable_vcn(struct 
pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerUpVcn, 0), "Attempt to power up vcn Failed!", return -EINVAL); @@ -274,9 +274,9 @@ static int rv_smc_enable_vcn(struct pp_smumgr *smumgr) return 0; } -static int rv_smc_disable_vcn(struct pp_smumgr *smumgr) +static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerDownVcn, 0), "Attempt to power down vcn Failed!", return -EINVAL); @@ -284,38 +284,38 @@ static int rv_smc_disable_vcn(struct pp_smumgr *smumgr) return 0; } -static int rv_smu_fini(struct pp_smumgr *smumgr) +static int rv_smu_fini(struct pp_hwmgr *hwmgr) { struct rv_smumgr *priv = - (struct rv_smumgr *)(smumgr->backend); + (struct rv_smumgr *)(hwmgr->smu_backend); if (priv) { - rv_smc_disable_sdma(smumgr); - rv_smc_disable_vcn(smumgr); - cgs_free_gpu_mem(smumgr->device, + rv_smc_disable_sdma(hwmgr); + rv_smc_disable_vcn(hwmgr); + cgs_free_gpu_mem(hwmgr->device, priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, priv->smu_tables.entry[CLOCKTABLE].handle); - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; } return 0; } -static int rv_start_smu(struct pp_smumgr *smumgr) +static int rv_start_smu(struct pp_hwmgr *hwmgr) { - if (rv_verify_smc_interface(smumgr)) + if (rv_verify_smc_interface(hwmgr)) return -EINVAL; - if (rv_smc_enable_sdma(smumgr)) + if (rv_smc_enable_sdma(hwmgr)) return -EINVAL; - if (rv_smc_enable_vcn(smumgr)) + if (rv_smc_enable_vcn(hwmgr)) return -EINVAL; return 0; } -static int rv_smu_init(struct pp_smumgr *smumgr) +static int rv_smu_init(struct pp_hwmgr *hwmgr) { struct rv_smumgr *priv; uint64_t mc_addr; @@ -327,10 +327,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr) if (!priv) return -ENOMEM; - smumgr->backend = priv; + hwmgr->smu_backend = priv; /* allocate space for watermarks table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(Watermarks_t), CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -340,8 +340,8 @@ static int rv_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[rv_smu_init] Out of memory for wmtable.", - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; return -EINVAL); priv->smu_tables.entry[WMTABLE].version = 0x01; @@ -355,7 +355,7 @@ static int rv_smu_init(struct pp_smumgr *smumgr) priv->smu_tables.entry[WMTABLE].handle = handle; /* allocate space for watermarks table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(DpmClocks_t), CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -365,10 +365,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[rv_smu_init] Out of memory for CLOCKTABLE.", - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; return -EINVAL); priv->smu_tables.entry[CLOCKTABLE].version = 0x01; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h index 262c8ded87c0..58888400f1b8 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h +++ 
b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h @@ -51,11 +51,11 @@ struct rv_smumgr { struct smu_table_array smu_tables; }; -int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg); -bool rv_is_smc_ram_running(struct pp_smumgr *smumgr); -int rv_copy_table_from_smc(struct pp_smumgr *smumgr, +int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg); +bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr); +int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); -int rv_copy_table_to_smc(struct pp_smumgr *smumgr, +int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index c49a6f22002f..2ae05bbdb974 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -34,18 +34,18 @@ #define SMU7_SMC_SIZE 0x20000 -static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit) +static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit) { PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL); PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL); - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */ + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */ return 0; } -int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) +int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) { uint32_t data; uint32_t addr; @@ -59,7 +59,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres addr = smc_start_address; while (byte_count >= 4) { - smu7_read_smc_sram_dword(smumgr, addr, &data, limit); + smu7_read_smc_sram_dword(hwmgr, addr, &data, limit); *dest = PP_SMC_TO_HOST_UL(data); @@ -69,7 +69,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres } if (byte_count) { - smu7_read_smc_sram_dword(smumgr, addr, &data, limit); + smu7_read_smc_sram_dword(hwmgr, addr, &data, limit); *pdata = PP_SMC_TO_HOST_UL(data); /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */ dest_byte = (uint8_t *)dest; @@ -81,7 +81,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres } -int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, +int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) { int result; @@ -99,12 +99,12 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, /* Bytes are written into the SMC addres space with the MSB first. 
*/ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - result = smu7_set_smc_sram_address(smumgr, addr, limit); + result = smu7_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data); src += 4; byte_count -= 4; @@ -115,13 +115,13 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, data = 0; - result = smu7_set_smc_sram_address(smumgr, addr, limit); + result = smu7_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); + original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11); extra_shift = 8 * (4 - byte_count); @@ -135,53 +135,53 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, data |= (original_data & ~((~0UL) << extra_shift)); - result = smu7_set_smc_sram_address(smumgr, addr, limit); + result = smu7_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data); } return 0; } -int smu7_program_jump_on_start(struct pp_smumgr *smumgr) +int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr) { static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; - smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1); + smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1); return 0; } -bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr) +bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr) { - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) - && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); + return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) + && (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); } -int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int ret; - if (!smu7_is_smc_ram_running(smumgr)) + if (!smu7_is_smc_ram_running(hwmgr)) return -EINVAL; - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) pr_info("\n failed to send pre message %x ret is %d \n", msg, ret); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) pr_info("\n failed to send message %x ret is %d \n", msg, ret); @@ -189,53 +189,53 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) return 0; } -int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg) +int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); return 0; } -int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr 
*smumgr, uint16_t msg, uint32_t parameter) +int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (!smu7_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(hwmgr)) { return -EINVAL; } - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); - return smu7_send_msg_to_smc(smumgr, msg); + return smu7_send_msg_to_smc(hwmgr, msg); } -int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) +int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); - return smu7_send_msg_to_smc_without_waiting(smumgr, msg); + return smu7_send_msg_to_smc_without_waiting(hwmgr, msg); } -int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr) +int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr) { - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) + if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP)) pr_info("Failed to send Message.\n"); return 0; } -int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr) +int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr) { - if (!smu7_is_smc_ram_running(smumgr)) + if (!smu7_is_smc_ram_running(hwmgr)) return -EINVAL; - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); return 0; } @@ -289,29 +289,29 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) } -int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) +int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) { int result; - result = smu7_set_smc_sram_address(smumgr, smc_addr, limit); + result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit); if (result) return result; - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); + *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11); return 0; } -int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit) +int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit) { int result; - result = smu7_set_smc_sram_address(smumgr, smc_addr, limit); + result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit); if (result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value); return 0; } @@ -354,14 +354,14 @@ static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type) return result; } -static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, +static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr, uint32_t fw_type, struct 
SMU_Entry *entry) { int result = 0; struct cgs_firmware_info info = {0}; - result = cgs_get_firmware_info(smumgr->device, + result = cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(fw_type), &info); @@ -374,7 +374,7 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, entry->meta_data_addr_low = 0; /* digest need be excluded out */ - if (cgs_is_virtualization_enabled(smumgr->device)) + if (cgs_is_virtualization_enabled(hwmgr->device)) info.image_size -= 20; entry->data_size_byte = info.image_size; entry->num_register_entries = 0; @@ -389,30 +389,30 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, return 0; } -int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) +int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t fw_to_load; int result = 0; struct SMU_DRAMData_TOC *toc; - if (!smumgr->reload_fw) { + if (!hwmgr->reload_fw) { pr_info("skip reloading...\n"); return 0; } if (smu_data->soft_regs_start) - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, - smu_data->soft_regs_start + smum_get_offsetof(smumgr, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + smu_data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, UcodeLoadStatus), 0x0); - if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ - if (!cgs_is_virtualization_enabled(smumgr->device)) { - smu7_send_msg_to_smc_with_parameter(smumgr, + if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ + if (!cgs_is_virtualization_enabled(hwmgr->device)) { + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high); - smu7_send_msg_to_smc_with_parameter(smumgr, + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low); } @@ -439,80 +439,79 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) toc->num_entries = 0; toc->structure_version = 1; - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - 
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - if (cgs_is_virtualization_enabled(smumgr->device)) - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + if (cgs_is_virtualization_enabled(hwmgr->device)) + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); - smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); - if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load)) + if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load)) pr_err("Fail to Request SMU Load uCode"); return result; } /* Check if the FW has been loaded, SMU will not return if loading has not finished. 
*/ -int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type) +int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type); uint32_t ret; - ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11, - smu_data->soft_regs_start + smum_get_offsetof(smumgr, + ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11, + smu_data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, UcodeLoadStatus), fw_mask, fw_mask); - return ret; } -int smu7_reload_firmware(struct pp_smumgr *smumgr) +int smu7_reload_firmware(struct pp_hwmgr *hwmgr) { - return smumgr->smumgr_funcs->start_smu(smumgr); + return hwmgr->smumgr_funcs->start_smu(hwmgr); } -static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit) +static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit) { uint32_t byte_count = length; PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL); - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); for (; byte_count >= 4; byte_count -= 4) - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); @@ -520,41 +519,41 @@ static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t leng } -int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr) +int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); struct cgs_firmware_info info = {0}; if (smu_data->security_hard_key == 1) - cgs_get_firmware_info(smumgr->device, + cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); else - cgs_get_firmware_info(smumgr->device, + cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); - smumgr->is_kicker = info.is_kicker; + hwmgr->is_kicker = info.is_kicker; - result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); + result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); return result; } -int smu7_init(struct pp_smumgr *smumgr) +int smu7_init(struct pp_hwmgr *hwmgr) { struct smu7_smumgr *smu_data; uint8_t *internal_buf; uint64_t mc_addr = 0; /* Allocate memory for backend private data */ - smu_data = (struct smu7_smumgr *)(smumgr->backend); + smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); smu_data->header_buffer.data_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; /* Allocate FW image data structure and header buffer and * send the header buffer address to SMU */ - 
smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, smu_data->header_buffer.data_size, CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -568,16 +567,16 @@ int smu7_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE((NULL != smu_data->header), "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)smu_data->header_buffer.handle); return -EINVAL); - if (cgs_is_virtualization_enabled(smumgr->device)) + if (cgs_is_virtualization_enabled(hwmgr->device)) return 0; smu_data->smu_buffer.data_size = 200*4096; - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, smu_data->smu_buffer.data_size, CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -591,12 +590,12 @@ int smu7_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE((NULL != internal_buf), "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)smu_data->smu_buffer.handle); return -EINVAL); - if (smum_is_hw_avfs_present(smumgr)) + if (smum_is_hw_avfs_present(hwmgr)) smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; else smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; @@ -605,12 +604,10 @@ int smu7_init(struct pp_smumgr *smumgr) } -int smu7_smu_fini(struct pp_smumgr *smumgr) +int smu7_smu_fini(struct pp_hwmgr *hwmgr) { - if (smumgr->backend) { - kfree(smumgr->backend); - smumgr->backend = NULL; - } - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; + cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index ee5e32d2921e..0b63c5c1043c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -60,32 +60,32 @@ struct smu7_smumgr { }; -int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, +int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit); -int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, +int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit); -int smu7_program_jump_on_start(struct pp_smumgr *smumgr); -bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr); -int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg); -int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg); -int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, +int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr); +bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr); +int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); +int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg); +int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, +int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr); -int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr); +int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr); +int 
smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr); enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type); -int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, +int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit); -int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, +int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit); -int smu7_request_smu_load_fw(struct pp_smumgr *smumgr); -int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type); -int smu7_reload_firmware(struct pp_smumgr *smumgr); -int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr); -int smu7_init(struct pp_smumgr *smumgr); -int smu7_smu_fini(struct pp_smumgr *smumgr); +int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr); +int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type); +int smu7_reload_firmware(struct pp_hwmgr *hwmgr); +int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr); +int smu7_init(struct pp_hwmgr *hwmgr); +int smu7_smu_fini(struct pp_hwmgr *hwmgr); #endif
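[Note on the pattern in these hunks] The rv_smumgr, smu7_smumgr and tonga hunks all apply one mechanical transformation: every SMU helper that used to take a struct pp_smumgr * now takes the struct pp_hwmgr * directly, the per-ASIC private data moves from smumgr->backend to hwmgr->smu_backend, and the SMUM_* register wait/field macros become their PHM_* counterparts. A minimal stand-alone sketch of the new calling convention is below; the struct layout and the stub body are simplified stand-ins for illustration only, not the real definitions from hwmgr.h or the SMC register programming.

    #include <stdint.h>
    #include <stdio.h>

    struct pp_hwmgr {
            void *device;       /* CGS device handle in the real driver */
            void *smu_backend;  /* was pp_smumgr::backend before this rework */
    };

    static int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
                                                 uint16_t msg, uint32_t parameter)
    {
            /* stub: the real helper programs the SMC message/argument registers */
            printf("dev=%p msg=0x%04x arg=0x%08x\n",
                   hwmgr->device, msg, (unsigned)parameter);
            return 0;
    }

    int main(void)
    {
            struct pp_hwmgr hwmgr = { 0 };

            /* callers that used to be handed a pp_smumgr now just pass the hwmgr */
            return rv_send_msg_to_smc_with_parameter(&hwmgr, 0x0042, 0);
    }

The same single-handle convention is why the backend casts in these hunks read hwmgr->smu_backend and why call sites such as smu7_copy_bytes_to_smc(hwmgr, ...) no longer need hwmgr->smumgr.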
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 3bdf6478de7f..867388456530 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -27,7 +27,6 @@ #include <linux/slab.h> #include <linux/types.h> #include <drm/amdgpu_drm.h> -#include "pp_instance.h" #include "smumgr.h" #include "cgs_common.h" @@ -46,88 +45,18 @@ MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); -int smum_early_init(struct pp_instance *handle) +int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr; - - if (handle == NULL) - return -EINVAL; - - smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL); - if (smumgr == NULL) - return -ENOMEM; - - smumgr->device = handle->device; - smumgr->chip_family = handle->chip_family; - smumgr->chip_id = handle->chip_id; - smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; - smumgr->reload_fw = 1; - handle->smu_mgr = smumgr; - - switch (smumgr->chip_family) { - case AMDGPU_FAMILY_CZ: - smumgr->smumgr_funcs = &cz_smu_funcs; - break; - case AMDGPU_FAMILY_VI: - switch (smumgr->chip_id) { - case CHIP_TOPAZ: - smumgr->smumgr_funcs = &iceland_smu_funcs; - break; - case CHIP_TONGA: - smumgr->smumgr_funcs = &tonga_smu_funcs; - break; - case CHIP_FIJI: - smumgr->smumgr_funcs = &fiji_smu_funcs; - break; - case CHIP_POLARIS11: - case CHIP_POLARIS10: - case CHIP_POLARIS12: - smumgr->smumgr_funcs = &polaris10_smu_funcs; - break; - default: - return -EINVAL; - } - break; - case AMDGPU_FAMILY_AI: - switch (smumgr->chip_id) { - case CHIP_VEGA10: - smumgr->smumgr_funcs = &vega10_smu_funcs; - break; - default: - return -EINVAL; - } - break; - case AMDGPU_FAMILY_RV: - switch (smumgr->chip_id) { - case CHIP_RAVEN: - smumgr->smumgr_funcs = &rv_smu_funcs; - break; - default: - return -EINVAL; - } - break; - default: - kfree(smumgr); - return -EINVAL; - } - - return 0; -} - -int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable) - return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr); + if (NULL != hwmgr->smumgr_funcs->thermal_avfs_enable) + return hwmgr->smumgr_funcs->thermal_avfs_enable(hwmgr); return 0; } -int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table) - return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr); + if (NULL != hwmgr->smumgr_funcs->thermal_setup_fan_table) + return hwmgr->smumgr_funcs->thermal_setup_fan_table(hwmgr); return 0; } @@ -135,8 +64,8 @@ int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold) - return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr); + if (NULL != hwmgr->smumgr_funcs->update_sclk_threshold) + return hwmgr->smumgr_funcs->update_sclk_threshold(hwmgr); return 0; } @@ -144,163 +73,75 @@ int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr) int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) { - if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table) - return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type); + if (NULL != 
hwmgr->smumgr_funcs->update_smc_table) + return hwmgr->smumgr_funcs->update_smc_table(hwmgr, type); return 0; } -uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member) +uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, uint32_t type, uint32_t member) { - if (NULL != smumgr->smumgr_funcs->get_offsetof) - return smumgr->smumgr_funcs->get_offsetof(type, member); + if (NULL != hwmgr->smumgr_funcs->get_offsetof) + return hwmgr->smumgr_funcs->get_offsetof(type, member); return 0; } int smum_process_firmware_header(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header) - return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr); + if (NULL != hwmgr->smumgr_funcs->process_firmware_header) + return hwmgr->smumgr_funcs->process_firmware_header(hwmgr); return 0; } -int smum_get_argument(struct pp_smumgr *smumgr) +int smum_get_argument(struct pp_hwmgr *hwmgr) { - if (NULL != smumgr->smumgr_funcs->get_argument) - return smumgr->smumgr_funcs->get_argument(smumgr); + if (NULL != hwmgr->smumgr_funcs->get_argument) + return hwmgr->smumgr_funcs->get_argument(hwmgr); return 0; } -uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value) +uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value) { - if (NULL != smumgr->smumgr_funcs->get_mac_definition) - return smumgr->smumgr_funcs->get_mac_definition(value); + if (NULL != hwmgr->smumgr_funcs->get_mac_definition) + return hwmgr->smumgr_funcs->get_mac_definition(value); return 0; } -int smum_download_powerplay_table(struct pp_smumgr *smumgr, - void **table) +int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table) { - if (NULL != smumgr->smumgr_funcs->download_pptable_settings) - return smumgr->smumgr_funcs->download_pptable_settings(smumgr, + if (NULL != hwmgr->smumgr_funcs->download_pptable_settings) + return hwmgr->smumgr_funcs->download_pptable_settings(hwmgr, table); return 0; } -int smum_upload_powerplay_table(struct pp_smumgr *smumgr) +int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr) { - if (NULL != smumgr->smumgr_funcs->upload_pptable_settings) - return smumgr->smumgr_funcs->upload_pptable_settings(smumgr); + if (NULL != hwmgr->smumgr_funcs->upload_pptable_settings) + return hwmgr->smumgr_funcs->upload_pptable_settings(hwmgr); return 0; } -int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { - if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL) + if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL) return -EINVAL; - return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg); + return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg); } -int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (smumgr == NULL || - smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL) - return -EINVAL; - return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter( - smumgr, msg, parameter); -} - -/* - * Returns once the part of the register indicated by the mask has - * reached the given value. 
- */ -int smum_wait_on_register(struct pp_smumgr *smumgr, - uint32_t index, - uint32_t value, uint32_t mask) -{ - uint32_t i; - uint32_t cur_value; - - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - for (i = 0; i < smumgr->usec_timeout; i++) { - cur_value = cgs_read_register(smumgr->device, index); - if ((cur_value & mask) == (value & mask)) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == smumgr->usec_timeout) - return -1; - - return 0; -} - -int smum_wait_for_register_unequal(struct pp_smumgr *smumgr, - uint32_t index, - uint32_t value, uint32_t mask) -{ - uint32_t i; - uint32_t cur_value; - - if (smumgr == NULL) + if (hwmgr == NULL || + hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL) return -EINVAL; - - for (i = 0; i < smumgr->usec_timeout; i++) { - cur_value = cgs_read_register(smumgr->device, - index); - if ((cur_value & mask) != (value & mask)) - break; - udelay(1); - } - - /* timeout means wrong logic */ - if (i == smumgr->usec_timeout) - return -1; - - return 0; -} - - -/* - * Returns once the part of the register indicated by the mask - * has reached the given value.The indirect space is described by - * giving the memory-mapped index of the indirect index register. - */ -int smum_wait_on_indirect_register(struct pp_smumgr *smumgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - cgs_write_register(smumgr->device, indirect_port, index); - return smum_wait_on_register(smumgr, indirect_port + 1, - mask, value); -} - -void smum_wait_for_indirect_register_unequal( - struct pp_smumgr *smumgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask) -{ - if (smumgr == NULL || smumgr->device == NULL) - return; - cgs_write_register(smumgr->device, indirect_port, index); - smum_wait_for_register_unequal(smumgr, indirect_port + 1, - value, mask); + return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter( + hwmgr, msg, parameter); } int smu_allocate_memory(void *device, uint32_t size, @@ -316,7 +157,7 @@ int smu_allocate_memory(void *device, uint32_t size, return -EINVAL; ret = cgs_alloc_gpu_mem(device, type, size, byte_align, - 0, 0, (cgs_handle_t *)handle); + (cgs_handle_t *)handle); if (ret) return -ENOMEM; @@ -356,24 +197,24 @@ int smu_free_memory(void *device, void *handle) int smum_init_smc_table(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table) - return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr); + if (NULL != hwmgr->smumgr_funcs->init_smc_table) + return hwmgr->smumgr_funcs->init_smc_table(hwmgr); return 0; } int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels) - return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr); + if (NULL != hwmgr->smumgr_funcs->populate_all_graphic_levels) + return hwmgr->smumgr_funcs->populate_all_graphic_levels(hwmgr); return 0; } int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels) - return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr); + if (NULL != hwmgr->smumgr_funcs->populate_all_memory_levels) + return hwmgr->smumgr_funcs->populate_all_memory_levels(hwmgr); return 0; } @@ -381,16 +222,16 @@ int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr) /*this interface is needed by island ci/vi */ int smum_initialize_mc_reg_table(struct 
pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table) - return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr); + if (NULL != hwmgr->smumgr_funcs->initialize_mc_reg_table) + return hwmgr->smumgr_funcs->initialize_mc_reg_table(hwmgr); return 0; } bool smum_is_dpm_running(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running) - return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr); + if (NULL != hwmgr->smumgr_funcs->is_dpm_running) + return hwmgr->smumgr_funcs->is_dpm_running(hwmgr); return true; } @@ -398,17 +239,17 @@ bool smum_is_dpm_running(struct pp_hwmgr *hwmgr) int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { - if (hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels) - return hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels( + if (hwmgr->smumgr_funcs->populate_requested_graphic_levels) + return hwmgr->smumgr_funcs->populate_requested_graphic_levels( hwmgr, request); return 0; } -bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr) +bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr) { - if (smumgr->smumgr_funcs->is_hw_avfs_present) - return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr); + if (hwmgr->smumgr_funcs->is_hw_avfs_present) + return hwmgr->smumgr_funcs->is_hw_avfs_present(hwmgr); return false; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c index 65d3a4893958..1f720ccdaf99 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c @@ -97,7 +97,7 @@ static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = { */ -static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, +static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table, uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) { @@ -406,7 +406,7 @@ static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_ { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t i; /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
*/ @@ -539,7 +539,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level); /* populate graphics levels*/ - result = tonga_get_dependecy_volt_by_clk(hwmgr, + result = tonga_get_dependency_volt_by_clk(hwmgr, pptable_info->vdd_dep_on_sclk, engine_clock, &graphic_level->MinVoltage, &mvdd); PP_ASSERT_WITH_CODE((!result), @@ -598,7 +598,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct smu7_dpm_table *dpm_table = &data->dpm_table; struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; @@ -690,7 +690,7 @@ int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -895,7 +895,7 @@ static int tonga_populate_single_memory_level( uint32_t mclk_strobe_mode_threshold = 40000; if (NULL != pptable_info->vdd_dep_on_mclk) { - result = tonga_get_dependecy_volt_by_clk(hwmgr, + result = tonga_get_dependency_volt_by_clk(hwmgr, pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &mvdd); @@ -1002,7 +1002,7 @@ int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; @@ -1048,7 +1048,7 @@ int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1090,7 +1090,7 @@ static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, { int result = 0; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct pp_atomctrl_clock_dividers_vi dividers; @@ -1454,7 +1454,7 @@ static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); int result = 0; SMU72_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; @@ -1475,7 +1475,7 @@ static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) if (!result) { result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, 
sizeof(SMU72_Discrete_MCArbDramTimingTable), @@ -1492,7 +1492,7 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, int result = 0; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); table->GraphicsBootLevel = 0; table->MemoryBootLevel = 0; @@ -1543,7 +1543,7 @@ static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) volt_with_cks, value; uint16_t clock_freq_u16; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = @@ -1782,9 +1782,9 @@ static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr, * @param hwmgr the address of the powerplay hardware manager. * @return always 0 */ -static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) +static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -1797,7 +1797,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. */ - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result != 0) @@ -1806,7 +1806,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return smu7_write_smc_sram_dword(smumgr, + return smu7_write_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } @@ -1814,7 +1814,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); struct phm_ppt_v1_information *table_info = @@ -1838,7 +1838,7 @@ static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; dpm_table->BAPM_TEMP_GRADIENT = - PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient); pdef1 = defaults->bapmti_r; pdef2 = defaults->bapmti_rc; @@ -1861,7 +1861,7 @@ static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; @@ -1876,7 +1876,7 @@ static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; struct phm_ppt_v1_information *table_info = (struct 
phm_ppt_v1_information *)(hwmgr->pptable); @@ -1897,11 +1897,11 @@ static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -1919,7 +1919,7 @@ static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) { int i; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -1930,7 +1930,7 @@ static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); if ((hwmgr->thermal_controller.advanceFanControlParameters. usFanOutputSensitivity & (1 << 15)) || @@ -1949,7 +1949,7 @@ static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -1958,15 +1958,10 @@ static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; @@ -1987,12 +1982,12 @@ static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t pm_fuse_table_offset; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -2035,13 +2030,6 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed !", return -EINVAL); - /* DW19 */ - if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML " - "Min and Max Vid Failed !", - return -EINVAL); - /* DW20 */ if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE( @@ -2050,7 +2038,7 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Hi and Lo Sidd Failed !", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU72_Discrete_PmFuses), 
SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -2060,10 +2048,10 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) return 0; } -static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr, +static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table) { - const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend; + const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend; uint32_t i, j; @@ -2104,12 +2092,12 @@ static void tonga_convert_mc_registers( } static int tonga_convert_mc_reg_table_entry_to_smc( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, const uint32_t memory_clock, SMU72_Discrete_MCRegisterSet *mc_reg_table_data ) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t i = 0; for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { @@ -2139,7 +2127,7 @@ static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, for (i = 0; i < data->dpm_table.mclk_table.count; i++) { res = tonga_convert_mc_reg_table_entry_to_smc( - hwmgr->smumgr, + hwmgr, data->dpm_table.mclk_table.dpm_levels[i].value, &mc_regs->data[i] ); @@ -2153,8 +2141,7 @@ static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t address; int32_t result; @@ -2175,7 +2162,7 @@ static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]); return smu7_copy_bytes_to_smc( - hwmgr->smumgr, address, + hwmgr, address, (uint8_t *)&smu_data->mc_regs.data[0], sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, @@ -2185,11 +2172,10 @@ static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters)); - result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); PP_ASSERT_WITH_CODE(!result, "Failed to initialize MCRegTable for the MC register addresses !", return result;); @@ -2199,13 +2185,13 @@ static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) "Failed to initialize MCRegTable for driver state !", return result;); - return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start, + return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start, (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END); } static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2221,7 +2207,7 @@ static void 
tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; unsigned min_level = 1; @@ -2267,7 +2253,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) int result; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2483,7 +2469,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController), @@ -2492,7 +2478,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!result, "Failed to upload dpm data to SMC memory !", return result;); - result = tonga_init_arb_table_index(hwmgr->smumgr); + result = tonga_init_arb_table_index(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to upload arb data to SMC memory !", return result); @@ -2521,7 +2507,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; @@ -2600,7 +2586,7 @@ int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) fan_table.FanControl_GL_Flag = 1; - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, + res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), @@ -2625,7 +2611,7 @@ int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2642,7 +2628,7 @@ int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2728,7 +2714,7 @@ uint32_t tonga_get_mac_definition(uint32_t value) static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2753,7 +2739,7 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDDPM) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + 
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); return 0; @@ -2762,7 +2748,7 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2784,7 +2770,7 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); return 0; @@ -2792,7 +2778,7 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; smu_data->smc_state_table.SamuBootLevel = 0; @@ -2810,7 +2796,7 @@ static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SAMUDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); return 0; @@ -2844,13 +2830,13 @@ int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2860,7 +2846,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2873,7 +2859,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2881,7 +2867,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) if (!result) smu_data->smu7_data.mc_reg_table_start = tmp; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2891,7 +2877,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, 
mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2901,7 +2887,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -3170,7 +3156,7 @@ static int tonga_set_valid_flag(struct tonga_mc_reg_table *table) int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); pp_atomctrl_mc_reg_table *table; struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table; uint8_t module_index = tonga_get_memory_modile_index(hwmgr); @@ -3253,7 +3239,7 @@ int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { struct tonga_smumgr *smu_data = (struct tonga_smumgr *) - (hwmgr->smumgr->backend); + (hwmgr->smu_backend); struct SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; uint32_t array = smu_data->smu7_data.dpm_table_start + @@ -3270,6 +3256,6 @@ int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, levels[i].DownHyst = request->down_hyst; } - return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, array_size, SMC_RAM_END); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h index 962860f13f24..9d6a78a65976 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h @@ -40,7 +40,7 @@ struct tonga_pt_defaults { uint8_t tdc_waterfall_ctl; uint8_t dte_ambient_temp_base; uint32_t display_cac; - uint32_t bamp_temp_gradient; + uint32_t bapm_temp_gradient; uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index c35f4c35c9ca..d22cf218cf18 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -37,125 +37,125 @@ #include "smu7_smumgr.h" -static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) +static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr) { int result; /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result) return result; /* Clear status */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); /* Enable clock */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Set SMU Auto Start */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + 
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_INPUT_DATA, AUTO_START, 1); /* Clear firmware interrupt enable flag */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); /** * Call Test SMU message with 0x20000 offset to trigger SMU start */ - smu7_send_msg_to_smc_offset(smumgr); + smu7_send_msg_to_smc_offset(hwmgr); /* Wait for done bit to be set */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ - if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, + if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) { pr_err("SMU Firmware start failed\n"); return -EINVAL; } /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; } -static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr) +static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /*Clear firmware interrupt enable flag*/ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /*De-assert reset*/ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int tonga_start_smu(struct pp_smumgr *smumgr) +static int tonga_start_smu(struct pp_hwmgr *hwmgr) { int result; /* Only start SMC if SMC RAM is not running */ - if (!(smu7_is_smc_ram_running(smumgr) || - cgs_is_virtualization_enabled(smumgr->device))) { + if (!(smu7_is_smc_ram_running(hwmgr) || + cgs_is_virtualization_enabled(hwmgr->device))) { /*Check if SMU is running in protected mode*/ - if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { - result = tonga_start_in_non_protection_mode(smumgr); + result = tonga_start_in_non_protection_mode(hwmgr); if (result) return result; } else { - result = tonga_start_in_protection_mode(smumgr); + result = tonga_start_in_protection_mode(hwmgr); if (result) return result; } } - result = smu7_request_smu_load_fw(smumgr); + 
result = smu7_request_smu_load_fw(hwmgr); return result; } @@ -167,7 +167,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr) * @param smcAddress the address in the SMC RAM to access. * @param value to write to the SMC SRAM. */ -static int tonga_smu_init(struct pp_smumgr *smumgr) +static int tonga_smu_init(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *tonga_priv = NULL; int i; @@ -176,9 +176,9 @@ static int tonga_smu_init(struct pp_smumgr *smumgr) if (tonga_priv == NULL) return -ENOMEM; - smumgr->backend = tonga_priv; + hwmgr->smu_backend = tonga_priv; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 408514c965a0..2f979fb86824 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -53,20 +53,20 @@ #define smnMP0_FW_INTF 0x3010104 #define smnMP1_PUB_CTRL 0x3010b14 -static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr) +static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr) { uint32_t mp1_fw_flags, reg; reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); - cgs_write_register(smumgr->device, reg, + cgs_write_register(hwmgr->device, reg, (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); - mp1_fw_flags = cgs_read_register(smumgr->device, reg); + mp1_fw_flags = cgs_read_register(hwmgr->device, reg); if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) return true; @@ -80,20 +80,20 @@ static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr) * @param smumgr the address of the powerplay hardware manager. * @return TRUE SMC has responded, FALSE otherwise. */ -static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr) +static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - smum_wait_for_register_unequal(smumgr, reg, + phm_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); - return cgs_read_register(smumgr->device, reg); + return cgs_read_register(hwmgr->device, reg); } /* @@ -102,43 +102,43 @@ static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr) * @param msg the message to send. * @return Always return 0. */ -int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, +int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(smumgr->device, reg, msg); + cgs_write_register(hwmgr->device, reg, msg); return 0; } /* * Send a message to the SMC, and wait for its response. - * @param smumgr the address of the powerplay hardware manager. + * @param hwmgr the address of the powerplay hardware manager. * @param msg the message to send. * @return Always return 0. 
*/ -int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; - vega10_wait_for_response(smumgr); + vega10_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); - vega10_send_msg_to_smc_without_waiting(smumgr, msg); + vega10_send_msg_to_smc_without_waiting(hwmgr, msg); - if (vega10_wait_for_response(smumgr) != 1) + if (vega10_wait_for_response(hwmgr) != 1) pr_err("Failed to send message: 0x%x\n", msg); return 0; @@ -146,32 +146,32 @@ int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) /* * Send a message to the SMC with parameter - * @param smumgr: the address of the powerplay hardware manager. + * @param hwmgr: the address of the powerplay hardware manager. * @param msg: the message to send. * @param parameter: the parameter to send * @return Always return 0. */ -int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; - vega10_wait_for_response(smumgr); + vega10_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(smumgr->device, reg, parameter); + cgs_write_register(hwmgr->device, reg, parameter); - vega10_send_msg_to_smc_without_waiting(smumgr, msg); + vega10_send_msg_to_smc_without_waiting(hwmgr, msg); - if (vega10_wait_for_response(smumgr) != 1) + if (vega10_wait_for_response(hwmgr) != 1) pr_err("Failed to send message: 0x%x\n", msg); return 0; @@ -180,51 +180,51 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, /* * Send a message to the SMC with parameter, do not wait for response - * @param smumgr: the address of the powerplay hardware manager. + * @param hwmgr: the address of the powerplay hardware manager. * @param msg: the message to send. * @param parameter: the parameter to send * @return The response that came from the SMC. */ int vega10_send_msg_to_smc_with_parameter_without_waiting( - struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) + struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { uint32_t reg; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(smumgr->device, reg, parameter); + cgs_write_register(hwmgr->device, reg, parameter); - return vega10_send_msg_to_smc_without_waiting(smumgr, msg); + return vega10_send_msg_to_smc_without_waiting(hwmgr, msg); } /* * Retrieve an argument from SMC. - * @param smumgr the address of the powerplay hardware manager. + * @param hwmgr the address of the powerplay hardware manager. * @param arg pointer to store the argument from SMC. * @return Always return 0. 
*/ -int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg) +int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg) { uint32_t reg; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - *arg = cgs_read_register(smumgr->device, reg); + *arg = cgs_read_register(hwmgr->device, reg); return 0; } /* * Copy table from SMC into driver FB - * @param smumgr the address of the SMC manager + * @param hwmgr the address of the HW manager * @param table_id the driver's table ID to copy from */ -int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, +int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); @@ -232,16 +232,16 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, "Invalid SMU Table version!", return -EINVAL); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", @@ -255,14 +255,14 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, /* * Copy table from Driver FB into SMC - * @param smumgr the address of the SMC manager + * @param hwmgr the address of the HW manager * @param table_id the table to copy from */ -int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, +int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); @@ -274,17 +274,17 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, 
PPSMC_MSG_TransferTableDram2Smu, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", @@ -293,87 +293,87 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, return 0; } -int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table) +int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table) { PP_ASSERT_WITH_CODE(avfs_table, "No access to SMC AVFS Table", return -EINVAL); - return vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE); + return vega10_copy_table_from_smc(hwmgr, avfs_table, AVFSTABLE); } -int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table) +int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table) { PP_ASSERT_WITH_CODE(avfs_table, "No access to SMC AVFS Table", return -EINVAL); - return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE); + return vega10_copy_table_to_smc(hwmgr, avfs_table, AVFSTABLE); } -int vega10_enable_smc_features(struct pp_smumgr *smumgr, +int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, bool enable, uint32_t feature_mask) { int msg = enable ? PPSMC_MSG_EnableSmuFeatures : PPSMC_MSG_DisableSmuFeatures; - return vega10_send_msg_to_smc_with_parameter(smumgr, + return vega10_send_msg_to_smc_with_parameter(hwmgr, msg, feature_mask); } -int vega10_get_smc_features(struct pp_smumgr *smumgr, +int vega10_get_smc_features(struct pp_hwmgr *hwmgr, uint32_t *features_enabled) { if (features_enabled == NULL) return -EINVAL; - if (!vega10_send_msg_to_smc(smumgr, + if (!vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures)) { - vega10_read_arg_from_smc(smumgr, features_enabled); + vega10_read_arg_from_smc(hwmgr, features_enabled); return 0; } return -EINVAL; } -int vega10_set_tools_address(struct pp_smumgr *smumgr) +int vega10_set_tools_address(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high || priv->smu_tables.entry[TOOLSTABLE].table_addr_low) { - if (!vega10_send_msg_to_smc_with_parameter(smumgr, + if (!vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrHigh, priv->smu_tables.entry[TOOLSTABLE].table_addr_high)) - vega10_send_msg_to_smc_with_parameter(smumgr, + vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrLow, priv->smu_tables.entry[TOOLSTABLE].table_addr_low); } return 0; } -static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) +static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr) { uint32_t smc_driver_if_version; struct cgs_system_info sys_info = {0}; uint32_t dev_id; uint32_t rev_id; - PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion), "Attempt to get SMC IF Version Number Failed!", return -EINVAL); - vega10_read_arg_from_smc(smumgr, &smc_driver_if_version); + vega10_read_arg_from_smc(hwmgr, &smc_driver_if_version); sys_info.size = sizeof(struct cgs_system_info); sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; - cgs_query_system_info(smumgr->device, &sys_info); + cgs_query_system_info(hwmgr->device, &sys_info); dev_id = (uint32_t)sys_info.value; sys_info.size = sizeof(struct cgs_system_info); sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; - cgs_query_system_info(smumgr->device, &sys_info); + cgs_query_system_info(hwmgr->device, &sys_info); rev_id = (uint32_t)sys_info.value; if (!((dev_id == 0x687f) && @@ -392,7 +392,7 @@ 
static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) return 0; } -static int vega10_smu_init(struct pp_smumgr *smumgr) +static int vega10_smu_init(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv; uint64_t mc_addr; @@ -401,7 +401,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) int ret; struct cgs_firmware_info info = {0}; - ret = cgs_get_firmware_info(smumgr->device, + ret = cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); if (ret || !info.kptr) @@ -412,10 +412,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) if (!priv) return -ENOMEM; - smumgr->backend = priv; + hwmgr->smu_backend = priv; /* allocate space for pptable */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(PPTable_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -425,8 +425,8 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for pptable.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -441,7 +441,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) priv->smu_tables.entry[PPTABLE].handle = handle; /* allocate space for watermarks table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(Watermarks_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -451,10 +451,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for wmtable.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -469,7 +469,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) priv->smu_tables.entry[WMTABLE].handle = handle; /* allocate space for AVFS table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(AvfsTable_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -479,12 +479,12 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for avfs table.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -500,7 +500,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) tools_size = 0x19000; if (tools_size) { - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, tools_size, CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -522,7 +522,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) } /* allocate space for AVFS Fuse table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(AvfsFuseOverride_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -532,16 +532,16 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for avfs fuse table.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + 
cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -558,36 +558,36 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) return 0; } -static int vega10_smu_fini(struct pp_smumgr *smumgr) +static int vega10_smu_fini(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); if (priv) { - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); if (priv->smu_tables.entry[TOOLSTABLE].table) - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle); - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; } return 0; } -static int vega10_start_smu(struct pp_smumgr *smumgr) +static int vega10_start_smu(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr), + PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr), "Failed to verify SMC interface!", return -EINVAL); - vega10_set_tools_address(smumgr); + vega10_set_tools_address(hwmgr); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h index 821425c1e4e0..0695455b21b2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h @@ -52,19 +52,19 @@ struct vega10_smumgr { struct smu_table_array smu_tables; }; -int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg); -int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, +int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg); +int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); -int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, +int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); -int vega10_enable_smc_features(struct pp_smumgr *smumgr, +int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, bool enable, uint32_t feature_mask); -int vega10_get_smc_features(struct pp_smumgr *smumgr, +int vega10_get_smc_features(struct pp_hwmgr *hwmgr, uint32_t *features_enabled); -int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table); -int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table); +int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table); +int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table); -int vega10_set_tools_address(struct pp_smumgr *smumgr); +int vega10_set_tools_address(struct 
pp_hwmgr *hwmgr); #endif
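
Every hunk in this section applies the same conversion: the dedicated struct pp_smumgr handle is retired, the SMU-side helpers take the struct pp_hwmgr directly, the per-ASIC SMU private data is reached through hwmgr->smu_backend instead of smumgr->backend, and the SMUM_* register helpers give way to their PHM_* counterparts. The following is a minimal standalone sketch of that calling-convention change, not driver code: the struct bodies are reduced placeholders, tonga_set_uvd_boot_level() is a hypothetical helper invented for illustration, and only the field name smu_backend and the smum_send_msg_to_smc_with_parameter(hwmgr, ...) form mentioned in the closing comment come from the patch itself.

/*
 * Minimal standalone sketch (not kernel code) of the refactor applied in
 * the hunks above: SMU helpers that used to take a dedicated
 * struct pp_smumgr now take the struct pp_hwmgr, and the per-ASIC SMU
 * private data moves to hwmgr->smu_backend.
 */
#include <errno.h>
#include <stdint.h>
#include <stddef.h>

struct tonga_smumgr {		/* per-ASIC SMU private data (reduced) */
	uint32_t uvd_boot_level;
};

struct pp_hwmgr {		/* hardware manager (reduced) */
	void *device;		/* cgs device handle */
	void *backend;		/* hwmgr private data (smu7_hwmgr, ...) */
	void *smu_backend;	/* was smumgr->backend before this series */
};

/*
 * Old shape of such a helper:
 *
 *	int tonga_set_uvd_boot_level(struct pp_smumgr *smumgr, uint32_t lvl)
 *	{
 *		struct tonga_smumgr *smu_data = smumgr->backend;
 *		...
 *	}
 *
 * New shape, matching the converted functions above:
 */
int tonga_set_uvd_boot_level(struct pp_hwmgr *hwmgr, uint32_t level)
{
	struct tonga_smumgr *smu_data =
			(struct tonga_smumgr *)(hwmgr->smu_backend);

	if (smu_data == NULL)
		return -EINVAL;

	smu_data->uvd_boot_level = level;

	/*
	 * A real implementation would now notify the SMC, e.g.
	 * smum_send_msg_to_smc_with_parameter(hwmgr,
	 *		PPSMC_MSG_UVDDPM_SetEnabledMask, 1 << level);
	 */
	return 0;
}

Folding the SMU manager into the hardware manager drops one level of indirection (hwmgr->smumgr->backend becomes hwmgr->smu_backend) and lets the per-ASIC SMU code use the hwmgr-based register helpers, which is why the tonga and vega10 hunks above can swap the SMUM_*_VFPF_INDIRECT_FIELD macros for their PHM_ counterparts wholesale.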