drm/amd/powerplay: centralize all buffer allocation in sw_init phase
This fits the common design and simplifies buffer deallocation.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 61555ccbf9
commit 78eb4a3615
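The diff below moves every persistent SMU buffer allocation (tool table, driver table, memory pool, pptable copies) into the software-init path, paired with a single release point in the software-fini path. As a rough illustration of that init/fini pairing, here is a minimal standalone C sketch; the mock types and calloc/free stand in for the kernel's smu_table and amdgpu_bo_create_kernel/amdgpu_bo_free_kernel and are not the kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for a firmware table buffer. */
struct mock_table {
        void *cpu_addr;
        size_t size;
};

/* Centralized allocation: done once, in the sw_init phase. */
static int mock_sw_init(struct mock_table *t, size_t size)
{
        t->cpu_addr = calloc(1, size);
        if (!t->cpu_addr)
                return -1;
        t->size = size;
        return 0;
}

/* Matching teardown: the single place the buffer is released. */
static void mock_sw_fini(struct mock_table *t)
{
        free(t->cpu_addr);
        memset(t, 0, sizeof(*t));
}

int main(void)
{
        struct mock_table t;

        if (mock_sw_init(&t, 4096))
                return 1;
        /* hw_init/hw_fini cycles can come and go here without
         * reallocating or freeing the buffer. */
        mock_sw_fini(&t);
        printf("sw_init/sw_fini pairing ok\n");
        return 0;
}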
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c

@@ -799,6 +799,147 @@ int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
 	return 0;
 }
 
+static int smu_init_fb_allocations(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *tables = smu_table->tables;
+	struct smu_table *driver_table = &(smu_table->driver_table);
+	uint32_t max_table_size = 0;
+	int ret, i;
+
+	/* VRAM allocation for tool table */
+	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
+		ret = amdgpu_bo_create_kernel(adev,
+					      tables[SMU_TABLE_PMSTATUSLOG].size,
+					      tables[SMU_TABLE_PMSTATUSLOG].align,
+					      tables[SMU_TABLE_PMSTATUSLOG].domain,
+					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
+					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+		if (ret) {
+			pr_err("VRAM allocation for tool table failed!\n");
+			return ret;
+		}
+	}
+
+	/* VRAM allocation for driver table */
+	for (i = 0; i < SMU_TABLE_COUNT; i++) {
+		if (tables[i].size == 0)
+			continue;
+
+		if (i == SMU_TABLE_PMSTATUSLOG)
+			continue;
+
+		if (max_table_size < tables[i].size)
+			max_table_size = tables[i].size;
+	}
+
+	driver_table->size = max_table_size;
+	driver_table->align = PAGE_SIZE;
+	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+	ret = amdgpu_bo_create_kernel(adev,
+				      driver_table->size,
+				      driver_table->align,
+				      driver_table->domain,
+				      &driver_table->bo,
+				      &driver_table->mc_address,
+				      &driver_table->cpu_addr);
+	if (ret) {
+		pr_err("VRAM allocation for driver table failed!\n");
+		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+	}
+
+	return ret;
+}
+
+static int smu_fini_fb_allocations(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *tables = smu_table->tables;
+	struct smu_table *driver_table = &(smu_table->driver_table);
+
+	if (!tables)
+		return 0;
+
+	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+
+	amdgpu_bo_free_kernel(&driver_table->bo,
+			      &driver_table->mc_address,
+			      &driver_table->cpu_addr);
+
+	return 0;
+}
+
+/**
+ * smu_alloc_memory_pool - allocate memory pool in the system memory
+ *
+ * @smu: amdgpu_device pointer
+ *
+ * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
+ * and DramLogSetDramAddr can notify it changed.
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int smu_alloc_memory_pool(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *memory_pool = &smu_table->memory_pool;
+	uint64_t pool_size = smu->pool_size;
+	int ret = 0;
+
+	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
+		return ret;
+
+	memory_pool->size = pool_size;
+	memory_pool->align = PAGE_SIZE;
+	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
+
+	switch (pool_size) {
+	case SMU_MEMORY_POOL_SIZE_256_MB:
+	case SMU_MEMORY_POOL_SIZE_512_MB:
+	case SMU_MEMORY_POOL_SIZE_1_GB:
+	case SMU_MEMORY_POOL_SIZE_2_GB:
+		ret = amdgpu_bo_create_kernel(adev,
+					      memory_pool->size,
+					      memory_pool->align,
+					      memory_pool->domain,
+					      &memory_pool->bo,
+					      &memory_pool->mc_address,
+					      &memory_pool->cpu_addr);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int smu_free_memory_pool(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *memory_pool = &smu_table->memory_pool;
+
+	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
+		return 0;
+
+	amdgpu_bo_free_kernel(&memory_pool->bo,
+			      &memory_pool->mc_address,
+			      &memory_pool->cpu_addr);
+
+	memset(memory_pool, 0, sizeof(struct smu_table));
+
+	return 0;
+}
+
 static int smu_smc_table_sw_init(struct smu_context *smu)
 {
 	int ret;
@@ -823,6 +964,17 @@ static int smu_smc_table_sw_init(struct smu_context *smu)
 		return ret;
 	}
 
+	/*
+	 * allocate vram bos to store smc table contents.
+	 */
+	ret = smu_init_fb_allocations(smu);
+	if (ret)
+		return ret;
+
+	ret = smu_alloc_memory_pool(smu);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
@@ -830,6 +982,20 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
 {
 	int ret;
 
+	ret = smu_free_memory_pool(smu);
+	if (ret)
+		return ret;
+
+	ret = smu_fini_fb_allocations(smu);
+	if (ret)
+		return ret;
+
+	ret = smu_fini_power(smu);
+	if (ret) {
+		pr_err("Failed to init smu_fini_power!\n");
+		return ret;
+	}
+
 	ret = smu_fini_smc_tables(smu);
 	if (ret) {
 		pr_err("Failed to smu_fini_smc_tables!\n");
@@ -920,91 +1086,6 @@ static int smu_sw_fini(void *handle)
 		return ret;
 	}
 
-	ret = smu_fini_power(smu);
-	if (ret) {
-		pr_err("Failed to init smu_fini_power!\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-static int smu_init_fb_allocations(struct smu_context *smu)
-{
-	struct amdgpu_device *adev = smu->adev;
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *tables = smu_table->tables;
-	struct smu_table *driver_table = &(smu_table->driver_table);
-	uint32_t max_table_size = 0;
-	int ret, i;
-
-	/* VRAM allocation for tool table */
-	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
-		ret = amdgpu_bo_create_kernel(adev,
-					      tables[SMU_TABLE_PMSTATUSLOG].size,
-					      tables[SMU_TABLE_PMSTATUSLOG].align,
-					      tables[SMU_TABLE_PMSTATUSLOG].domain,
-					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
-					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
-					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
-		if (ret) {
-			pr_err("VRAM allocation for tool table failed!\n");
-			return ret;
-		}
-	}
-
-	/* VRAM allocation for driver table */
-	for (i = 0; i < SMU_TABLE_COUNT; i++) {
-		if (tables[i].size == 0)
-			continue;
-
-		if (i == SMU_TABLE_PMSTATUSLOG)
-			continue;
-
-		if (max_table_size < tables[i].size)
-			max_table_size = tables[i].size;
-	}
-
-	driver_table->size = max_table_size;
-	driver_table->align = PAGE_SIZE;
-	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
-
-	ret = amdgpu_bo_create_kernel(adev,
-				      driver_table->size,
-				      driver_table->align,
-				      driver_table->domain,
-				      &driver_table->bo,
-				      &driver_table->mc_address,
-				      &driver_table->cpu_addr);
-	if (ret) {
-		pr_err("VRAM allocation for driver table failed!\n");
-		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
-			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
-					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
-					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
-	}
-
-	return ret;
-}
-
-static int smu_fini_fb_allocations(struct smu_context *smu)
-{
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *tables = smu_table->tables;
-	struct smu_table *driver_table = &(smu_table->driver_table);
-
-	if (!tables)
-		return 0;
-
-	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
-		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
-				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
-				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
-
-	amdgpu_bo_free_kernel(&driver_table->bo,
-			      &driver_table->mc_address,
-			      &driver_table->cpu_addr);
-
 	return 0;
 }
 
@@ -1045,13 +1126,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 	if (ret)
 		return ret;
 
-	/*
-	 * allocate vram bos to store smc table contents.
-	 */
-	ret = smu_init_fb_allocations(smu);
-	if (ret)
-		return ret;
-
 	/*
 	 * Parse pptable format and fill PPTable_t smc_pptable to
 	 * smu_table_context structure. And read the smc_dpm_table from vbios,
@@ -1169,68 +1243,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 	return ret;
 }
 
-/**
- * smu_alloc_memory_pool - allocate memory pool in the system memory
- *
- * @smu: amdgpu_device pointer
- *
- * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
- * and DramLogSetDramAddr can notify it changed.
- *
- * Returns 0 on success, error on failure.
- */
-static int smu_alloc_memory_pool(struct smu_context *smu)
-{
-	struct amdgpu_device *adev = smu->adev;
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *memory_pool = &smu_table->memory_pool;
-	uint64_t pool_size = smu->pool_size;
-	int ret = 0;
-
-	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
-		return ret;
-
-	memory_pool->size = pool_size;
-	memory_pool->align = PAGE_SIZE;
-	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
-
-	switch (pool_size) {
-	case SMU_MEMORY_POOL_SIZE_256_MB:
-	case SMU_MEMORY_POOL_SIZE_512_MB:
-	case SMU_MEMORY_POOL_SIZE_1_GB:
-	case SMU_MEMORY_POOL_SIZE_2_GB:
-		ret = amdgpu_bo_create_kernel(adev,
-					      memory_pool->size,
-					      memory_pool->align,
-					      memory_pool->domain,
-					      &memory_pool->bo,
-					      &memory_pool->mc_address,
-					      &memory_pool->cpu_addr);
-		break;
-	default:
-		break;
-	}
-
-	return ret;
-}
-
-static int smu_free_memory_pool(struct smu_context *smu)
-{
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *memory_pool = &smu_table->memory_pool;
-
-	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
-		return 0;
-
-	amdgpu_bo_free_kernel(&memory_pool->bo,
-			      &memory_pool->mc_address,
-			      &memory_pool->cpu_addr);
-
-	memset(memory_pool, 0, sizeof(struct smu_table));
-
-	return 0;
-}
-
 static int smu_start_smc_engine(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
@@ -1288,10 +1300,6 @@ static int smu_hw_init(void *handle)
 	if (ret)
 		goto failed;
 
-	ret = smu_alloc_memory_pool(smu);
-	if (ret)
-		goto failed;
-
 	/*
 	 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
 	 * pool location.
@@ -1395,7 +1403,6 @@ static int smu_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct smu_context *smu = &adev->smu;
-	struct smu_table_context *table_context = &smu->smu_table;
 	int ret = 0;
 
 	if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
@@ -1426,23 +1433,6 @@ static int smu_hw_fini(void *handle)
 		return ret;
 	}
 
-	kfree(table_context->driver_pptable);
-	table_context->driver_pptable = NULL;
-
-	kfree(table_context->max_sustainable_clocks);
-	table_context->max_sustainable_clocks = NULL;
-
-	kfree(table_context->overdrive_table);
-	table_context->overdrive_table = NULL;
-
-	ret = smu_fini_fb_allocations(smu);
-	if (ret)
-		return ret;
-
-	ret = smu_free_memory_pool(smu);
-	if (ret)
-		return ret;
-
 	return 0;
 }
 
drivers/gpu/drm/amd/powerplay/smu_v11_0.c

@@ -439,25 +439,67 @@ int smu_v11_0_init_smc_tables(struct smu_context *smu)
 	struct smu_table *tables = NULL;
 	int ret = 0;
 
 	if (smu_table->tables)
 		return -EINVAL;
 
 	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
 			 GFP_KERNEL);
-	if (!tables)
-		return -ENOMEM;
-
+	if (!tables) {
+		ret = -ENOMEM;
+		goto err0_out;
+	}
 	smu_table->tables = tables;
 
 	ret = smu_tables_init(smu, tables);
 	if (ret)
-		return ret;
+		goto err1_out;
 
 	ret = smu_v11_0_init_dpm_context(smu);
 	if (ret)
-		return ret;
+		goto err1_out;
 
+	smu_table->driver_pptable =
+		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
+	if (!smu_table->driver_pptable) {
+		ret = -ENOMEM;
+		goto err2_out;
+	}
+
+	smu_table->max_sustainable_clocks =
+		kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
+	if (!smu_table->max_sustainable_clocks) {
+		ret = -ENOMEM;
+		goto err3_out;
+	}
+
+	/* Arcturus does not support OVERDRIVE */
+	if (tables[SMU_TABLE_OVERDRIVE].size) {
+		smu_table->overdrive_table =
+			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+		if (!smu_table->overdrive_table) {
+			ret = -ENOMEM;
+			goto err4_out;
+		}
+
+		smu_table->boot_overdrive_table =
+			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+		if (!smu_table->boot_overdrive_table) {
+			ret = -ENOMEM;
+			goto err5_out;
+		}
+	}
+
 	return 0;
+
+err5_out:
+	kfree(smu_table->overdrive_table);
+err4_out:
+	kfree(smu_table->max_sustainable_clocks);
+err3_out:
+	kfree(smu_table->driver_pptable);
+err2_out:
+	smu_v11_0_fini_dpm_context(smu);
+err1_out:
+	kfree(tables);
+err0_out:
+	return ret;
 }
 
 int smu_v11_0_fini_smc_tables(struct smu_context *smu)
@@ -468,6 +510,17 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 	if (!smu_table->tables)
 		return -EINVAL;
 
+	kfree(smu_table->boot_overdrive_table);
+	kfree(smu_table->overdrive_table);
+	kfree(smu_table->max_sustainable_clocks);
+	kfree(smu_table->driver_pptable);
+	smu_table->boot_overdrive_table = NULL;
+	smu_table->overdrive_table = NULL;
+	smu_table->max_sustainable_clocks = NULL;
+	smu_table->driver_pptable = NULL;
+	kfree(smu_table->hardcode_pptable);
+	smu_table->hardcode_pptable = NULL;
+
 	kfree(smu_table->tables);
 	kfree(smu_table->metrics_table);
 	kfree(smu_table->watermarks_table);
@@ -730,18 +783,6 @@ int smu_v11_0_parse_pptable(struct smu_context *smu)
 {
 	int ret;
 
-	struct smu_table_context *table_context = &smu->smu_table;
-	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
-
-	/* during TDR we need to free and alloc the pptable */
-	if (table_context->driver_pptable)
-		kfree(table_context->driver_pptable);
-
-	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
-
-	if (!table_context->driver_pptable)
-		return -ENOMEM;
-
 	ret = smu_store_powerplay_table(smu);
 	if (ret)
 		return -EINVAL;
@@ -982,17 +1023,10 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
 
 int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
 {
-	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
+	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
+			smu->smu_table.max_sustainable_clocks;
 	int ret = 0;
 
-	if (!smu->smu_table.max_sustainable_clocks)
-		max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
-						 GFP_KERNEL);
-	else
-		max_sustainable_clocks = smu->smu_table.max_sustainable_clocks;
-
-	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
-
 	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
 	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
 	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
@@ -1944,24 +1978,11 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
 	int ret = 0;
 
 	if (initialize) {
-		if (table_context->overdrive_table) {
-			return -EINVAL;
-		}
-		table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
-		if (!table_context->overdrive_table) {
-			return -ENOMEM;
-		}
 		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
 		if (ret) {
 			pr_err("Failed to export overdrive table!\n");
 			return ret;
 		}
-		if (!table_context->boot_overdrive_table) {
-			table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL);
-			if (!table_context->boot_overdrive_table) {
-				return -ENOMEM;
-			}
-		}
 	}
 	ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
 	if (ret) {
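The reworked smu_v11_0_init_smc_tables() above relies on the kernel's usual goto-unwind ladder: each allocation failure jumps to a label that frees exactly what was set up before it, in reverse order, so there is one success path and one ordered cleanup chain. A minimal standalone C sketch of the same ladder, with generic names and plain calloc standing in for the SMU structures:

#include <stdlib.h>

struct mock_ctx {
        void *tables;
        void *pptable;
        void *clocks;
};

static int mock_ctx_init(struct mock_ctx *c)
{
        int ret = 0;

        c->tables = calloc(1, 64);
        if (!c->tables) {
                ret = -1;
                goto err0_out;
        }

        c->pptable = calloc(1, 64);
        if (!c->pptable) {
                ret = -1;
                goto err1_out;
        }

        c->clocks = calloc(1, 64);
        if (!c->clocks) {
                ret = -1;
                goto err2_out;
        }

        return 0;

        /* Labels fall through: a failure at step N frees steps N-1..0. */
err2_out:
        free(c->pptable);
err1_out:
        free(c->tables);
err0_out:
        return ret;
}

int main(void)
{
        struct mock_ctx c;

        if (mock_ctx_init(&c))
                return 1;
        free(c.clocks);
        free(c.pptable);
        free(c.tables);
        return 0;
}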
|
Loading…
Reference in New Issue
Block a user