Merge branch 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux into drm-next

More stuff for 4.17. Highlights:
- More fixes for "wattman" like functionality (fine grained clk/voltage control)
- Add more power profile infrastructure (context based dpm)
- SR-IOV fixes
- Add iomem debugging interface for use with umr
- Powerplay and cgs cleanups
- DC fixes and cleanups
- ttm improvements
- Misc cleanups all over

* 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux: (143 commits)
  drm/amdgpu:Always save uvd vcpu_bo in VM Mode
  drm/amdgpu:Correct max uvd handles
  drm/amdgpu: replace iova debugfs file with iomem (v3)
  drm/amd/display: validate plane format on primary plane
  drm/amdgpu: Clean sdma wptr register when only enable wptr polling
  drm/amd/amdgpu: re-add missing GC 9.1 and SDMA0 4.1 sh_mask header files
  drm/amdgpu: give warning before sleep in kiq_r/wreg
  drm/amdgpu: further mitigate workaround for i915
  drm/amdgpu: drop gtt->adev
  drm/amdgpu: add amdgpu_evict_gtt debugfs entry
  drm/amd/pp: Add #ifdef checks for CONFIG_ACPI
  drm/amd/pp: fix "Delete the wrapper layer of smu_allocate/free_memory"
  drm/amd/pp: Drop wrapper functions for upper/lower_32_bits
  drm/amdgpu: Delete cgs wrapper functions for gpu memory manager
  drm/amd/pp: Delete the wrapper layer of smu_allocate/free_memory
  drm/amd/pp: Remove cgs wrapper function for temperature update
  Revert "drm/amd/pp: Add a pp feature mask bit for AutoWattman feature"
  drm/amd/pp: Add auto power profilng switch based on workloads (v2)
  drm/amd/pp: Revert gfx/compute profile switch sysfs
  drm/amd/pp: Fix sclk in highest two levels when compute on smu7
  ...
commit 128ccceaba
@@ -30,7 +30,6 @@ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
	-I$(FULL_AMD_PATH)/include \
	-I$(FULL_AMD_PATH)/amdgpu \
	-I$(FULL_AMD_PATH)/scheduler \
	-I$(FULL_AMD_PATH)/powerplay/inc \
	-I$(FULL_AMD_PATH)/acp/include \
	-I$(FULL_AMD_DISPLAY_PATH) \
@@ -181,10 +181,6 @@ extern int amdgpu_cik_support;
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

/* GPU RESET flags */
#define AMDGPU_RESET_INFO_VRAM_LOST  (1 << 0)
#define AMDGPU_RESET_INFO_FULLRESET  (1 << 1)

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
@@ -343,14 +339,6 @@ struct amdgpu_ih_funcs {
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page *page;
	dma_addr_t addr;
};

/*
 * Clocks
 */
@@ -1080,7 +1068,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
 * Writeback
 */
#define AMDGPU_MAX_WB 512	/* Reserve at most 512 WB slots for amdgpu-owned rings. */
#define AMDGPU_MAX_WB 128	/* Reserve at most 128 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo *wb_obj;
@@ -1505,7 +1493,7 @@ struct amdgpu_device {
	/* MC */
	struct amdgpu_gmc gmc;
	struct amdgpu_gart gart;
	struct amdgpu_dummy_page dummy_page;
	dma_addr_t dummy_page_addr;
	struct amdgpu_vm_manager vm_manager;
	struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];

@@ -1839,9 +1827,6 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size);
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
	size_t size;
	u32 retry = 3;

	if (amdgpu_acpi_pcie_notify_device_ready(adev))
		return -EINVAL;

	/* Get the device handle */
	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
@@ -24,7 +24,6 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
@@ -42,152 +41,6 @@ struct amdgpu_cgs_device {
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev

static void *amdgpu_cgs_register_pp_handle(struct cgs_device *cgs_device,
			int (*call_back_func)(struct amd_pp_init *, void **))
{
	CGS_FUNC_ADEV;
	struct amd_pp_init pp_init;
	struct amd_powerplay *amd_pp;

	if (call_back_func == NULL)
		return NULL;

	amd_pp = &(adev->powerplay);
	pp_init.chip_family = adev->family;
	pp_init.chip_id = adev->asic_type;
	pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
	pp_init.feature_mask = amdgpu_pp_feature_mask;
	pp_init.device = cgs_device;
	if (call_back_func(&pp_init, &(amd_pp->pp_handle)))
		return NULL;

	return adev->powerplay.pp_handle;
}

static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;


	switch(type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		break;
	default:
		return -EINVAL;
	}


	*handle = 0;

	ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
			       NULL, NULL, &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, true);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);

	}
	return 0;
}

static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
@@ -857,61 +710,6 @@ static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
	return amdgpu_sriov_vf(adev);
}

static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_DEV:
		sys_info->value = adev->pdev->device;
		break;
	case CGS_SYSTEM_INFO_PCIE_REV:
		sys_info->value = adev->pdev->revision;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	case CGS_SYSTEM_INFO_GFX_SE_INFO:
		sys_info->value = adev->gfx.config.max_shader_engines;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
		sys_info->value = adev->pdev->subsystem_device;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
		sys_info->value = adev->pdev->subsystem_vendor;
		break;
	case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
		sys_info->value = adev->pdev->devfn;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
@@ -982,235 +780,7 @@ static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool ena
	return 0;
}

/** \brief evaluate acpi namespace object, handle or pathname must be valid
 * \param cgs_device
 * \param info input/output arguments for the control method
 * \return status
 */

#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params, *obj;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument;
	uint32_t i, count;
	acpi_status status;
	int result;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER))
			    && (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -EINVAL;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->data_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->data_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

	result = 0;
free_obj:
	kfree(obj);
free_input:
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif

static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
				       uint32_t acpi_method,
				       uint32_t acpi_function,
				       void *pinput, void *poutput,
				       uint32_t output_count,
				       uint32_t input_size,
				       uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}

static int amdgpu_cgs_set_temperature_range(struct cgs_device *cgs_device,
					    int min_temperature,
					    int max_temperature)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm.thermal.min_temp = min_temperature;
	adev->pm.dpm.thermal.max_temp = max_temperature;

	return 0;
}

static const struct cgs_ops amdgpu_cgs_ops = {
	.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
	.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
	.gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
	.gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
	.kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
	.kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
@@ -1225,13 +795,9 @@ static const struct cgs_ops amdgpu_cgs_ops = {
	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
	.call_acpi_method = amdgpu_cgs_call_acpi_method,
	.query_system_info = amdgpu_cgs_query_system_info,
	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
	.register_pp_handle = amdgpu_cgs_register_pp_handle,
	.set_temperature_range = amdgpu_cgs_set_temperature_range,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
@@ -346,8 +346,8 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.allow_reserved_eviction = false,
		.resv = bo->tbo.resv
		.resv = bo->tbo.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;
@@ -767,10 +767,21 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
	return 0;
}

static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
	{"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram}
	{"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
	{"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
};

int amdgpu_debugfs_init(struct amdgpu_device *adev)
@@ -492,7 +492,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
@@ -530,8 +530,9 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb >> 3, adev->wb.used);
		__clear_bit(wb, adev->wb.used);
}

/**
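Note on the writeback change above: a minimal sketch, assuming the layout the diff implies — wb_init now clears AMDGPU_MAX_WB * 8 dwords and wb_free shifts the handle right by three bits, so each of the 128 tracked slots appears to span eight dwords. The helper below is hypothetical, not part of the patch:

/* Hypothetical helper: a writeback handle is the dword offset of the
 * first of eight consecutive dwords; the tracking bitmap works on the
 * 8-dword slot index, the same conversion amdgpu_device_wb_free() does.
 */
static inline u32 amdgpu_example_wb_slot(u32 wb_handle)
{
	return wb_handle >> 3;	/* eight dwords per tracked slot */
}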
@@ -1458,11 +1459,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_free_static_csa(adev);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
@@ -1492,6 +1488,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_free_static_csa(adev);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
@@ -1588,6 +1591,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
			if (r)
				return r;
		}
	}

@@ -1621,6 +1626,8 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
			if (r)
				return r;
		}
	}

@@ -2467,17 +2474,71 @@ static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
	return r;
}

static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_bo *bo, *tmp;
	struct dma_fence *fence = NULL, *next = NULL;
	long r = 1;
	int i = 0;
	long tmo;

	if (amdgpu_sriov_runtime(adev))
		tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
	else
		tmo = msecs_to_jiffies(100);

	DRM_INFO("recover vram bo from shadow start\n");
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		next = NULL;
		amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait_timeout(fence, false, tmo);
			if (r == 0)
				pr_err("wait fence %p[%d] timeout\n", fence, i);
			else if (r < 0)
				pr_err("wait fence %p[%d] interrupted\n", fence, i);
			if (r < 1) {
				dma_fence_put(fence);
				fence = next;
				break;
			}
			i++;
		}

		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence) {
		r = dma_fence_wait_timeout(fence, false, tmo);
		if (r == 0)
			pr_err("wait fence %p[%d] timeout\n", fence, i);
		else if (r < 0)
			pr_err("wait fence %p[%d] interrupted\n", fence, i);

	}
	dma_fence_put(fence);

	if (r > 0)
		DRM_INFO("recover vram bo from shadow done\n");
	else
		DRM_ERROR("recover vram bo from shadow failed\n");

	return (r > 0?0:1);
}

/*
 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
 *
 * @adev: amdgpu device pointer
 * @reset_flags: output param tells caller the reset result
 *
 * attempt to do soft-reset or full-reset and reinitialize Asic
 * return 0 means successed otherwise failed
 */
static int amdgpu_device_reset(struct amdgpu_device *adev,
			       uint64_t* reset_flags)
static int amdgpu_device_reset(struct amdgpu_device *adev)
{
	bool need_full_reset, vram_lost = 0;
	int r;
@@ -2492,7 +2553,6 @@ static int amdgpu_device_reset(struct amdgpu_device *adev,
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}

	}

	if (need_full_reset) {
@@ -2541,13 +2601,8 @@ static int amdgpu_device_reset(struct amdgpu_device *adev,
		}
	}

	if (reset_flags) {
		if (vram_lost)
			(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;

		if (need_full_reset)
			(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
	}
	if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
		r = amdgpu_device_handle_vram_lost(adev);

	return r;
}
@@ -2556,14 +2611,11 @@ static int amdgpu_device_reset(struct amdgpu_device *adev,
 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu device pointer
 * @reset_flags: output param tells caller the reset result
 *
 * do VF FLR and reinitialize Asic
 * return 0 means successed otherwise failed
 */
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
				     uint64_t *reset_flags,
				     bool from_hypervisor)
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, bool from_hypervisor)
{
	int r;

@@ -2584,27 +2636,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,

	/* now we are okay to resume SMC/CP/SDMA */
	r = amdgpu_device_ip_reinit_late_sriov(adev);
	amdgpu_virt_release_full_gpu(adev, true);
	if (r)
		goto error;

	amdgpu_irq_gpu_reset_resume_helper(adev);
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
		atomic_inc(&adev->vram_lost_counter);
		r = amdgpu_device_handle_vram_lost(adev);
	}

error:
	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

	if (reset_flags) {
		if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
			(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
			atomic_inc(&adev->vram_lost_counter);
		}

		/* VF FLR or hotlink reset is always full-reset */
		(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
	}

	return r;
}
@@ -2623,7 +2667,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job, bool force)
{
	struct drm_atomic_state *state = NULL;
	uint64_t reset_flags = 0;
	int i, r, resched;

	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
@@ -2645,22 +2688,23 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* store modesetting */
	if (amdgpu_device_has_dc_support(adev))
		state = drm_atomic_helper_suspend(adev->ddev);

	/* block scheduler */
	/* block all schedulers and reset given job's ring */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* only focus on the ring hit timeout if &job not NULL */
		kthread_park(ring->sched.thread);

		if (job && job->ring->idx != i)
			continue;

		kthread_park(ring->sched.thread);
		drm_sched_hw_job_reset(&ring->sched, &job->base);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
@@ -2668,68 +2712,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
	}

	if (amdgpu_sriov_vf(adev))
		r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
	else
		r = amdgpu_device_reset(adev, &reset_flags);
		r = amdgpu_device_reset(adev);

	if (!r) {
		if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
		    (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				next = NULL;
				amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			dma_fence_put(fence);
		}

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* only focus on the ring hit timeout if &job not NULL */
			if (job && job->ring->idx != i)
				continue;
		if (!ring || !ring->sched.thread)
			continue;

		/* only need recovery sched of the given job's ring
		 * or all rings (in the case @job is NULL)
		 * after above amdgpu_reset accomplished
		 */
		if ((!job || job->ring->idx == i) && !r)
			drm_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* only focus on the ring hit timeout if &job not NULL */
			if (job && job->ring->idx != i)
				continue;

			kthread_unpark(adev->rings[i]->sched.thread);
		}
		kthread_unpark(ring->sched.thread);
	}

	if (amdgpu_device_has_dc_support(adev)) {
@@ -341,17 +341,9 @@ enum amdgpu_pcie_gen {
		((adev)->powerplay.pp_funcs->reset_power_profile_state(\
			(adev)->powerplay.pp_handle, request))

#define amdgpu_dpm_get_power_profile_state(adev, query) \
		((adev)->powerplay.pp_funcs->get_power_profile_state(\
			(adev)->powerplay.pp_handle, query))

#define amdgpu_dpm_set_power_profile_state(adev, request) \
		((adev)->powerplay.pp_funcs->set_power_profile_state(\
			(adev)->powerplay.pp_handle, request))

#define amdgpu_dpm_switch_power_profile(adev, type) \
#define amdgpu_dpm_switch_power_profile(adev, type, en) \
		((adev)->powerplay.pp_funcs->switch_power_profile(\
			(adev)->powerplay.pp_handle, type))
			(adev)->powerplay.pp_handle, type, en))

#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
		((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
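A hedged usage sketch of the reworked macro: the diff only changes the definition, so the caller below is an assumption, and PP_SMC_POWER_PROFILE_COMPUTE is assumed to be the 4.17-era workload-type enum:

/* Assumed caller: latch the compute workload profile on while compute
 * work is queued, and release it when the queues go idle.
 */
static void example_set_compute_profile(struct amdgpu_device *adev, bool busy)
{
	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, busy);
}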
@@ -121,7 +121,7 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
uint amdgpu_pp_feature_mask = 0x3fff;
uint amdgpu_pp_feature_mask = 0xffffbfff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -284,10 +284,10 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);

MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto");
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);

MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable");
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);

#ifdef CONFIG_DRM_AMDGPU_SI
@@ -68,17 +68,15 @@
 */
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
	struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;

	if (adev->dummy_page_addr)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
	adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		adev->dummy_page_addr = 0;
		return -ENOMEM;
	}
	return 0;
@@ -93,12 +91,11 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
 */
static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
	if (!adev->dummy_page_addr)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
	pci_unmap_page(adev->pdev, adev->dummy_page_addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	adev->dummy_page_addr = 0;
}

/**
@@ -236,7 +233,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[p] = NULL;
#endif
		page_base = adev->dummy_page.addr;
		page_base = adev->dummy_page_addr;
		if (!adev->gart.ptr)
			continue;

@@ -347,7 +344,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->dummy_page.page)
	if (adev->dummy_page_addr)
		return 0;

	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
@@ -181,7 +181,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		}
	}

	if (ring->funcs->init_cond_exec)
	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

#ifdef CONFIG_X86_64
@@ -279,11 +279,6 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
		return r;
	}

	r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
	if (r) {
		return r;
	}

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
@@ -302,7 +297,6 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
@@ -322,14 +316,45 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;
	long tmo_gfx, tmo_mm;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* for MM engines in hypervisor side they are not scheduled together
		 * with CP and SDMA engines, so even in exclusive mode MM engine could
		 * still running on other VF thus the IB TEST TIMEOUT for MM engines
		 * under SR-IOV should be set to a long time. 8 sec should be enough
		 * for the MM comes back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* for CP & SDMA engines since they are scheduled together so
		 * need to make the timeout width enough to cover the time
		 * cost waiting for it coming back under RUNTIME only
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		if (!ring || !ring->ready)
			continue;

		r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
		/* MM engine need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (r) {
			ring->ready = false;

@@ -208,7 +208,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
	if (r) {
		adev->irq.installed = false;
		flush_work(&adev->hotplug_work);
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
		return r;
	}
@@ -234,7 +235,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		flush_work(&adev->hotplug_work);
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
	}

@@ -341,8 +341,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
	struct ttm_operation_ctx ctx = {
		.interruptible = !kernel,
		.no_wait_gpu = false,
		.allow_reserved_eviction = true,
		.resv = resv
		.resv = resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
@@ -418,8 +418,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
	amdgpu_ttm_placement_from_domain(bo, domain);

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
				 &bo->placement, page_align, &ctx, NULL,
				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
				 &bo->placement, page_align, &ctx, acc_size,
				 sg, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;

@@ -281,8 +281,6 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
@@ -734,161 +734,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
	return -EINVAL;
}

static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
		char *buf, struct amd_pp_profile *query)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0xff;

	if (adev->powerplay.pp_funcs->get_power_profile_state)
		ret = amdgpu_dpm_get_power_profile_state(
				adev, query);

	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"%d %d %d %d %d\n",
			query->min_sclk / 100,
			query->min_mclk / 100,
			query->activity_threshold,
			query->up_hyst,
			query->down_hyst);
}

static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct amd_pp_profile query = {0};

	query.type = AMD_PP_GFX_PROFILE;

	return amdgpu_get_pp_power_profile(dev, buf, &query);
}

static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct amd_pp_profile query = {0};

	query.type = AMD_PP_COMPUTE_PROFILE;

	return amdgpu_get_pp_power_profile(dev, buf, &query);
}

static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
		const char *buf,
		size_t count,
		struct amd_pp_profile *request)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t loop = 0;
	char *sub_str, buf_cpy[128], *tmp_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	long int value;
	int ret = 0xff;

	if (strncmp("reset", buf, strlen("reset")) == 0) {
		if (adev->powerplay.pp_funcs->reset_power_profile_state)
			ret = amdgpu_dpm_reset_power_profile_state(
					adev, request);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		return count;
	}

	if (strncmp("set", buf, strlen("set")) == 0) {
		if (adev->powerplay.pp_funcs->set_power_profile_state)
			ret = amdgpu_dpm_set_power_profile_state(
					adev, request);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		return count;
	}

	if (count + 1 >= 128) {
		count = -EINVAL;
		goto fail;
	}

	memcpy(buf_cpy, buf, count + 1);
	tmp_str = buf_cpy;

	while (tmp_str[0]) {
		sub_str = strsep(&tmp_str, delimiter);
		ret = kstrtol(sub_str, 0, &value);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}

		switch (loop) {
		case 0:
			/* input unit MHz convert to dpm table unit 10KHz*/
			request->min_sclk = (uint32_t)value * 100;
			break;
		case 1:
			/* input unit MHz convert to dpm table unit 10KHz*/
			request->min_mclk = (uint32_t)value * 100;
			break;
		case 2:
			request->activity_threshold = (uint16_t)value;
			break;
		case 3:
			request->up_hyst = (uint8_t)value;
			break;
		case 4:
			request->down_hyst = (uint8_t)value;
			break;
		default:
			break;
		}

		loop++;
	}
	if (adev->powerplay.pp_funcs->set_power_profile_state)
		ret = amdgpu_dpm_set_power_profile_state(adev, request);

	if (ret)
		count = -EINVAL;

fail:
	return count;
}

static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct amd_pp_profile request = {0};

	request.type = AMD_PP_GFX_PROFILE;

	return amdgpu_set_pp_power_profile(dev, buf, count, &request);
}

static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct amd_pp_profile request = {0};

	request.type = AMD_PP_COMPUTE_PROFILE;

	return amdgpu_set_pp_power_profile(dev, buf, count, &request);
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
@@ -916,12 +761,6 @@ static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_mclk_od,
		amdgpu_set_pp_mclk_od);
static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_gfx_power_profile,
		amdgpu_set_pp_gfx_power_profile);
static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_compute_power_profile,
		amdgpu_set_pp_compute_power_profile);
static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_power_profile_mode,
		amdgpu_set_pp_power_profile_mode);
@@ -1766,21 +1605,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
		DRM_ERROR("failed to create device file pp_mclk_od\n");
		return ret;
	}
	ret = device_create_file(adev->dev,
			&dev_attr_pp_gfx_power_profile);
	if (ret) {
		DRM_ERROR("failed to create device file "
				"pp_gfx_power_profile\n");
		return ret;
	}
	ret = device_create_file(adev->dev,
			&dev_attr_pp_compute_power_profile);
	if (ret) {
		DRM_ERROR("failed to create device file "
				"pp_compute_power_profile\n");
		return ret;
	}

	ret = device_create_file(adev->dev,
			&dev_attr_pp_power_profile_mode);
	if (ret) {
@@ -1826,10 +1650,6 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
	device_remove_file(adev->dev,
			&dev_attr_pp_gfx_power_profile);
	device_remove_file(adev->dev,
			&dev_attr_pp_compute_power_profile);
	device_remove_file(adev->dev,
			&dev_attr_pp_power_profile_mode);
	device_remove_file(adev->dev,
@@ -94,9 +94,7 @@ static int amdgpu_pp_early_init(void *handle)
	}

	if (adev->powerplay.ip_funcs->early_init)
		ret = adev->powerplay.ip_funcs->early_init(
					amd_pp->cgs_device ? amd_pp->cgs_device :
					amd_pp->pp_handle);
		ret = adev->powerplay.ip_funcs->early_init(adev);

	return ret;
}
@@ -107,12 +107,18 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);
		goto error;

	bo->prime_shared_count = 1;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	ww_mutex_unlock(&resv->lock);
	return &bo->gem_base;

error:
	ww_mutex_unlock(&resv->lock);
	return ERR_PTR(ret);
}

static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
@@ -484,7 +484,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring);
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
@@ -63,21 +63,27 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		INIT_LIST_HEAD(&sa_manager->flist[i]);

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     0, NULL, NULL, &sa_manager->bo);
	r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
				    &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
	return r;
}

void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
			      struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return;
	}

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist,
		amdgpu_sa_bo_try_free(sa_manager);
@@ -88,57 +94,11 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
	amdgpu_bo_unref(&sa_manager->bo);

	amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	sa_manager->size = 0;
}

int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(sa_manager->bo);
		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}

int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = amdgpu_bo_reserve(sa_manager->bo, true);
	if (!r) {
		amdgpu_bo_kunmap(sa_manager->bo);
		amdgpu_bo_unpin(sa_manager->bo);
		amdgpu_bo_unreserve(sa_manager->bo);
	}
	return r;
}

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
@@ -213,9 +213,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (adev->mman.buffer_funcs &&
		    adev->mman.buffer_funcs_ring &&
		    adev->mman.buffer_funcs_ring->ready == false) {
		if (!adev->mman.buffer_funcs_enabled) {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
@@ -331,7 +329,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!ring->ready) {
	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}
@@ -577,12 +575,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (adev->mman.buffer_funcs == NULL ||
	    adev->mman.buffer_funcs_ring == NULL ||
	    !adev->mman.buffer_funcs_ring->ready) {
		/* use memcpy */

	if (!adev->mman.buffer_funcs_enabled)
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
@@ -621,6 +616,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
@@ -640,6 +636,15 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
@@ -674,7 +679,6 @@ struct amdgpu_ttm_gup_task_list {

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	struct amdgpu_device	*adev;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
@@ -832,6 +836,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void*)ttm;
	uint64_t flags;
	int r = 0;
@@ -858,9 +863,9 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
		return 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
		ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
@@ -937,6 +942,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)

static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

@@ -947,7 +953,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
@@ -969,8 +975,7 @@ static struct ttm_backend_func amdgpu_backend_func = {
};

static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct page *dummy_read_page)
					   unsigned long size, uint32_t page_flags)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;
@@ -982,8 +987,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->adev = adev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
		kfree(gtt);
		return NULL;
	}
@ -1402,7 +1406,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
|
||||
adev->gmc.visible_vram_size = vis_vram_limit;
|
||||
|
||||
/* Change the size here instead of the init above so only lpfn is affected */
|
||||
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
|
||||
amdgpu_ttm_set_buffer_funcs_status(adev, false);
|
||||
#ifdef CONFIG_64BIT
|
||||
adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
|
||||
adev->gmc.visible_vram_size);
|
||||
#endif
|
||||
|
||||
/*
|
||||
*The reserved vram for firmware must be pinned to the specified
|
||||
@ -1495,6 +1503,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
|
||||
amdgpu_ttm_debugfs_fini(adev);
|
||||
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
|
||||
amdgpu_ttm_fw_reserve_vram_fini(adev);
|
||||
if (adev->mman.aper_base_kaddr)
|
||||
iounmap(adev->mman.aper_base_kaddr);
|
||||
adev->mman.aper_base_kaddr = NULL;
|
||||
|
||||
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
|
||||
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
|
||||
@ -1510,18 +1521,30 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
|
||||
DRM_INFO("amdgpu: ttm finalized\n");
|
||||
}
|
||||
|
||||
/* this should only be called at bootup or when userspace
|
||||
* isn't running */
|
||||
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
|
||||
/**
|
||||
* amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @enable: true when we can use buffer functions.
|
||||
*
|
||||
* Enable/disable use of buffer functions during suspend/resume. This should
|
||||
* only be called at bootup or when userspace isn't running.
|
||||
*/
|
||||
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
|
||||
{
|
||||
struct ttm_mem_type_manager *man;
|
||||
struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
|
||||
uint64_t size;
|
||||
|
||||
if (!adev->mman.initialized)
|
||||
if (!adev->mman.initialized || adev->in_gpu_reset)
|
||||
return;
|
||||
|
||||
man = &adev->mman.bdev.man[TTM_PL_VRAM];
|
||||
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
|
||||
if (enable)
|
||||
size = adev->gmc.real_vram_size;
|
||||
else
|
||||
size = adev->gmc.visible_vram_size;
|
||||
man->size = size >> PAGE_SHIFT;
|
||||
adev->mman.buffer_funcs_enabled = enable;
|
||||
}
|
||||
|
||||
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
@ -1620,6 +1643,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
|
||||
unsigned i;
|
||||
int r;
|
||||
|
||||
if (direct_submit && !ring->ready) {
|
||||
DRM_ERROR("Trying to move memory with ring turned off.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
|
||||
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
|
||||
num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
|
||||
@ -1693,7 +1721,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
||||
struct amdgpu_job *job;
|
||||
int r;
|
||||
|
||||
if (!ring->ready) {
|
||||
if (!adev->mman.buffer_funcs_enabled) {
|
||||
DRM_ERROR("Trying to clear memory with ring turned off.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1929,38 +1957,98 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
|
||||
|
||||
#endif
|
||||
|
||||
static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
struct amdgpu_device *adev = file_inode(f)->i_private;
|
||||
int r;
|
||||
uint64_t phys;
|
||||
struct iommu_domain *dom;
|
||||
|
||||
// always return 8 bytes
|
||||
if (size != 8)
|
||||
return -EINVAL;
|
||||
|
||||
// only accept page addresses
|
||||
if (*pos & 0xFFF)
|
||||
return -EINVAL;
|
||||
ssize_t result = 0;
|
||||
int r;
|
||||
|
||||
dom = iommu_get_domain_for_dev(adev->dev);
|
||||
if (dom)
|
||||
phys = iommu_iova_to_phys(dom, *pos);
|
||||
else
|
||||
phys = *pos;
|
||||
|
||||
r = copy_to_user(buf, &phys, 8);
|
||||
if (r)
|
||||
return -EFAULT;
|
||||
while (size) {
|
||||
phys_addr_t addr = *pos & PAGE_MASK;
|
||||
loff_t off = *pos & ~PAGE_MASK;
|
||||
size_t bytes = PAGE_SIZE - off;
|
||||
unsigned long pfn;
|
||||
struct page *p;
|
||||
void *ptr;
|
||||
|
||||
return 8;
|
||||
bytes = bytes < size ? bytes : size;
|
||||
|
||||
addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
|
||||
|
||||
pfn = addr >> PAGE_SHIFT;
|
||||
if (!pfn_valid(pfn))
|
||||
return -EPERM;
|
||||
|
||||
p = pfn_to_page(pfn);
|
||||
if (p->mapping != adev->mman.bdev.dev_mapping)
|
||||
return -EPERM;
|
||||
|
||||
ptr = kmap(p);
|
||||
r = copy_to_user(buf, ptr, bytes);
|
||||
kunmap(p);
|
||||
if (r)
|
||||
return -EFAULT;
|
||||
|
||||
size -= bytes;
|
||||
*pos += bytes;
|
||||
result += bytes;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static const struct file_operations amdgpu_ttm_iova_fops = {
|
||||
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
struct amdgpu_device *adev = file_inode(f)->i_private;
|
||||
struct iommu_domain *dom;
|
||||
ssize_t result = 0;
|
||||
int r;
|
||||
|
||||
dom = iommu_get_domain_for_dev(adev->dev);
|
||||
|
||||
while (size) {
|
||||
phys_addr_t addr = *pos & PAGE_MASK;
|
||||
loff_t off = *pos & ~PAGE_MASK;
|
||||
size_t bytes = PAGE_SIZE - off;
|
||||
unsigned long pfn;
|
||||
struct page *p;
|
||||
void *ptr;
|
||||
|
||||
bytes = bytes < size ? bytes : size;
|
||||
|
||||
addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
|
||||
|
||||
pfn = addr >> PAGE_SHIFT;
|
||||
if (!pfn_valid(pfn))
|
||||
return -EPERM;
|
||||
|
||||
p = pfn_to_page(pfn);
|
||||
if (p->mapping != adev->mman.bdev.dev_mapping)
|
||||
return -EPERM;
|
||||
|
||||
ptr = kmap(p);
|
||||
r = copy_from_user(ptr, buf, bytes);
|
||||
kunmap(p);
|
||||
if (r)
|
||||
return -EFAULT;
|
||||
|
||||
size -= bytes;
|
||||
*pos += bytes;
|
||||
result += bytes;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static const struct file_operations amdgpu_ttm_iomem_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = amdgpu_iova_to_phys_read,
|
||||
.read = amdgpu_iomem_read,
|
||||
.write = amdgpu_iomem_write,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
@ -1973,7 +2061,7 @@ static const struct {
|
||||
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
|
||||
{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
|
||||
#endif
|
||||
{ "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
|
||||
{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
|
||||
};
|
||||
|
||||
#endif
|
||||
|
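Note: the amdgpu_iomem debugfs file above is intended for debugging tools such as umr; unlike the old amdgpu_iova file (which only translated a single 8-byte address per read), it lets userspace read and write GPU-visible memory directly, with the IOMMU translation applied per page. A minimal userspace sketch of how such a tool might consume it is shown below; the debugfs path, DRI minor number and DMA address are illustrative assumptions, not part of the patch.

    /* Hypothetical usage sketch: read 16 bytes of GPU-visible memory at a
     * given bus address via the amdgpu_iomem debugfs file. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/sys/kernel/debug/dri/0/amdgpu_iomem";
        uint8_t buf[16];
        int fd = open(path, O_RDONLY);

        if (fd < 0)
            return 1;
        /* the file offset is interpreted as a DMA/bus address; the kernel
         * side translates it through the device's IOMMU domain page by page */
        if (pread(fd, buf, sizeof(buf), 0x100000) != (ssize_t)sizeof(buf)) {
            close(fd);
            return 1;
        }
        for (unsigned int i = 0; i < sizeof(buf); i++)
            printf("%02x ", buf[i]);
        printf("\n");
        close(fd);
        return 0;
    }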
@@ -44,6 +44,7 @@ struct amdgpu_mman {
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
void __iomem *aper_base_kaddr;

#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_entries[8];
@@ -52,6 +53,7 @@ struct amdgpu_mman {
/* buffer handling */
const struct amdgpu_buffer_funcs *buffer_funcs;
struct amdgpu_ring *buffer_funcs_ring;
bool buffer_funcs_enabled;

struct mutex gtt_window_lock;
/* Scheduler entity for buffer moves */
@@ -74,6 +76,11 @@ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
bool enable);

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct reservation_object *resv,
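Note: the new buffer_funcs_enabled flag gives the rest of the driver a single answer to "may TTM use the copy engine right now?". A condensed sketch of the intended call pattern follows; it assumes the amdgpu driver headers, and the engine hooks are hypothetical stand-ins for the per-ASIC SDMA start/stop functions changed later in this series.

    /* Hedged sketch of the gating pattern: disable buffer functions before
     * taking the SDMA ring down, re-enable them once it is running again.
     * amdgpu_ttm_set_buffer_funcs_status() also resizes the TTM VRAM
     * manager, so moves fall back to the CPU path while disabled. */
    static void example_sdma_stop(struct amdgpu_device *adev)
    {
        amdgpu_ttm_set_buffer_funcs_status(adev, false);
        /* ... stop the ring here ... */
    }

    static void example_sdma_start(struct amdgpu_device *adev)
    {
        /* ... bring the ring up and test it here ... */
        amdgpu_ttm_set_buffer_funcs_status(adev, true);
    }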
@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)

cancel_delayed_work_sync(&adev->uvd.idle_work);

for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
break;
/* only valid for physical mode */
if (adev->asic_type < CHIP_POLARIS10) {
for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
break;

if (i == AMDGPU_MAX_UVD_HANDLES)
return 0;
if (i == adev->uvd.max_handles)
return 0;
}

size = amdgpu_bo_size(adev->uvd.vcpu_bo);
ptr = adev->uvd.cpu_addr;
@@ -1116,9 +1119,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
container_of(work, struct amdgpu_device, uvd.idle_work.work);
unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

if (amdgpu_sriov_vf(adev))
return;

if (fences == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
@@ -1138,11 +1138,12 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
bool set_clocks;

if (amdgpu_sriov_vf(adev))
return;

set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true);
@@ -1158,7 +1159,8 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)

void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
if (!amdgpu_sriov_vf(ring->adev))
schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**

@@ -300,9 +300,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
container_of(work, struct amdgpu_device, vce.idle_work.work);
unsigned i, count = 0;

if (amdgpu_sriov_vf(adev))
return;

for (i = 0; i < adev->vce.num_rings; i++)
count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

@@ -362,7 +359,8 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
*/
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
if (!amdgpu_sriov_vf(ring->adev))
schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
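Note: UVD and VCE share the same idle-work pattern: begin_use cancels the pending idle tick (and re-enables clocks if the cancel failed because the block had already gone idle), end_use re-arms it. Under SR-IOV the host owns clock and power gating, so the guest now skips the mechanism entirely. A condensed sketch, assuming the amdgpu headers; the function names are illustrative, not the driver's:

    /* Hedged sketch of the begin_use/end_use idle-work pattern above. */
    static void example_ring_begin_use(struct amdgpu_ring *ring)
    {
        struct amdgpu_device *adev = ring->adev;

        if (amdgpu_sriov_vf(adev))
            return; /* host controls clocks under SR-IOV */

        /* a successfully cancelled tick means the block never went idle;
         * otherwise clocks were dropped and must be raised again */
        if (!cancel_delayed_work_sync(&adev->uvd.idle_work))
            amdgpu_dpm_enable_uvd(adev, true);
    }

    static void example_ring_end_use(struct amdgpu_ring *ring)
    {
        if (!amdgpu_sriov_vf(ring->adev))
            schedule_delayed_work(&ring->adev->uvd.idle_work,
                                  UVD_IDLE_TIMEOUT);
    }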
@@ -22,7 +22,9 @@
*/

#include "amdgpu.h"
#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 20

uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
@@ -137,9 +139,9 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r;
signed long r, cnt = 0;
unsigned long flags;
uint32_t val, seq;
uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;

@@ -153,18 +155,39 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
spin_unlock_irqrestore(&kiq->ring_lock, flags);

r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
if (r < 1) {
DRM_ERROR("wait for kiq fence error: %ld\n", r);
return ~0;
}
val = adev->wb.wb[adev->virt.reg_val_offs];

return val;
/* don't wait any longer in the GPU reset case, because that may
 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
 * never return if we keep waiting in virt_kiq_rreg, which causes
 * gpu_recover() to hang there.
 *
 * also don't wait any longer when called from IRQ context
 */
if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
goto failed_kiq_read;

if (in_interrupt())
might_sleep();

while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}

if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_read;

return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
pr_err("failed to read reg:%x\n", reg);
return ~0;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
signed long r;
signed long r, cnt = 0;
unsigned long flags;
uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -180,8 +203,34 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
spin_unlock_irqrestore(&kiq->ring_lock, flags);

r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
if (r < 1)
DRM_ERROR("wait for kiq fence error: %ld\n", r);

/* don't wait any longer in the GPU reset case, because that may
 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
 * never return if we keep waiting in virt_kiq_rreg, which causes
 * gpu_recover() to hang there.
 *
 * also don't wait any longer when called from IRQ context
 */
if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
goto failed_kiq_write;

if (in_interrupt())
might_sleep();

while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {

msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}

if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_write;

return;

failed_kiq_write:
pr_err("failed to write reg:%x\n", reg);
}

/**
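Note: the old code waited up to 100 seconds in one blocking poll; the new code polls for 5 ms, then retries up to 20 times with a 5 ms sleep in between (a worst case of roughly 200 ms), and refuses to sleep at all during GPU reset or from IRQ context. The retry skeleton, extracted as a sketch (amdgpu headers assumed; the function name is hypothetical):

    /* Hedged sketch of the bounded KIQ fence wait introduced above. */
    static long example_kiq_wait(struct amdgpu_device *adev,
                                 struct amdgpu_ring *ring, uint32_t seq)
    {
        long r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        long cnt = 0;

        /* never block gpu_recover() or an interrupt handler */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
            return -EIO;

        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
            msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
            r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }
        return cnt > MAX_KIQ_REG_TRY ? -ETIMEDOUT : r;
    }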
@@ -3695,40 +3695,6 @@ static int ci_find_boot_level(struct ci_single_dpm_table *table,
return ret;
}

static void ci_save_default_power_profile(struct amdgpu_device *adev)
{
struct ci_power_info *pi = ci_get_pi(adev);
struct SMU7_Discrete_GraphicsLevel *levels =
pi->smc_state_table.GraphicsLevel;
uint32_t min_level = 0;

pi->default_gfx_power_profile.activity_threshold =
be16_to_cpu(levels[0].ActivityLevel);
pi->default_gfx_power_profile.up_hyst = levels[0].UpH;
pi->default_gfx_power_profile.down_hyst = levels[0].DownH;
pi->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;

pi->default_compute_power_profile = pi->default_gfx_power_profile;
pi->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;

/* Optimize compute power profile: Use only highest
 * 2 power levels (if more than 2 are available), Hysteresis:
 * 0ms up, 5ms down
 */
if (pi->smc_state_table.GraphicsDpmLevelCount > 2)
min_level = pi->smc_state_table.GraphicsDpmLevelCount - 2;
else if (pi->smc_state_table.GraphicsDpmLevelCount == 2)
min_level = 1;
pi->default_compute_power_profile.min_sclk =
be32_to_cpu(levels[min_level].SclkFrequency);

pi->default_compute_power_profile.up_hyst = 0;
pi->default_compute_power_profile.down_hyst = 5;

pi->gfx_power_profile = pi->default_gfx_power_profile;
pi->compute_power_profile = pi->default_compute_power_profile;
}

static int ci_init_smc_table(struct amdgpu_device *adev)
{
struct ci_power_info *pi = ci_get_pi(adev);
@@ -3874,8 +3840,6 @@ static int ci_init_smc_table(struct amdgpu_device *adev)
if (ret)
return ret;

ci_save_default_power_profile(adev);

return 0;
}

@@ -6753,222 +6717,6 @@ static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
return 0;
}

static int ci_dpm_get_power_profile_state(void *handle,
struct amd_pp_profile *query)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);

if (!pi || !query)
return -EINVAL;

if (query->type == AMD_PP_GFX_PROFILE)
memcpy(query, &pi->gfx_power_profile,
sizeof(struct amd_pp_profile));
else if (query->type == AMD_PP_COMPUTE_PROFILE)
memcpy(query, &pi->compute_power_profile,
sizeof(struct amd_pp_profile));
else
return -EINVAL;

return 0;
}

static int ci_populate_requested_graphic_levels(struct amdgpu_device *adev,
struct amd_pp_profile *request)
{
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_dpm_table *dpm_table = &(pi->dpm_table);
struct SMU7_Discrete_GraphicsLevel *levels =
pi->smc_state_table.GraphicsLevel;
uint32_t array = pi->dpm_table_start +
offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
SMU7_MAX_LEVELS_GRAPHICS;
uint32_t i;

for (i = 0; i < dpm_table->sclk_table.count; i++) {
levels[i].ActivityLevel =
cpu_to_be16(request->activity_threshold);
levels[i].EnabledForActivity = 1;
levels[i].UpH = request->up_hyst;
levels[i].DownH = request->down_hyst;
}

return amdgpu_ci_copy_bytes_to_smc(adev, array, (uint8_t *)levels,
array_size, pi->sram_end);
}

static void ci_find_min_clock_masks(struct amdgpu_device *adev,
uint32_t *sclk_mask, uint32_t *mclk_mask,
uint32_t min_sclk, uint32_t min_mclk)
{
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_dpm_table *dpm_table = &(pi->dpm_table);
uint32_t i;

for (i = 0; i < dpm_table->sclk_table.count; i++) {
if (dpm_table->sclk_table.dpm_levels[i].enabled &&
dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
*sclk_mask |= 1 << i;
}

for (i = 0; i < dpm_table->mclk_table.count; i++) {
if (dpm_table->mclk_table.dpm_levels[i].enabled &&
dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
*mclk_mask |= 1 << i;
}
}

static int ci_set_power_profile_state(struct amdgpu_device *adev,
struct amd_pp_profile *request)
{
struct ci_power_info *pi = ci_get_pi(adev);
int tmp_result, result = 0;
uint32_t sclk_mask = 0, mclk_mask = 0;

tmp_result = ci_freeze_sclk_mclk_dpm(adev);
if (tmp_result) {
DRM_ERROR("Failed to freeze SCLK MCLK DPM!");
result = tmp_result;
}

tmp_result = ci_populate_requested_graphic_levels(adev,
request);
if (tmp_result) {
DRM_ERROR("Failed to populate requested graphic levels!");
result = tmp_result;
}

tmp_result = ci_unfreeze_sclk_mclk_dpm(adev);
if (tmp_result) {
DRM_ERROR("Failed to unfreeze SCLK MCLK DPM!");
result = tmp_result;
}

ci_find_min_clock_masks(adev, &sclk_mask, &mclk_mask,
request->min_sclk, request->min_mclk);

if (sclk_mask) {
if (!pi->sclk_dpm_key_disabled)
amdgpu_ci_send_msg_to_smc_with_parameter(
adev,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
pi->dpm_level_enable_mask.
sclk_dpm_enable_mask &
sclk_mask);
}

if (mclk_mask) {
if (!pi->mclk_dpm_key_disabled)
amdgpu_ci_send_msg_to_smc_with_parameter(
adev,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
pi->dpm_level_enable_mask.
mclk_dpm_enable_mask &
mclk_mask);
}

return result;
}

static int ci_dpm_set_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
int ret = -1;

if (!pi || !request)
return -EINVAL;

if (adev->pm.dpm.forced_level !=
AMD_DPM_FORCED_LEVEL_AUTO)
return -EINVAL;

if (request->min_sclk ||
request->min_mclk ||
request->activity_threshold ||
request->up_hyst ||
request->down_hyst) {
if (request->type == AMD_PP_GFX_PROFILE)
memcpy(&pi->gfx_power_profile, request,
sizeof(struct amd_pp_profile));
else if (request->type == AMD_PP_COMPUTE_PROFILE)
memcpy(&pi->compute_power_profile, request,
sizeof(struct amd_pp_profile));
else
return -EINVAL;

if (request->type == pi->current_power_profile)
ret = ci_set_power_profile_state(
adev,
request);
} else {
/* set power profile if it exists */
switch (request->type) {
case AMD_PP_GFX_PROFILE:
ret = ci_set_power_profile_state(
adev,
&pi->gfx_power_profile);
break;
case AMD_PP_COMPUTE_PROFILE:
ret = ci_set_power_profile_state(
adev,
&pi->compute_power_profile);
break;
default:
return -EINVAL;
}
}

if (!ret)
pi->current_power_profile = request->type;

return 0;
}

static int ci_dpm_reset_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);

if (!pi || !request)
return -EINVAL;

if (request->type == AMD_PP_GFX_PROFILE) {
pi->gfx_power_profile = pi->default_gfx_power_profile;
return ci_dpm_set_power_profile_state(adev,
&pi->gfx_power_profile);
} else if (request->type == AMD_PP_COMPUTE_PROFILE) {
pi->compute_power_profile =
pi->default_compute_power_profile;
return ci_dpm_set_power_profile_state(adev,
&pi->compute_power_profile);
} else
return -EINVAL;
}

static int ci_dpm_switch_power_profile(void *handle,
enum amd_pp_profile_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amd_pp_profile request = {0};

if (!pi)
return -EINVAL;

if (pi->current_power_profile != type) {
request.type = type;
return ci_dpm_set_power_profile_state(adev, &request);
}

return 0;
}

static int ci_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
@@ -7053,10 +6801,6 @@ const struct amd_pm_funcs ci_dpm_funcs = {
.set_mclk_od = ci_dpm_set_mclk_od,
.check_state_equal = ci_check_state_equal,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.get_power_profile_state = ci_dpm_get_power_profile_state,
.set_power_profile_state = ci_dpm_set_power_profile_state,
.reset_power_profile_state = ci_dpm_reset_power_profile_state,
.switch_power_profile = ci_dpm_switch_power_profile,
.read_sensor = ci_dpm_read_sensor,
};

@@ -295,13 +295,6 @@ struct ci_power_info {
bool fan_is_controlled_by_smc;
u32 t_min;
u32 fan_ctrl_default_mode;

/* power profile */
struct amd_pp_profile gfx_power_profile;
struct amd_pp_profile compute_power_profile;
struct amd_pp_profile default_gfx_power_profile;
struct amd_pp_profile default_compute_power_profile;
enum amd_pp_profile_type current_power_profile;
};

#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
@@ -111,7 +111,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
cik_ih_disable_interrupts(adev);

/* setup interrupt control */
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN

@@ -310,7 +310,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)

if ((adev->mman.buffer_funcs_ring == sdma0) ||
(adev->mman.buffer_funcs_ring == sdma1))
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
amdgpu_ttm_set_buffer_funcs_status(adev, false);

for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -510,7 +510,7 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
}

if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}

return 0;
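Note: the interrupt-handler (IH) hunks above and below all touch the same one-liner: the IH ring's dummy-read page is now addressed through the plain dma_addr_t adev->dummy_page_addr instead of the removed struct amdgpu_dummy_page. The right shift by 8 is because INTERRUPT_CNTL2 stores the address in 256-byte units. A sketch of the shared setup (amdgpu headers assumed; the function name is hypothetical):

    /* Hedged sketch of the IH dummy-read programming repeated across the
     * IH blocks in this series: INTERRUPT_CNTL2 takes a 256-byte-aligned
     * bus address, hence dummy_page_addr >> 8. */
    static void example_ih_program_dummy_page(struct amdgpu_device *adev)
    {
        WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
    }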
@@ -111,7 +111,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
cz_ih_disable_interrupts(adev);

/* setup interrupt control */
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN

@@ -3037,7 +3037,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_INFO("IH: HPD%d\n", hpd + 1);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}

return 0;

@@ -4358,34 +4358,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
case CHIP_KAVERI:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 4;
if ((adev->pdev->device == 0x1304) ||
(adev->pdev->device == 0x1305) ||
(adev->pdev->device == 0x130C) ||
(adev->pdev->device == 0x130F) ||
(adev->pdev->device == 0x1310) ||
(adev->pdev->device == 0x1311) ||
(adev->pdev->device == 0x131C)) {
adev->gfx.config.max_cu_per_sh = 8;
adev->gfx.config.max_backends_per_se = 2;
} else if ((adev->pdev->device == 0x1309) ||
(adev->pdev->device == 0x130A) ||
(adev->pdev->device == 0x130D) ||
(adev->pdev->device == 0x1313) ||
(adev->pdev->device == 0x131D)) {
adev->gfx.config.max_cu_per_sh = 6;
adev->gfx.config.max_backends_per_se = 2;
} else if ((adev->pdev->device == 0x1306) ||
(adev->pdev->device == 0x1307) ||
(adev->pdev->device == 0x130B) ||
(adev->pdev->device == 0x130E) ||
(adev->pdev->device == 0x1315) ||
(adev->pdev->device == 0x131B)) {
adev->gfx.config.max_cu_per_sh = 4;
adev->gfx.config.max_backends_per_se = 1;
} else {
adev->gfx.config.max_cu_per_sh = 3;
adev->gfx.config.max_backends_per_se = 1;
}
adev->gfx.config.max_cu_per_sh = 8;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_texture_channel_caches = 4;
adev->gfx.config.max_gprs = 256;
@@ -271,58 +271,65 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)

static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;

r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
unsigned index;
uint64_t gpu_addr;
uint32_t tmp;
long r;

r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}

gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
ib.ptr[2] = lower_32_bits(gpu_addr);
ib.ptr[3] = upper_32_bits(gpu_addr);
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;

r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;

r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}

tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF) {
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("ib test on ring %d failed\n", ring->idx);
r = -EINVAL;
}

r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
amdgpu_device_wb_free(adev, index);
return r;
}

@@ -2954,7 +2961,13 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);

if (amdgpu_sriov_vf(adev)) {
pr_debug("For SRIOV client, shouldn't do anything.\n");
gfx_v9_0_cp_gfx_enable(adev, false);
/* polling must be disabled for SR-IOV once the hw is finished,
 * otherwise the CPC engine may keep fetching a WB address that
 * is already invalid after sw teardown and trigger a DMAR read
 * error on the hypervisor side.
 */
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
return 0;
}
gfx_v9_0_cp_enable(adev, false);
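Note: the rewritten gfx_v9_0 IB test validates the ring without touching a scratch register: it seeds a writeback (WB) slot with a poison value and has the IB overwrite it through a 5-dword WRITE_DATA packet, which also works where register access is mediated (e.g. under SR-IOV). The packet layout, condensed as a sketch (values taken from the hunk above; the helper name is hypothetical):

    /* Hedged sketch: build the WRITE_DATA packet used by the new test.
     * DST_SEL(5) selects memory as the destination; WR_CONFIRM makes the
     * CP wait until the write has actually landed. */
    static void example_fill_ib(struct amdgpu_ib *ib, uint64_t gpu_addr)
    {
        ib->ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
        ib->ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
        ib->ptr[2] = lower_32_bits(gpu_addr);
        ib->ptr[3] = upper_32_bits(gpu_addr);
        ib->ptr[4] = 0xDEADBEEF;
        ib->length_dw = 5;
    }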
@@ -92,9 +92,9 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)

/* Program "protection fault". */
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
(u32)(adev->dummy_page.addr >> 12));
(u32)(adev->dummy_page_addr >> 12));
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
(u32)((u64)adev->dummy_page.addr >> 44));
(u32)((u64)adev->dummy_page_addr >> 44));

WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);

@@ -533,7 +533,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
(u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT0_CNTL2, 0);
WREG32(mmVM_CONTEXT0_CNTL,
VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
@@ -563,7 +563,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)

/* enable context1-15 */
WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
(u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT1_CNTL2, 4);
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |

@@ -644,7 +644,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
(u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT0_CNTL2, 0);
tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
@@ -674,7 +674,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)

/* enable context1-15 */
WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
(u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT1_CNTL2, 4);
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);

@@ -860,7 +860,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
(u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT0_CNTL2, 0);
tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
@@ -890,7 +890,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)

/* enable context1-15 */
WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
(u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT1_CNTL2, 4);
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
@@ -1105,7 +1105,6 @@ static int gmc_v8_0_sw_init(void *handle)
*/
adev->need_dma32 = false;
dma_bits = adev->need_dma32 ? 32 : 40;
adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
adev->need_dma32 = true;

@@ -673,7 +673,7 @@ static int gmc_v9_0_late_init(void *handle)
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
BUG_ON(vm_inv_eng[i] > 16);

if (adev->asic_type == CHIP_VEGA10) {
if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
r = gmc_v9_0_ecc_available(adev);
if (r == 1) {
DRM_INFO("ECC is active.\n");
@@ -722,7 +722,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
if (!adev->gmc.vram_width) {
/* hbm memory channel size */
chansize = 128;
if (adev->flags & AMD_IS_APU)
chansize = 64;
else
chansize = 128;

tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
@@ -789,7 +792,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10: /* all engines support GPUVM */
default:
adev->gmc.gart_size = 256ULL << 20;
adev->gmc.gart_size = 512ULL << 20;
break;
case CHIP_RAVEN: /* DCE SG support */
adev->gmc.gart_size = 1024ULL << 20;

@@ -111,7 +111,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
iceland_ih_disable_interrupts(adev);

/* setup interrupt control */
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN

@@ -103,9 +103,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)

/* Program "protection fault". */
WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
(u32)(adev->dummy_page.addr >> 12));
(u32)(adev->dummy_page_addr >> 12));
WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
(u32)((u64)adev->dummy_page.addr >> 44));
(u32)((u64)adev->dummy_page_addr >> 44));

tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,

@@ -133,7 +133,7 @@ static void nbio_v6_1_ih_control(struct amdgpu_device *adev)
u32 interrupt_cntl;

/* setup interrupt control */
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN

@@ -208,7 +208,7 @@ static void nbio_v7_0_ih_control(struct amdgpu_device *adev)
u32 interrupt_cntl;

/* setup interrupt control */
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN

@@ -339,7 +339,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)

if ((adev->mman.buffer_funcs_ring == sdma0) ||
(adev->mman.buffer_funcs_ring == sdma1))
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
amdgpu_ttm_set_buffer_funcs_status(adev, false);

for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -484,7 +484,7 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
}

if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}

return 0;

@@ -510,7 +510,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)

if ((adev->mman.buffer_funcs_ring == sdma0) ||
(adev->mman.buffer_funcs_ring == sdma1))
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
amdgpu_ttm_set_buffer_funcs_status(adev, false);

for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -711,14 +711,17 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
if (ring->use_pollmem)
if (ring->use_pollmem) {
/* wptr polling is not fast enough, directly clean the wptr register */
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
ENABLE, 1);
else
} else {
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
ENABLE, 0);
}
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);

/* enable DMA RB */
@@ -750,7 +753,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
}

if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}

return 0;
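Note: the gfxhub/mmhub/gmc hunks above also split the dummy page's address across a LO32/HI32 register pair when programming the VM "protection fault" default address: bits 12 and up (the 4 KiB page frame number) go into the LO32 register, and anything at or above bit 44 into HI32. Condensed sketch (amdgpu headers assumed; the function name is hypothetical):

    /* Hedged sketch of the protection-fault address programming repeated
     * in the hub hunks above. */
    static void example_program_fault_addr(struct amdgpu_device *adev)
    {
        WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
                     (u32)(adev->dummy_page_addr >> 12));
        WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
                     (u32)((u64)adev->dummy_page_addr >> 44));
    }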
@@ -238,31 +238,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u64 *wptr = NULL;
uint64_t local_wptr = 0;
u64 wptr;

if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
*wptr = (*wptr) >> 2;
DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
u32 lowbit, highbit;
int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

wptr = &local_wptr;
lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;

DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
me, highbit, lowbit);
*wptr = highbit;
*wptr = (*wptr) << 32;
*wptr |= lowbit;
wptr = highbit;
wptr = wptr << 32;
wptr |= lowbit;
}

return *wptr;
return wptr >> 2;
}

/**
@@ -430,7 +426,7 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)

if ((adev->mman.buffer_funcs_ring == sdma0) ||
(adev->mman.buffer_funcs_ring == sdma1))
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
amdgpu_ttm_set_buffer_funcs_status(adev, false);

for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -672,7 +668,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
}

if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
amdgpu_ttm_set_buffer_funcs_status(adev, true);

}
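Note: the get_wptr fix above matters because the doorbell write pointer lives in writeback memory that the GPU updates asynchronously; the old code both re-read it through a plain pointer and wrote the shifted value back into the shared slot. Reading it exactly once and shifting only on return avoids both problems:

    /* Hedged sketch of the corrected doorbell read (amdgpu headers
     * assumed): one READ_ONCE() from the WB slot, then a single
     * byte-to-dword conversion on the way out. */
    static uint64_t example_get_wptr(struct amdgpu_ring *ring)
    {
        struct amdgpu_device *adev = ring->adev;
        uint64_t wptr = READ_ONCE(*((uint64_t *)&adev->wb.wb[ring->wptr_offs]));

        return wptr >> 2; /* ring positions are dword granular */
    }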
@@ -31,6 +31,7 @@
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_powerplay.h"
#include "sid.h"
#include "si_ih.h"
@@ -1484,8 +1485,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
int bridge_pos, gpu_pos;
u32 speed_cntl, mask, current_data_rate;
int ret, i;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;

if (pci_is_root_bus(adev->pdev->bus))
@@ -1497,23 +1498,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return;

ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
if (ret != 0)
return;

if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;

speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
LC_CURRENT_DATA_RATE_SHIFT;
if (mask & DRM_PCIE_SPEED_80) {
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
} else if (mask & DRM_PCIE_SPEED_50) {
} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -1529,7 +1527,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
if (!gpu_pos)
return;

if (mask & DRM_PCIE_SPEED_80) {
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
u16 bridge_cfg2, gpu_cfg2;
@@ -1612,9 +1610,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)

pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
if (mask & DRM_PCIE_SPEED_80)
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
tmp16 |= 3;
else if (mask & DRM_PCIE_SPEED_50)
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
tmp16 |= 2;
else
tmp16 |= 1;
@@ -121,7 +121,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
amdgpu_ttm_set_buffer_funcs_status(adev, false);
ring->ready = false;
}
}
@@ -184,7 +184,7 @@ static int si_dma_start(struct amdgpu_device *adev)
}

if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}

return 0;
@@ -26,6 +26,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#include "sid.h"
#include "r600_dpm.h"
#include "si_dpm.h"
@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
}
}

static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
u32 sys_mask,
enum amdgpu_pcie_gen asic_gen,
enum amdgpu_pcie_gen default_gen)
{
switch (asic_gen) {
case AMDGPU_PCIE_GEN1:
return AMDGPU_PCIE_GEN1;
case AMDGPU_PCIE_GEN2:
return AMDGPU_PCIE_GEN2;
case AMDGPU_PCIE_GEN3:
return AMDGPU_PCIE_GEN3;
default:
if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
return AMDGPU_PCIE_GEN3;
else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
return AMDGPU_PCIE_GEN2;
else
return AMDGPU_PCIE_GEN1;
}
return AMDGPU_PCIE_GEN1;
}

static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u)
{
@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
table->ACPIState.levels[0].vddc.index,
&table->ACPIState.levels[0].std_vddc);
}
table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
si_pi->sys_pcie_mask,
si_pi->boot_pcie_gen,
AMDGPU_PCIE_GEN1);
table->ACPIState.levels[0].gen2PCIE =
(u8)amdgpu_get_pcie_gen_support(adev,
si_pi->sys_pcie_mask,
si_pi->boot_pcie_gen,
AMDGPU_PCIE_GEN1);

if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(adev,
@@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
pl->flags = le32_to_cpu(clock_info->si.ulFlags);
pl->pcie_gen = r600_get_pcie_gen_support(adev,
si_pi->sys_pcie_mask,
si_pi->boot_pcie_gen,
clock_info->si.ucPCIEGen);
pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
si_pi->sys_pcie_mask,
si_pi->boot_pcie_gen,
clock_info->si.ucPCIEGen);

/* patch up vddc if necessary */
ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
int ret;
u32 mask;

si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
if (si_pi == NULL)
@@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;

ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
if (ret)
si_pi->sys_pcie_mask = 0;
else
si_pi->sys_pcie_mask = mask;
si_pi->sys_pcie_mask =
(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
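Note: with drm_pcie_get_speed_cap_mask() gone, the PCIe capability comes from the CAIL mask the driver already caches in adev->pm.pcie_gen_mask, and gen selection reduces to mask tests. A condensed sketch of that selection logic, under the assumption that the shared amdgpu_get_pcie_gen_support() helper behaves like the removed r600_get_pcie_gen_support() with CAIL flags in place of the DRM speed bits; the function below is a hypothetical stand-in, not the helper's actual signature:

    /* Hedged sketch: pick the highest PCIe gen allowed by both the cached
     * capability mask and the requested default. */
    static enum amdgpu_pcie_gen example_pick_pcie_gen(u32 sys_mask,
                                                      enum amdgpu_pcie_gen def)
    {
        if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
            def == AMDGPU_PCIE_GEN3)
            return AMDGPU_PCIE_GEN3;
        if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
            def == AMDGPU_PCIE_GEN2)
            return AMDGPU_PCIE_GEN2;
        return AMDGPU_PCIE_GEN1;
    }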
@@ -107,7 +107,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
tonga_ih_disable_interrupts(adev);

/* setup interrupt control */
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -1580,7 +1580,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
.set_wptr = uvd_v6_0_enc_ring_set_wptr,
.emit_frame_size =
4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
1, /* uvd_v6_0_enc_ring_insert_end */
.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
@@ -26,8 +26,6 @@

AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)

subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include

subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
@@ -374,7 +374,7 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)

if (max_size) {
int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
&compressor->gpu_addr, &compressor->cpu_addr);

if (r)
@@ -1058,6 +1058,10 @@ static void handle_hpd_rx_irq(void *param)
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

if (aconnector->fake_enable)
aconnector->fake_enable = false;

amdgpu_dm_update_connector_after_detect(aconnector);

@@ -2486,6 +2490,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
return &state->base;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;

irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
dm_set_vblank(crtc, false);
}

/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = dm_crtc_reset_state,
@@ -2496,6 +2521,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.atomic_duplicate_state = dm_crtc_duplicate_state,
.atomic_destroy_state = dm_crtc_destroy_state,
.set_crc_source = amdgpu_dm_crtc_set_crc_source,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
};

static enum drm_connector_status
@@ -3059,6 +3086,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
if (!dm_plane_state->dc_state)
return 0;

if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
return -EINVAL;

if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
return 0;

@@ -3193,7 +3223,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

return 0;

@@ -4660,8 +4690,6 @@ static int dm_update_planes_state(struct dc *dc,
bool pflip_needed = !state->allow_modeset;
int ret = 0;

if (pflip_needed)
return ret;

/* Add new planes */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4676,6 +4704,8 @@ static int dm_update_planes_state(struct dc *dc,

/* Remove any changed/removed planes */
if (!enable) {
if (pflip_needed)
continue;

if (!old_plane_crtc)
continue;
@@ -4720,6 +4750,8 @@ static int dm_update_planes_state(struct dc *dc,
if (!dm_new_crtc_state->stream)
continue;

if (pflip_needed)
continue;

WARN_ON(dm_new_plane_state->dc_state);

@@ -4764,6 +4796,30 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}

static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;

WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);

if (IS_ERR(plane_state))
return -EDEADLK;

crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
if (crtc->primary == plane && crtc_state->active) {
if (!plane_state->fb)
return -EINVAL;
}
}
return 0;
}

static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -4787,6 +4843,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;

for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
ret = dm_atomic_check_plane_state_fb(state, crtc);
if (ret)
goto fail;

if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed)
continue;
@ -268,7 +268,9 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#define amdgpu_dm_crtc_handle_crc_irq(x)
#endif

#define MAX_COLOR_LUT_ENTRIES 256
#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
|
@ -27,6 +27,8 @@
#include "amdgpu_dm.h"
#include "modules/color/color_gamma.h"

#define MAX_DRM_LUT_VALUE 0xFFFF

/*
* Initialize the color module.
*
@ -47,19 +49,18 @@ void amdgpu_dm_init_color_mod(void)
* f(a) = (0xFF00/MAX_COLOR_LUT_ENTRIES-1)a; for integer a in
* [0, MAX_COLOR_LUT_ENTRIES)
*/
static bool __is_lut_linear(struct drm_color_lut *lut)
static bool __is_lut_linear(struct drm_color_lut *lut, uint32_t size)
{
int i;
uint32_t max_os = 0xFF00;
uint32_t expected;
int delta;

for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
for (i = 0; i < size; i++) {
/* All color values should equal */
if ((lut[i].red != lut[i].green) || (lut[i].green != lut[i].blue))
return false;

expected = i * max_os / (MAX_COLOR_LUT_ENTRIES-1);
expected = i * MAX_DRM_LUT_VALUE / (size-1);

/* Allow a +/-1 error. */
delta = lut[i].red - expected;
@ -69,6 +70,42 @@ static bool __is_lut_linear(struct drm_color_lut *lut)
return true;
}

/**
* Convert the drm_color_lut to dc_gamma. The conversion depends on the size
* of the lut - whether or not it's legacy.
*/
static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
struct dc_gamma *gamma,
bool is_legacy)
{
uint32_t r, g, b;
int i;

if (is_legacy) {
for (i = 0; i < MAX_COLOR_LEGACY_LUT_ENTRIES; i++) {
r = drm_color_lut_extract(lut[i].red, 16);
g = drm_color_lut_extract(lut[i].green, 16);
b = drm_color_lut_extract(lut[i].blue, 16);

gamma->entries.red[i] = dal_fixed31_32_from_int(r);
gamma->entries.green[i] = dal_fixed31_32_from_int(g);
gamma->entries.blue[i] = dal_fixed31_32_from_int(b);
}
return;
}

/* else */
for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
r = drm_color_lut_extract(lut[i].red, 16);
g = drm_color_lut_extract(lut[i].green, 16);
b = drm_color_lut_extract(lut[i].blue, 16);

gamma->entries.red[i] = dal_fixed31_32_from_fraction(r, MAX_DRM_LUT_VALUE);
gamma->entries.green[i] = dal_fixed31_32_from_fraction(g, MAX_DRM_LUT_VALUE);
gamma->entries.blue[i] = dal_fixed31_32_from_fraction(b, MAX_DRM_LUT_VALUE);
}
}

/**
* amdgpu_dm_set_regamma_lut: Set regamma lut for the given CRTC.
* @crtc: amdgpu_dm crtc state
@ -85,11 +122,10 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
struct drm_property_blob *blob = crtc->base.gamma_lut;
struct dc_stream_state *stream = crtc->stream;
struct drm_color_lut *lut;
uint32_t lut_size;
struct dc_gamma *gamma;
enum dc_transfer_func_type old_type = stream->out_transfer_func->type;

uint32_t r, g, b;
int i;
bool ret;

if (!blob) {
@ -100,8 +136,9 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
}

lut = (struct drm_color_lut *)blob->data;
lut_size = blob->length / sizeof(struct drm_color_lut);

if (__is_lut_linear(lut)) {
if (__is_lut_linear(lut, lut_size)) {
/* Set to bypass if lut is set to linear */
stream->out_transfer_func->type = TF_TYPE_BYPASS;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
@ -112,20 +149,20 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
if (!gamma)
return -ENOMEM;

gamma->num_entries = MAX_COLOR_LUT_ENTRIES;
gamma->type = GAMMA_RGB_256;

/* Truncate, and store in dc_gamma for output tf calculation */
for (i = 0; i < gamma->num_entries; i++) {
r = drm_color_lut_extract(lut[i].red, 16);
g = drm_color_lut_extract(lut[i].green, 16);
b = drm_color_lut_extract(lut[i].blue, 16);

gamma->entries.red[i] = dal_fixed31_32_from_int(r);
gamma->entries.green[i] = dal_fixed31_32_from_int(g);
gamma->entries.blue[i] = dal_fixed31_32_from_int(b);
gamma->num_entries = lut_size;
if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
gamma->type = GAMMA_RGB_256;
else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
gamma->type = GAMMA_CS_TFM_1D;
else {
/* Invalid lut size */
dc_gamma_release(&gamma);
return -EINVAL;
}

/* Convert drm_lut into dc_gamma */
__drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);

/* Call color module to translate into something DC understands. Namely
* a transfer function.
*/
@ -212,7 +249,7 @@ int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
}

lut = (struct drm_color_lut *)blob->data;
if (__is_lut_linear(lut)) {
if (__is_lut_linear(lut, MAX_COLOR_LUT_ENTRIES)) {
dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
return 0;
|
@ -258,6 +258,15 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
return true;
}


/*
* Clear payload allocation table before enabling MST DP link.
*/
void dm_helpers_dp_mst_clear_payload_allocation_table(
struct dc_context *ctx,
const struct dc_link *link)
{}

/*
* Polls for ACT (allocation change trigger) handled and sends
* ALLOCATE_PAYLOAD message.
@ -496,3 +505,8 @@ enum dc_edid_status dm_helpers_read_local_edid(

return edid_status;
}

void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
/* TODO: something */
}
|
@ -83,17 +83,18 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
I2C_MOT_TRUE : I2C_MOT_FALSE;
enum ddc_result res;
ssize_t read_bytes;

switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
res = dal_ddc_service_read_dpcd_data(
read_bytes = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
false,
I2C_MOT_UNDEF,
msg->address,
msg->buffer,
msg->size);
break;
return read_bytes;
case DP_AUX_NATIVE_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
@ -104,14 +105,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
msg->size);
break;
case DP_AUX_I2C_READ:
res = dal_ddc_service_read_dpcd_data(
read_bytes = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
true,
mot,
msg->address,
msg->buffer,
msg->size);
break;
return read_bytes;
case DP_AUX_I2C_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
|
@ -49,6 +49,9 @@

#define LAST_RECORD_TYPE 0xff

#define DC_LOGGER \
bp->base.ctx->logger

/* GUID to validate external display connection info table (aka OPM module) */
static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = {
0x91, 0x6E, 0x57, 0x09,
@ -3079,8 +3082,7 @@ static enum bp_result patch_bios_image_from_ext_display_connection_info(
opm_object,
&ext_display_connection_info_tbl) != BP_RESULT_OK) {

dm_logger_write(bp->base.ctx->logger, LOG_WARNING,
"%s: Failed to read Connection Info Table", __func__);
DC_LOG_WARNING("%s: Failed to read Connection Info Table", __func__);
return BP_RESULT_UNSUPPORTED;
}

|
@ -34,6 +34,8 @@
#include "command_table_helper2.h"
#include "bios_parser_helper.h"
#include "bios_parser_types_internal2.h"
#define DC_LOGGER \
bp->base.ctx->logger

#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
(((char *)(&((\
@ -239,8 +241,7 @@ static enum bp_result transmitter_control_v1_6(
if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
cntl->action == TRANSMITTER_CONTROL_ACTIAVATE ||
cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) {
dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\
"%s:ps.param.symclk_10khz = %d\n",\
DC_LOG_BIOS("%s:ps.param.symclk_10khz = %d\n",\
__func__, ps.param.symclk_10khz);
}

@ -331,8 +332,7 @@ static enum bp_result set_pixel_clock_v7(
(uint8_t) bp->cmd_helper->
transmitter_color_depth_to_atom(
bp_params->color_depth);
dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\
"%s:program display clock = %d"\
DC_LOG_BIOS("%s:program display clock = %d"\
"colorDepth = %d\n", __func__,\
bp_params->target_pixel_clock, bp_params->color_depth);

@ -772,8 +772,7 @@ static enum bp_result set_dce_clock_v2_1(
*/
params.param.dceclk_10khz = cpu_to_le32(
bp_params->target_clock_frequency / 10);
dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
"%s:target_clock_frequency = %d"\
DC_LOG_BIOS("%s:target_clock_frequency = %d"\
"clock_type = %d \n", __func__,\
bp_params->target_clock_frequency,\
bp_params->clock_type);
|
@ -33,6 +33,8 @@
#include "dcn10/dcn10_resource.h"
#include "dcn_calc_math.h"

#define DC_LOGGER \
dc->ctx->logger
/*
* NOTE:
* This file is gcc-parseable HW gospel, coming straight from HW engineers.
@ -996,7 +998,7 @@ bool dcn_validate_bandwidth(
dc->debug.min_disp_clk_khz;
}

context->bw.dcn.calc_clk.dppclk_div = (int)(v->dispclk_dppclk_ratio) == 2;
context->bw.dcn.calc_clk.max_dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;

for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@ -1242,8 +1244,7 @@ unsigned int dcn_find_dcfclk_suits_all(
else
dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000;

dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
"\tdcf_clk for voltage = %d\n", dcf_clk);
DC_LOG_BANDWIDTH_CALCS("\tdcf_clk for voltage = %d\n", dcf_clk);
return dcf_clk;
}

@ -1441,8 +1442,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
void dcn_bw_sync_calcs_and_dml(struct dc *dc)
{
kernel_fpu_begin();
dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
"sr_exit_time: %d ns\n"
DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %d ns\n"
"sr_enter_plus_exit_time: %d ns\n"
"urgent_latency: %d ns\n"
"write_back_latency: %d ns\n"
@ -1510,8 +1510,7 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dcn_soc->vmm_page_size,
dc->dcn_soc->dram_clock_change_latency * 1000,
dc->dcn_soc->return_bus_width);
dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
"rob_buffer_size_in_kbyte: %d\n"
DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %d\n"
"det_buffer_size_in_kbyte: %d\n"
"dpp_output_buffer_pixels: %d\n"
"opp_output_buffer_lines: %d\n"
|
@ -51,6 +51,8 @@
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"
#define DC_LOGGER \
dc->ctx->logger


/*******************************************************************************
@ -264,7 +266,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
/* Only call if supported */
if (tg->funcs->configure_crc)
return tg->funcs->configure_crc(tg, &param);
dm_logger_write(dc->ctx->logger, LOG_WARNING, "CRC capture not supported.");
DC_LOG_WARNING("CRC capture not supported.");
return false;
}

@ -297,7 +299,7 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,

if (tg->funcs->get_crc)
return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
dm_logger_write(dc->ctx->logger, LOG_WARNING, "CRC capture not supported.");
DC_LOG_WARNING("CRC capture not supported.");
return false;
}

@ -618,8 +620,7 @@ struct dc *dc_create(const struct dc_init_data *init_params)

dc->config = init_params->flags;

dm_logger_write(dc->ctx->logger, LOG_DC,
"Display Core initialized\n");
DC_LOG_DC("Display Core initialized\n");


/* TODO: missing feature to be enabled */
@ -888,7 +889,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
if (false == context_changed(dc, context))
return DC_OK;

dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
DC_LOG_DC("%s: %d streams\n",
__func__, context->stream_count);

for (i = 0; i < context->stream_count; i++) {
@ -1515,13 +1516,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(
return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{

if (dc == NULL)
return;
return false;

dal_irq_service_set(dc->res_pool->irqs, src, enable);
return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
|
@ -361,21 +361,22 @@ void context_clock_trace(
struct dc *core_dc = dc;
struct dal_logger *logger = core_dc->ctx->logger;

CLOCK_TRACE("Current: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n"
"dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
context->bw.dcn.calc_clk.dispclk_khz,
context->bw.dcn.calc_clk.dppclk_div,
context->bw.dcn.calc_clk.max_dppclk_khz,
context->bw.dcn.calc_clk.dcfclk_khz,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
context->bw.dcn.calc_clk.fclk_khz,
context->bw.dcn.calc_clk.socclk_khz,
context->bw.dcn.calc_clk.dram_ccm_us,
context->bw.dcn.calc_clk.min_active_dram_ccm_us);
CLOCK_TRACE("Calculated: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n"
"dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
context->bw.dcn.calc_clk.dispclk_khz,
context->bw.dcn.calc_clk.dppclk_div,
context->bw.dcn.calc_clk.max_dppclk_khz,
context->bw.dcn.calc_clk.dcfclk_khz,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
context->bw.dcn.calc_clk.fclk_khz,
|
@ -45,9 +45,11 @@
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_enum.h"
#include "dce/dce_11_0_sh_mask.h"
#define DC_LOGGER \
dc_ctx->logger

#define LINK_INFO(...) \
dm_logger_write(dc_ctx->logger, LOG_HW_HOTPLUG, \
DC_LOG_HW_HOTPLUG( \
__VA_ARGS__)

/*******************************************************************************
@ -677,12 +679,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)

switch (edid_status) {
case EDID_BAD_CHECKSUM:
dm_logger_write(link->ctx->logger, LOG_ERROR,
"EDID checksum invalid.\n");
DC_LOG_ERROR("EDID checksum invalid.\n");
break;
case EDID_NO_RESPONSE:
dm_logger_write(link->ctx->logger, LOG_ERROR,
"No EDID read.\n");
DC_LOG_ERROR("No EDID read.\n");
default:
break;
}
@ -712,8 +712,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
"%s: [Block %d] ", sink->edid_caps.display_name, i);
}

dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
"%s: "
DC_LOG_DETECTION_EDID_PARSER("%s: "
"manufacturer_id = %X, "
"product_id = %X, "
"serial_number = %X, "
@ -733,8 +732,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
sink->edid_caps.audio_mode_count);

for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
"%s: mode number = %d, "
DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, "
"format_code = %d, "
"channel_count = %d, "
"sample_rate = %d, "
@ -984,8 +982,7 @@ static bool construct(
}
break;
default:
dm_logger_write(dc_ctx->logger, LOG_WARNING,
"Unsupported Connector type:%d!\n", link->link_id.id);
DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id);
goto create_fail;
}

@ -1138,7 +1135,7 @@ static void dpcd_configure_panel_mode(
{
union dpcd_edp_config edp_config_set;
bool panel_mode_edp = false;

struct dc_context *dc_ctx = link->ctx;
memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));

if (DP_PANEL_MODE_DEFAULT != panel_mode) {
@ -1175,8 +1172,7 @@ static void dpcd_configure_panel_mode(
ASSERT(result == DDC_RESULT_SUCESSFULL);
}
}
dm_logger_write(link->ctx->logger, LOG_DETECTION_DP_CAPS,
"Link: %d eDP panel mode supported: %d "
DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
"eDP panel mode enabled: %d \n",
link->link_index,
link->dpcd_caps.panel_mode_edp,
@ -1311,6 +1307,9 @@ static enum dc_status enable_link_dp_mst(
if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
return DC_OK;

/* clear payload table */
dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);

/* set the sink to MST mode before enabling the link */
dp_enable_mst_on_sink(link, true);

@ -1951,6 +1950,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
struct dc_context *dc_ctx = link->ctx;
unsigned int controller_id = 0;
bool use_smooth_brightness = true;
int i;
@ -1962,8 +1962,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,

use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);

dm_logger_write(link->ctx->logger, LOG_BACKLIGHT,
"New Backlight level: %d (0x%X)\n", level, level);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);

if (dc_is_embedded_signal(link->connector_signal)) {
if (stream != NULL) {
@ -2130,6 +2129,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
struct dc_context *dc_ctx = link->ctx;
uint8_t i;

/* enable_link_dp_mst already checks link->enabled_stream_count
@ -2147,21 +2147,18 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
link, pipe_ctx->stream_res.stream_enc, &proposed_table);
}
else
dm_logger_write(link->ctx->logger, LOG_WARNING,
"Failed to update"
DC_LOG_WARNING("Failed to update"
"MST allocation table for"
"pipe idx:%d\n",
pipe_ctx->pipe_idx);

dm_logger_write(link->ctx->logger, LOG_MST,
"%s "
DC_LOG_MST("%s "
"stream_count: %d: \n ",
__func__,
link->mst_stream_alloc_table.stream_count);

for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
dm_logger_write(link->ctx->logger, LOG_MST,
"stream_enc[%d]: 0x%x "
DC_LOG_MST("stream_enc[%d]: 0x%x "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
@ -2212,6 +2209,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
uint8_t i;
bool mst_mode = (link->type == dc_connection_mst_branch);
struct dc_context *dc_ctx = link->ctx;

/* deallocate_mst_payload is called before disable link. When mode or
* disable/enable monitor, new stream is created which is not in link
@ -2237,23 +2235,20 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link, pipe_ctx->stream_res.stream_enc, &proposed_table);
}
else {
dm_logger_write(link->ctx->logger, LOG_WARNING,
"Failed to update"
DC_LOG_WARNING("Failed to update"
"MST allocation table for"
"pipe idx:%d\n",
pipe_ctx->pipe_idx);
}
}

dm_logger_write(link->ctx->logger, LOG_MST,
"%s"
DC_LOG_MST("%s"
"stream_count: %d: ",
__func__,
link->mst_stream_alloc_table.stream_count);

for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
dm_logger_write(link->ctx->logger, LOG_MST,
"stream_enc[%d]: 0x%x "
DC_LOG_MST("stream_enc[%d]: 0x%x "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
@ -2287,21 +2282,24 @@ void core_link_enable_stream(
struct pipe_ctx *pipe_ctx)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;

struct dc_context *dc_ctx = pipe_ctx->stream->ctx;
enum dc_status status;

/* eDP lit up by bios already, no need to enable again. */
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
core_dc->apply_edp_fast_boot_optimization) {
core_dc->apply_edp_fast_boot_optimization = false;
pipe_ctx->stream->dpms_off = false;
return;
}

if (pipe_ctx->stream->dpms_off)
return;

status = enable_link(state, pipe_ctx);

if (status != DC_OK) {
dm_logger_write(pipe_ctx->stream->ctx->logger,
LOG_WARNING, "enabling link %u failed: %d\n",
DC_LOG_WARNING("enabling link %u failed: %d\n",
pipe_ctx->stream->sink->link->link_index,
status);

@ -2355,11 +2353,14 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
core_dc->hwss.set_avmute(pipe_ctx, enable);
}

void dc_link_disable_hpd_filter(struct dc_link *link)
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
{
struct gpio *hpd;

if (!link->is_hpd_filter_disabled) {
if (enable) {
link->is_hpd_filter_disabled = false;
program_hpd_filter(link);
} else {
link->is_hpd_filter_disabled = true;
/* Obtain HPD handle */
hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
|
@ -629,7 +629,7 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}

enum ddc_result dal_ddc_service_read_dpcd_data(
ssize_t dal_ddc_service_read_dpcd_data(
struct ddc_service *ddc,
bool i2c,
enum i2c_mot_mode mot,
@ -660,8 +660,9 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
if (dal_i2caux_submit_aux_command(
ddc->ctx->i2caux,
ddc->ddc_pin,
&command))
return DDC_RESULT_SUCESSFULL;
&command)) {
return (ssize_t)command.payloads->length;
}

return DDC_RESULT_FAILED_OPERATION;
}
|
@ -11,6 +11,8 @@
#include "dpcd_defs.h"

#include "resource.h"
#define DC_LOGGER \
link->ctx->logger

/* maximum pre emphasis level allowed for each voltage swing level*/
static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
@ -63,8 +65,7 @@ static void wait_for_training_aux_rd_interval(

udelay(default_wait_in_micro_secs);

dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
"%s:\n wait = %d\n",
DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
__func__,
default_wait_in_micro_secs);
}
@ -79,8 +80,7 @@ static void dpcd_set_training_pattern(
&dpcd_pattern.raw,
1);

dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
"%s\n %x pattern = %x\n",
DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
__func__,
DP_TRAINING_PATTERN_SET,
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
@ -116,8 +116,7 @@ static void dpcd_set_link_settings(
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));

dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
"%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
__func__,
DP_LINK_BW_SET,
lt_settings->link_settings.link_rate,
@ -151,8 +150,7 @@ static enum dpcd_training_patterns
break;
default:
ASSERT(0);
dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
"%s: Invalid HW Training pattern: %d\n",
DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
__func__, pattern);
break;
}
@ -184,8 +182,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
= dpcd_pattern.raw;

dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
"%s\n %x pattern = %x\n",
DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
__func__,
DP_TRAINING_PATTERN_SET,
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
@ -219,8 +216,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lane,
size_in_bytes);

dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
"%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
DC_LOG_HW_LINK_TRAINING("%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
DP_TRAINING_LANE0_SET,
dpcd_lane[0].bits.VOLTAGE_SWING_SET,
@ -456,14 +452,12 @@ static void get_lane_status_and_drive_settings(

ln_status_updated->raw = dpcd_buf[2];

dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
"%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
DC_LOG_HW_LINK_TRAINING("%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
__func__,
DP_LANE0_1_STATUS, dpcd_buf[0],
DP_LANE2_3_STATUS, dpcd_buf[1]);

dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
"%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
DC_LOG_HW_LINK_TRAINING("%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
__func__,
DP_ADJUST_REQUEST_LANE0_1,
dpcd_buf[4],
@ -556,8 +550,7 @@ static void dpcd_set_lane_settings(
}
*/

dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
"%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
DC_LOG_HW_LINK_TRAINING("%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
DP_TRAINING_LANE0_SET,
dpcd_lane[0].bits.VOLTAGE_SWING_SET,
@ -669,16 +662,14 @@ static bool perform_post_lt_adj_req_sequence(
}

if (!req_drv_setting_changed) {
dm_logger_write(link->ctx->logger, LOG_WARNING,
"%s: Post Link Training Adjust Request Timed out\n",
DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n",
__func__);

ASSERT(0);
return true;
}
}
dm_logger_write(link->ctx->logger, LOG_WARNING,
"%s: Post Link Training Adjust Request limit reached\n",
DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n",
__func__);

ASSERT(0);
@ -709,6 +700,22 @@ static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link)
return HW_DP_TRAINING_PATTERN_2;
}

static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
union lane_status *dpcd_lane_status)
{
enum link_training_result result = LINK_TRAINING_SUCCESS;

if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0)
result = LINK_TRAINING_CR_FAIL_LANE0;
else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0)
result = LINK_TRAINING_CR_FAIL_LANE1;
else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0)
result = LINK_TRAINING_CR_FAIL_LANE23;
else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0)
result = LINK_TRAINING_CR_FAIL_LANE23;
return result;
}

static enum link_training_result perform_channel_equalization_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings)
@ -771,7 +778,7 @@ static enum link_training_result perform_channel_equalization_sequence(

}

static bool perform_clock_recovery_sequence(
static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings)
{
@ -846,11 +853,11 @@ static bool perform_clock_recovery_sequence(

/* 5. check CR done*/
if (is_cr_done(lane_count, dpcd_lane_status))
return true;
return LINK_TRAINING_SUCCESS;

/* 6. max VS reached*/
if (is_max_vs_reached(lt_settings))
return false;
break;

/* 7. same voltage*/
/* Note: VS same for all lanes,
@ -869,20 +876,19 @@ static bool perform_clock_recovery_sequence(

if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
ASSERT(0);
dm_logger_write(link->ctx->logger, LOG_ERROR,
"%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
__func__,
LINK_TRAINING_MAX_CR_RETRY);

}

return false;
return get_cr_failure(lane_count, dpcd_lane_status);
}

static inline bool perform_link_training_int(
static inline enum link_training_result perform_link_training_int(
struct dc_link *link,
struct link_training_settings *lt_settings,
bool status)
enum link_training_result status)
{
union lane_count_set lane_count_set = { {0} };
union dpcd_training_pattern dpcd_pattern = { {0} };
@ -903,9 +909,9 @@ static inline bool perform_link_training_int(
get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4)
return status;

if (status &&
if (status == LINK_TRAINING_SUCCESS &&
perform_post_lt_adj_req_sequence(link, lt_settings) == false)
status = false;
status = LINK_TRAINING_LQA_FAIL;

lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
lane_count_set.bits.ENHANCED_FRAMING = 1;
@ -928,6 +934,8 @@ enum link_training_result dc_link_dp_perform_link_training(
enum link_training_result status = LINK_TRAINING_SUCCESS;

char *link_rate = "Unknown";
char *lt_result = "Unknown";

struct link_training_settings lt_settings;

memset(&lt_settings, '\0', sizeof(lt_settings));
@ -951,22 +959,16 @@ enum link_training_result dc_link_dp_perform_link_training(

/* 2. perform link training (set link training done
* to false is done as well)*/
if (!perform_clock_recovery_sequence(link, &lt_settings)) {
status = LINK_TRAINING_CR_FAIL;
} else {
status = perform_clock_recovery_sequence(link, &lt_settings);
if (status == LINK_TRAINING_SUCCESS) {
status = perform_channel_equalization_sequence(link,
&lt_settings);
}

if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
if (!perform_link_training_int(link,
status = perform_link_training_int(link,
&lt_settings,
status == LINK_TRAINING_SUCCESS)) {
/* the next link training setting in this case
* would be the same as CR failure case.
*/
status = LINK_TRAINING_CR_FAIL;
}
status);
}

/* 6. print status message*/
@ -991,13 +993,37 @@ enum link_training_result dc_link_dp_perform_link_training(
break;
}

switch (status) {
case LINK_TRAINING_SUCCESS:
lt_result = "pass";
break;
case LINK_TRAINING_CR_FAIL_LANE0:
lt_result = "CR failed lane0";
break;
case LINK_TRAINING_CR_FAIL_LANE1:
lt_result = "CR failed lane1";
break;
case LINK_TRAINING_CR_FAIL_LANE23:
lt_result = "CR failed lane23";
break;
case LINK_TRAINING_EQ_FAIL_CR:
lt_result = "CR failed in EQ";
break;
case LINK_TRAINING_EQ_FAIL_EQ:
lt_result = "EQ failed";
break;
case LINK_TRAINING_LQA_FAIL:
lt_result = "LQA failed";
break;
default:
break;
}

/* Connectivity log: link training */
CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d",
link_rate,
lt_settings.link_settings.lane_count,
(status == LINK_TRAINING_SUCCESS) ? "pass" :
((status == LINK_TRAINING_CR_FAIL) ? "CR failed" :
"EQ failed"),
lt_result,
lt_settings.lane_settings[0].VOLTAGE_SWING,
lt_settings.lane_settings[0].PRE_EMPHASIS);

@ -1115,6 +1141,7 @@ bool dp_hbr_verify_link_cap(
dp_cs_id,
cur);


if (skip_link_training)
success = true;
else {
@ -1279,7 +1306,10 @@ static bool decide_fallback_link_setting(
return false;

switch (training_result) {
case LINK_TRAINING_CR_FAIL:
case LINK_TRAINING_CR_FAIL_LANE0:
case LINK_TRAINING_CR_FAIL_LANE1:
case LINK_TRAINING_CR_FAIL_LANE23:
case LINK_TRAINING_LQA_FAIL:
{
if (!reached_minimum_link_rate
(current_link_setting->link_rate)) {
@ -1290,8 +1320,18 @@ static bool decide_fallback_link_setting(
(current_link_setting->lane_count)) {
current_link_setting->link_rate =
initial_link_settings.link_rate;
current_link_setting->lane_count =
reduce_lane_count(
if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
return false;
else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
current_link_setting->lane_count =
LANE_COUNT_ONE;
else if (training_result ==
LINK_TRAINING_CR_FAIL_LANE23)
current_link_setting->lane_count =
LANE_COUNT_TWO;
else
current_link_setting->lane_count =
reduce_lane_count(
current_link_setting->lane_count);
} else {
return false;
@ -1556,8 +1596,7 @@ static bool hpd_rx_irq_check_link_loss_status(
if (sink_status_changed ||
!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {

dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
"%s: Link Status changed.\n", __func__);
DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);

return_code = true;

@ -1570,8 +1609,7 @@ static bool hpd_rx_irq_check_link_loss_status(
sizeof(irq_reg_rx_power_state));

if (dpcd_result != DC_OK) {
dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
"%s: DPCD read failed to obtain power state.\n",
DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
__func__);
} else {
if (irq_reg_rx_power_state != DP_SET_POWER_D0)
@ -1932,8 +1970,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
* PSR and device auto test, refer to function handle_sst_hpd_irq
* in DAL2.1*/

dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
"%s: Got short pulse HPD on link %d\n",
DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n",
__func__, link->link_index);


@ -1947,8 +1984,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
*out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;

if (result != DC_OK) {
dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
"%s: DPCD read failed to obtain irq data\n",
DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n",
__func__);
return false;
}
@ -1966,8 +2002,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
}

if (!allow_hpd_rx_irq(link)) {
dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
"%s: skipping HPD handling on %d\n",
DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
__func__, link->link_index);
return false;
}
|
@ -279,6 +279,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,

for (i = 0; i < MAX_PIPES; i++) {
if (pipes[i].stream != NULL &&
!pipes[i].top_pipe &&
pipes[i].stream->sink != NULL &&
pipes[i].stream->sink->link != NULL &&
pipes[i].stream_res.stream_enc != NULL &&
|
@ -45,7 +45,8 @@
#include "dcn10/dcn10_resource.h"
#endif
#include "dce120/dce120_resource.h"

#define DC_LOGGER \
ctx->logger
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@ -834,7 +835,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct view recout_skip = { 0 };
bool res = false;

struct dc_context *ctx = pipe_ctx->stream->ctx;
/* Important: scaling ratio calculation requires pixel format,
* lb depth calculation requires recout and taps require scaling ratios.
* Inits require viewport, taps, ratios and recout of split pipe
@ -893,7 +894,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
/* May need to re-check lb size after this in some obscure scenario */
calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);

dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER,
DC_LOG_SCALER(
"%s: Viewport:\nheight:%d width:%d x:%d "
"y:%d\n dst_rect:\nheight:%d width:%d x:%d "
"y:%d\n",
@ -2436,7 +2437,7 @@ static void set_vsc_info_packet(
unsigned int i;

/*VSC packet set to 2 when DP revision >= 1.2*/
if (stream->sink->link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
if (stream->psr_version != 0) {
vscPacketRevision = 2;
}

|
@ -198,7 +198,8 @@ bool dc_stream_set_cursor_attributes(
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm &&
!pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
continue;
if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
continue;
@ -237,7 +238,8 @@ bool dc_stream_set_cursor_position(
if (pipe_ctx->stream != stream ||
(!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
!pipe_ctx->plane_state ||
(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
!pipe_ctx->plane_res.ipp)
continue;

core_dc->hwss.set_cursor_position(pipe_ctx);
|
@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"

#define DC_VER "3.1.34"
#define DC_VER "3.1.37"

#define MAX_SURFACES 3
#define MAX_STREAMS 6
@ -184,6 +184,17 @@ enum wm_report_mode {
WM_REPORT_OVERRIDE = 1,
};

struct dc_clocks {
int dispclk_khz;
int max_dppclk_khz;
int dcfclk_khz;
int socclk_khz;
int dcfclk_deep_sleep_khz;
int fclk_khz;
int dram_ccm_us;
int min_active_dram_ccm_us;
};

struct dc_debug {
bool surface_visual_confirm;
bool sanity_checks;
@ -694,7 +705,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
struct dc *dc,
uint32_t src_id,
uint32_t ext_id);
void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
enum dc_irq_source dc_get_hpd_irq_source_at_index(
struct dc *dc, uint32_t link_index);
|
@ -197,7 +197,7 @@ bool dc_link_dp_set_test_pattern(
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);

void dc_link_disable_hpd_filter(struct dc_link *link);
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);

/*
* DPCD access interfaces
|
@ -70,7 +70,8 @@ struct dc_stream_state {
bool ignore_msa_timing_param;
/* TODO: custom INFO packets */
/* TODO: ABM info (DMCU) */
/* TODO: PSR info */
/* PSR info */
unsigned char psr_version;
/* TODO: CEA VIC */

/* from core_stream struct */
|
@ -42,6 +42,8 @@
#define FN(reg_name, field_name) \
abm_dce->abm_shift->field_name, abm_dce->abm_mask->field_name

#define DC_LOGGER \
abm->ctx->logger
#define CTX \
abm_dce->base.ctx

@ -403,8 +405,7 @@ static bool dce_abm_set_backlight_level(
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);

dm_logger_write(abm->ctx->logger, LOG_BACKLIGHT,
"New Backlight level: %d (0x%X)\n",
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_level, backlight_level);

/* If DMCU is in reset state, DMCU is uninitialized */
|
@ -33,6 +33,8 @@

#define CTX \
aud->base.ctx
#define DC_LOGGER \
aud->base.ctx->logger
#define REG(reg)\
(aud->regs->reg)

@ -63,8 +65,7 @@ static void write_indirect_azalia_reg(struct audio *audio,
REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
AZALIA_ENDPOINT_REG_DATA, reg_data);

dm_logger_write(CTX->logger, LOG_HW_AUDIO,
"AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
DC_LOG_HW_AUDIO("AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
reg_index, reg_data);
}

@ -81,8 +82,7 @@ static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index
/* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);

dm_logger_write(CTX->logger, LOG_HW_AUDIO,
"AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
DC_LOG_HW_AUDIO("AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
reg_index, value);

return value;
@ -364,8 +364,7 @@ void dce_aud_az_enable(struct audio *audio)
CLOCK_GATING_DISABLE);
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);

dm_logger_write(CTX->logger, LOG_HW_AUDIO,
"\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n",
DC_LOG_HW_AUDIO("\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n",
audio->inst, value);
}

@ -390,8 +389,7 @@ void dce_aud_az_disable(struct audio *audio)
CLOCK_GATING_DISABLE);
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
dm_logger_write(CTX->logger, LOG_HW_AUDIO,
"\n\t========= AUDIO:dce_aud_az_disable: index: %u data: 0x%x\n",
DC_LOG_HW_AUDIO("\n\t========= AUDIO:dce_aud_az_disable: index: %u data: 0x%x\n",
audio->inst, value);
}

@ -795,8 +793,7 @@ void dce_aud_wall_dto_setup(
crtc_info->calculated_pixel_clock,
&clock_info);

dm_logger_write(audio->ctx->logger, LOG_HW_AUDIO,\
"\n%s:Input::requested_pixel_clock = %d"\
DC_LOG_HW_AUDIO("\n%s:Input::requested_pixel_clock = %d"\
"calculated_pixel_clock =%d\n"\
"audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\
crtc_info->requested_pixel_clock,\
|
@ -41,7 +41,8 @@

#define CTX \
clk_src->base.ctx

#define DC_LOGGER \
calc_pll_cs->ctx->logger
#undef FN
#define FN(reg_name, field_name) \
clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
@ -288,7 +289,7 @@ static uint32_t calculate_pixel_clock_pll_dividers(
uint32_t max_ref_divider;

if (pll_settings->adjusted_pix_clk == 0) {
dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
DC_LOG_ERROR(
"%s Bad requested pixel clock", __func__);
return MAX_PLL_CALC_ERROR;
}
@ -349,13 +350,13 @@ static uint32_t calculate_pixel_clock_pll_dividers(
* ## SVS Wed 15 Jul 2009 */

if (min_post_divider > max_post_divider) {
dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
DC_LOG_ERROR(
"%s Post divider range is invalid", __func__);
return MAX_PLL_CALC_ERROR;
}

if (min_ref_divider > max_ref_divider) {
dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
DC_LOG_ERROR(
"%s Reference divider range is invalid", __func__);
return MAX_PLL_CALC_ERROR;
}
@ -466,7 +467,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
{
uint32_t field = 0;
uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;

struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
/* Check if reference clock is external (not pcie/xtalin)
* HW Dce80 spec:
* 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB
@ -493,7 +494,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
if (!pll_adjust_pix_clk(clk_src, pix_clk_params, pll_settings)) {
/* Should never happen, ASSERT and fill up values to be able
* to continue. */
dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
DC_LOG_ERROR(
"%s: Failed to adjust pixel clock!!", __func__);
pll_settings->actual_pix_clk =
pix_clk_params->requested_pix_clk;
@ -556,11 +557,12 @@ static uint32_t dce110_get_pix_clk_dividers(
struct pll_settings *pll_settings)
{
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;

if (pix_clk_params == NULL || pll_settings == NULL
|| pix_clk_params->requested_pix_clk == 0) {
dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
DC_LOG_ERROR(
"%s: Invalid parameters!!\n", __func__);
return pll_calc_error;
}
@ -1052,14 +1054,14 @@ static void get_ss_info_from_atombios(
struct spread_spectrum_info *ss_info_cur;
struct spread_spectrum_data *ss_data_cur;
uint32_t i;

struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
if (ss_entries_num == NULL) {
dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
DC_LOG_SYNC(
"Invalid entry !!!\n");
return;
}
if (spread_spectrum_data == NULL) {
dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
DC_LOG_SYNC(
"Invalid array pointer!!!\n");
return;
}
@ -1104,7 +1106,7 @@ static void get_ss_info_from_atombios(
++i, ++ss_info_cur, ++ss_data_cur) {

if (ss_info_cur->type.STEP_AND_DELAY_INFO != false) {
dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
DC_LOG_SYNC(
"Invalid ATOMBIOS SS Table!!!\n");
goto out_free_data;
}
@ -1114,9 +1116,9 @@ static void get_ss_info_from_atombios(
if (as_signal == AS_SIGNAL_TYPE_HDMI
&& ss_info_cur->spread_spectrum_percentage > 6){
/* invalid input, do nothing */
dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
DC_LOG_SYNC(
"Invalid SS percentage ");
dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
DC_LOG_SYNC(
"for HDMI in ATOMBIOS info Table!!!\n");
continue;
}
@ -1228,12 +1230,12 @@ static bool calc_pll_max_vco_construct(
if (init_data->num_fract_fb_divider_decimal_point == 0 ||
init_data->num_fract_fb_divider_decimal_point_precision >
init_data->num_fract_fb_divider_decimal_point) {
dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
DC_LOG_ERROR(
"The dec point num or precision is incorrect!");
return false;
}
if (init_data->num_fract_fb_divider_decimal_point_precision == 0) {
dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
DC_LOG_ERROR(
"Incorrect fract feedback divider precision num!");
return false;
}
|
@ -49,6 +49,8 @@

#define CTX \
clk_dce->base.ctx
#define DC_LOGGER \
clk->ctx->logger

/* Max clock values for each state indexed by "enum clocks_state": */
static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
@ -292,8 +294,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(

low_req_clk = i + 1;
if (low_req_clk > clk->max_clks_state) {
dm_logger_write(clk->ctx->logger, LOG_WARNING,
"%s: clocks unsupported disp_clk %d pix_clk %d",
DC_LOG_WARNING("%s: clocks unsupported disp_clk %d pix_clk %d",
__func__,
req_clocks->display_clk_khz,
req_clocks->pixel_clk_khz);
@ -312,8 +313,7 @@ static bool dce_clock_set_min_clocks_state(

if (clocks_state > clk->max_clks_state) {
/*Requested state exceeds max supported state.*/
dm_logger_write(clk->ctx->logger, LOG_WARNING,
"Requested state exceeds max supported state");
DC_LOG_WARNING("Requested state exceeds max supported state");
return false;
} else if (clocks_state == clk->cur_min_clks_state) {
/*if we're trying to set the same state, we can just return
|
@ -56,6 +56,8 @@
|
||||
|
||||
#define CTX \
|
||||
enc110->base.ctx
|
||||
#define DC_LOGGER \
|
||||
enc110->base.ctx->logger
|
||||
|
||||
#define REG(reg)\
|
||||
(enc110->link_regs->reg)
|
||||
@ -676,6 +678,7 @@ void dce110_link_encoder_construct(
|
||||
{
|
||||
struct bp_encoder_cap_info bp_cap_info = {0};
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;

enc110->base.funcs = &dce110_lnk_enc_funcs;
enc110->base.ctx = init_data->ctx;
@@ -750,15 +753,24 @@ void dce110_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}

/* default to one to mirror Windows behavior */
enc110->base.features.flags.bits.HDMI_6GB_EN = 1;

result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
enc110->base.id, &bp_cap_info);

/* Override features with DCE-specific values */
if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
enc110->base.ctx->dc_bios, enc110->base.id,
&bp_cap_info)) {
if (BP_RESULT_OK == result) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
} else {
dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
"%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
}
}

@@ -809,7 +821,6 @@ void dce110_link_encoder_hw_init(
struct link_encoder *enc)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
struct dc_context *ctx = enc110->base.ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;

@@ -827,8 +838,7 @@ void dce110_link_encoder_hw_init(
result = link_transmitter_control(enc110, &cntl);

if (result != BP_RESULT_OK) {
dm_logger_write(ctx->logger, LOG_ERROR,
"%s: Failed to execute VBIOS command table!\n",
DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
return;
@@ -904,7 +914,6 @@ void dce110_link_encoder_enable_tmds_output(
uint32_t pixel_clock)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
struct dc_context *ctx = enc110->base.ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;

@@ -928,8 +937,7 @@ void dce110_link_encoder_enable_tmds_output(
result = link_transmitter_control(enc110, &cntl);

if (result != BP_RESULT_OK) {
dm_logger_write(ctx->logger, LOG_ERROR,
"%s: Failed to execute VBIOS command table!\n",
DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
}
@@ -942,7 +950,6 @@ void dce110_link_encoder_enable_dp_output(
enum clock_source_id clock_source)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
struct dc_context *ctx = enc110->base.ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;

@@ -969,8 +976,7 @@ void dce110_link_encoder_enable_dp_output(
result = link_transmitter_control(enc110, &cntl);

if (result != BP_RESULT_OK) {
dm_logger_write(ctx->logger, LOG_ERROR,
"%s: Failed to execute VBIOS command table!\n",
DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
}
@@ -983,7 +989,6 @@ void dce110_link_encoder_enable_dp_mst_output(
enum clock_source_id clock_source)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
struct dc_context *ctx = enc110->base.ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;

@@ -1010,8 +1015,7 @@ void dce110_link_encoder_enable_dp_mst_output(
result = link_transmitter_control(enc110, &cntl);

if (result != BP_RESULT_OK) {
dm_logger_write(ctx->logger, LOG_ERROR,
"%s: Failed to execute VBIOS command table!\n",
DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
}
@@ -1025,7 +1029,6 @@ void dce110_link_encoder_disable_output(
enum signal_type signal)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
struct dc_context *ctx = enc110->base.ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;

@@ -1053,8 +1056,7 @@ void dce110_link_encoder_disable_output(
result = link_transmitter_control(enc110, &cntl);

if (result != BP_RESULT_OK) {
dm_logger_write(ctx->logger, LOG_ERROR,
"%s: Failed to execute VBIOS command table!\n",
DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
return;
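Note on the logging conversion: the hunks above, and most of those below, mechanically replace explicit dm_logger_write(<logger>, LOG_<LEVEL>, ...) calls with DC_LOG_<LEVEL>(...) macros. A minimal sketch of what that macro layer plausibly looks like — the expansion shown here is an assumption for illustration, not the actual dc header:

    /* Assumed expansion, for illustration only. */
    #define DC_LOG_ERROR(...)   dm_logger_write(DC_LOGGER, LOG_ERROR, __VA_ARGS__)
    #define DC_LOG_WARNING(...) dm_logger_write(DC_LOGGER, LOG_WARNING, __VA_ARGS__)

Each translation unit then only has to name its logger handle once, which is exactly what the "#define DC_LOGGER \" additions in the following hunks provide.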
@@ -26,7 +26,8 @@
#include "dc_bios_types.h"
#include "dce_stream_encoder.h"
#include "reg_helper.h"

#define DC_LOGGER \
enc110->base.ctx->logger
enum DP_PIXEL_ENCODING {
DP_PIXEL_ENCODING_RGB444 = 0x00000000,
DP_PIXEL_ENCODING_YCBCR422 = 0x00000001,
@@ -197,7 +198,6 @@ static void dce110_update_hdmi_info_packet(
uint32_t packet_index,
const struct encoder_info_packet *info_packet)
{
struct dc_context *ctx = enc110->base.ctx;
uint32_t cont, send, line;

if (info_packet->valid) {
@@ -277,8 +277,7 @@ static void dce110_update_hdmi_info_packet(
#endif
default:
/* invalid HW packet index */
dm_logger_write(
ctx->logger, LOG_WARNING,
DC_LOG_WARNING(
"Invalid HW packet index: %s()\n",
__func__);
return;
@@ -1386,7 +1385,7 @@ static void dce110_se_setup_hdmi_audio(
crtc_info->requested_pixel_clock,
crtc_info->calculated_pixel_clock,
&audio_clock_info);
dm_logger_write(enc->ctx->logger, LOG_HW_AUDIO,
DC_LOG_HW_AUDIO(
"\n%s:Input::requested_pixel_clock = %d" \
"calculated_pixel_clock = %d \n", __func__, \
crtc_info->requested_pixel_clock, \
@@ -38,6 +38,8 @@

#define CTX \
xfm_dce->base.ctx
#define DC_LOGGER \
xfm_dce->base.ctx->logger

#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
#define GAMUT_MATRIX_SIZE 12
@@ -693,8 +695,7 @@ static int dce_transform_get_max_num_of_supported_lines(
break;

default:
dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
"%s: Invalid LB pixel depth",
DC_LOG_WARNING("%s: Invalid LB pixel depth",
__func__);
BREAK_TO_DEBUGGER();
break;
@@ -791,8 +792,7 @@ static void dce_transform_set_pixel_storage_depth(
if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
/*we should use unsupported capabilities
* unless it is required by w/a*/
dm_logger_write(xfm->ctx->logger, LOG_WARNING,
"%s: Capability not supported",
DC_LOG_WARNING("%s: Capability not supported",
__func__);
}
}
@@ -1172,8 +1172,7 @@ static void program_pwl(struct dce_transform *xfm_dce,
}

if (counter == max_tries) {
dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
"%s: regamma lut was not powered on "
DC_LOG_WARNING("%s: regamma lut was not powered on "
"in a timely manner,"
" programming still proceeds\n",
__func__);
@@ -248,6 +248,7 @@
XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\
XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
XFM_SF(DCP0_REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\
XFM_SF(DCP0_REGAMMA_LUT_WRITE_EN_MASK, REGAMMA_LUT_WRITE_EN_MASK, mask_sh),\
XFM_SF(SCL0_SCL_MODE, SCL_MODE, mask_sh), \
XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
@@ -34,6 +34,8 @@

#include "dce110_compressor.h"

#define DC_LOGGER \
cp110->base.ctx->logger
#define DCP_REG(reg)\
(reg + cp110->offsets.dcp_offset)
#define DMIF_REG(reg)\
@@ -120,14 +122,10 @@ static void wait_for_fbc_state_changed(
}

if (counter == 10) {
dm_logger_write(
cp110->base.ctx->logger, LOG_WARNING,
"%s: wait counter exceeded, changes to HW not applied",
DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
__func__);
} else {
dm_logger_write(
cp110->base.ctx->logger, LOG_SYNC,
"FBC status changed to %d", enabled);
DC_LOG_SYNC("FBC status changed to %d", enabled);
}


@@ -310,9 +308,7 @@ void dce110_compressor_program_compressed_surface_address_and_pitch(
if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
fbc_pitch = fbc_pitch / 8;
else
dm_logger_write(
compressor->ctx->logger, LOG_WARNING,
"%s: Unexpected DCE11 compression ratio",
DC_LOG_WARNING("%s: Unexpected DCE11 compression ratio",
__func__);

/* Clear content first. */
@@ -70,6 +70,8 @@

#define CTX \
hws->ctx
#define DC_LOGGER \
ctx->logger
#define REG(reg)\
hws->regs->reg

@@ -682,15 +684,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct dc_link *link = pipe_ctx->stream->sink->link;

/* 1. update AVI info frame (HDMI, DP)
* we always need to update info frame
*/

uint32_t active_total_with_borders;
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;

/* TODOFPGA may change to hwss.update_info_frame */
/* For MST, there are multiply stream go to only one link.
* connect DIG back_end to front_end while enable_stream and
* disconnect them during disable_stream
* BY this, it is logic clean to separate stream and link */
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);

/* update AVI info frame (HDMI, DP)*/
/* TODO: FPGA may change to hwss.update_info_frame */
dce110_update_info_frame(pipe_ctx);

/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
timing->h_addressable
@@ -711,12 +720,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
}

/* For MST, there are multiply stream go to only one link.
* connect DIG back_end to front_end while enable_stream and
* disconnect them during disable_stream
* BY this, it is logic clean to separate stream and link */
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);


}

@@ -816,7 +821,7 @@ void hwss_edp_wait_for_hpd_ready(
dal_gpio_destroy_irq(&hpd);

if (false == edp_hpd_high) {
dm_logger_write(ctx->logger, LOG_ERROR,
DC_LOG_ERROR(
"%s: wait timed out!\n", __func__);
}
}
@@ -840,7 +845,7 @@ void hwss_edp_power_control(
if (power_up != is_panel_powered_on(hwseq)) {
/* Send VBIOS command to prompt eDP panel power */

dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
DC_LOG_HW_RESUME_S3(
"%s: Panel Power action: %s\n",
__func__, (power_up ? "On":"Off"));

@@ -856,11 +861,11 @@ void hwss_edp_power_control(
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);

if (bp_result != BP_RESULT_OK)
dm_logger_write(ctx->logger, LOG_ERROR,
DC_LOG_ERROR(
"%s: Panel Power bp_result: %d\n",
__func__, bp_result);
} else {
dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
DC_LOG_HW_RESUME_S3(
"%s: Skipping Panel Power action: %s\n",
__func__, (power_up ? "On":"Off"));
}
@@ -886,7 +891,7 @@ void hwss_edp_backlight_control(
}

if (enable && is_panel_backlight_on(hws)) {
dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
DC_LOG_HW_RESUME_S3(
"%s: panel already powered up. Do nothing.\n",
__func__);
return;
@@ -894,7 +899,7 @@ void hwss_edp_backlight_control(

/* Send VBIOS command to control eDP panel backlight */

dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
DC_LOG_HW_RESUME_S3(
"%s: backlight action: %s\n",
__func__, (enable ? "On":"Off"));

@@ -1320,10 +1325,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(

resource_build_info_frame(pipe_ctx);
dce110_update_info_frame(pipe_ctx);
if (!pipe_ctx_old->stream) {
if (!pipe_ctx->stream->dpms_off)
core_link_enable_stream(context, pipe_ctx);
}
if (!pipe_ctx_old->stream)
core_link_enable_stream(context, pipe_ctx);

pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;

@@ -2689,7 +2692,7 @@ static void dce110_program_front_end_for_pipe(
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
unsigned int i;

struct dc_context *ctx = dc->ctx;
memset(&tbl_entry, 0, sizeof(tbl_entry));

if (dc->current_state)
@@ -2764,7 +2767,7 @@ static void dce110_program_front_end_for_pipe(
if (pipe_ctx->plane_state->update_flags.bits.full_update)
dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);

dm_logger_write(dc->ctx->logger, LOG_SURFACE,
DC_LOG_SURFACE(
"Pipe:%d 0x%x: addr hi:0x%x, "
"addr low:0x%x, "
"src: %d, %d, %d,"
@@ -2787,7 +2790,7 @@ static void dce110_program_front_end_for_pipe(
pipe_ctx->plane_state->clip_rect.width,
pipe_ctx->plane_state->clip_rect.height);

dm_logger_write(dc->ctx->logger, LOG_SURFACE,
DC_LOG_SURFACE(
"Pipe %d: width, height, x, y\n"
"viewport:%d, %d, %d, %d\n"
"recout: %d, %d, %d, %d\n",
@@ -52,6 +52,8 @@
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"

#define DC_LOGGER \
dc->ctx->logger
#if defined(CONFIG_DRM_AMD_DC_FBC)
#include "dce110/dce110_compressor.h"
#endif
@@ -771,8 +773,7 @@ static bool dce110_validate_bandwidth(
{
bool result = false;

dm_logger_write(
dc->ctx->logger, LOG_BANDWIDTH_CALCS,
DC_LOG_BANDWIDTH_CALCS(
"%s: start",
__func__);

@@ -786,8 +787,7 @@ static bool dce110_validate_bandwidth(
result = true;

if (!result)
dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
"%s: %dx%d@%d Bandwidth validation failed!\n",
DC_LOG_BANDWIDTH_VALIDATION("%s: %dx%d@%d Bandwidth validation failed!\n",
__func__,
context->streams[0]->timing.h_addressable,
context->streams[0]->timing.v_addressable,
@@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth(
return result;
}

enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
struct dc_caps *caps)
{
if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
return DC_FAIL_SURFACE_VALIDATE;

return DC_OK;
}

static bool dce110_validate_surface_sets(
struct dc_state *context)
{
@@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets(
plane->src_rect.height > 1080))
return false;

/* we don't have the logic to support underlay
* only yet so block the use case where we get
* NV12 plane as top layer
*/
if (j == 0)
return false;

/* irrespective of plane format,
* stream should be RGB encoded
*/
@@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
.link_enc_create = dce110_link_encoder_create,
.validate_guaranteed = dce110_validate_guaranteed,
.validate_bandwidth = dce110_validate_bandwidth,
.validate_plane = dce110_validate_plane,
.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
.add_stream_to_ctx = dce110_add_stream_to_ctx,
.validate_global = dce110_validate_global
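Note on dce110_validate_plane(): the new hook encodes DCE11's 2:1 downscale limit — a plane fails validation when the destination rectangle is smaller than half the source in either axis. A worked example with hypothetical values (not from the patch):

    /* Hypothetical plane, for illustration only. */
    struct dc_plane_state ps = {0};
    ps.src_rect.width  = 3840;
    ps.dst_rect.width  = 1600;  /* 1600 * 2 = 3200 < 3840 -> DC_FAIL_SURFACE_VALIDATE */
    ps.src_rect.height = 2160;
    ps.dst_rect.height = 1080;  /* 1080 * 2 = 2160: exactly 2:1, still allowed */

A 3840-to-1920 downscale passes, since the check uses strict less-than; the width above fails, so the whole plane is rejected.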
@@ -38,6 +38,8 @@

#include "timing_generator.h"

#define DC_LOGGER \
tg->ctx->logger
/** ********************************************************************************
*
* DCE11 Timing Generator Implementation
@@ -606,8 +608,7 @@ static uint32_t dce110_timing_generator_v_get_vblank_counter(struct timing_gener
static bool dce110_timing_generator_v_did_triggered_reset_occur(
struct timing_generator *tg)
{
dm_logger_write(tg->ctx->logger, LOG_ERROR,
"Timing Sync not supported on underlay pipe\n");
DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return false;
}

@@ -615,8 +616,7 @@ static void dce110_timing_generator_v_setup_global_swap_lock(
struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params)
{
dm_logger_write(tg->ctx->logger, LOG_ERROR,
"Timing Sync not supported on underlay pipe\n");
DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return;
}

@@ -624,24 +624,21 @@ static void dce110_timing_generator_v_enable_reset_trigger(
struct timing_generator *tg,
int source_tg_inst)
{
dm_logger_write(tg->ctx->logger, LOG_ERROR,
"Timing Sync not supported on underlay pipe\n");
DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return;
}

static void dce110_timing_generator_v_disable_reset_trigger(
struct timing_generator *tg)
{
dm_logger_write(tg->ctx->logger, LOG_ERROR,
"Timing Sync not supported on underlay pipe\n");
DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return;
}

static void dce110_timing_generator_v_tear_down_global_swap_lock(
struct timing_generator *tg)
{
dm_logger_write(tg->ctx->logger, LOG_ERROR,
"Timing Sync not supported on underlay pipe\n");
DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return;
}

@@ -30,6 +30,8 @@
#include "dce/dce_11_0_sh_mask.h"

#define SCLV_PHASES 64
#define DC_LOGGER \
xfm->ctx->logger

struct sclv_ratios_inits {
uint32_t h_int_scale_ratio_luma;
@@ -670,8 +672,7 @@ static void dce110_xfmv_set_pixel_storage_depth(
if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
/*we should use unsupported capabilities
* unless it is required by w/a*/
dm_logger_write(xfm->ctx->logger, LOG_WARNING,
"%s: Capability not supported",
DC_LOG_WARNING("%s: Capability not supported",
__func__);
}
}
@@ -33,7 +33,8 @@
#include "include/logger_interface.h"

#include "dce112_compressor.h"

#define DC_LOGGER \
cp110->base.ctx->logger
#define DCP_REG(reg)\
(reg + cp110->offsets.dcp_offset)
#define DMIF_REG(reg)\
@@ -129,8 +130,7 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
LOW_POWER_TILING_NUM_PIPES);
break;
default:
dm_logger_write(
cp110->base.ctx->logger, LOG_WARNING,
DC_LOG_WARNING(
"%s: Invalid LPT NUM_PIPES!!!",
__func__);
break;
@@ -175,8 +175,7 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
LOW_POWER_TILING_NUM_BANKS);
break;
default:
dm_logger_write(
cp110->base.ctx->logger, LOG_WARNING,
DC_LOG_WARNING(
"%s: Invalid LPT NUM_BANKS!!!",
__func__);
break;
@@ -209,8 +208,7 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
break;
default:
dm_logger_write(
cp110->base.ctx->logger, LOG_WARNING,
DC_LOG_WARNING(
"%s: Invalid LPT INTERLEAVE_SIZE!!!",
__func__);
break;
@@ -253,15 +251,13 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
LOW_POWER_TILING_ROW_SIZE);
break;
default:
dm_logger_write(
cp110->base.ctx->logger, LOG_WARNING,
DC_LOG_WARNING(
"%s: Invalid LPT ROW_SIZE!!!",
__func__);
break;
}
} else {
dm_logger_write(
cp110->base.ctx->logger, LOG_WARNING,
DC_LOG_WARNING(
"%s: LPT MC Configuration is not provided",
__func__);
}
@@ -311,8 +307,7 @@ static void wait_for_fbc_state_changed(
}

if (counter == 10) {
dm_logger_write(
cp110->base.ctx->logger, LOG_WARNING,
DC_LOG_WARNING(
"%s: wait counter exceeded, changes to HW not applied",
__func__);
}
@@ -525,8 +520,7 @@ void dce112_compressor_program_compressed_surface_address_and_pitch(
if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
fbc_pitch = fbc_pitch / 8;
else
dm_logger_write(
compressor->ctx->logger, LOG_WARNING,
DC_LOG_WARNING(
"%s: Unexpected DCE11 compression ratio",
__func__);

@@ -690,8 +684,7 @@ void dce112_compressor_program_lpt_control(
LOW_POWER_TILING_MODE);
break;
default:
dm_logger_write(
compressor->ctx->logger, LOG_WARNING,
DC_LOG_WARNING(
"%s: Invalid selected DRAM channels for LPT!!!",
__func__);
break;
@@ -56,6 +56,8 @@
#include "dce/dce_11_2_sh_mask.h"

#include "dce100/dce100_resource.h"
#define DC_LOGGER \
dc->ctx->logger

#ifndef mmDP_DPHY_INTERNAL_CTRL
#define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
@@ -722,8 +724,7 @@ bool dce112_validate_bandwidth(
{
bool result = false;

dm_logger_write(
dc->ctx->logger, LOG_BANDWIDTH_CALCS,
DC_LOG_BANDWIDTH_CALCS(
"%s: start",
__func__);

@@ -737,7 +738,7 @@ bool dce112_validate_bandwidth(
result = true;

if (!result)
dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
DC_LOG_BANDWIDTH_VALIDATION(
"%s: Bandwidth validation failed!",
__func__);

@@ -416,3 +416,156 @@ bool cm_helper_translate_curve_to_hw_format(

return true;
}

#define NUM_DEGAMMA_REGIONS 12


bool cm_helper_translate_curve_to_degamma_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params)
{
struct curve_points *arr_points;
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
struct fixed31_32 y_r;
struct fixed31_32 y_g;
struct fixed31_32 y_b;
struct fixed31_32 y1_min;
struct fixed31_32 y3_max;

int32_t region_start, region_end;
int32_t i;
uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;

if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;

PERF_TRACE();

arr_points = lut_params->arr_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;

memset(lut_params, 0, sizeof(struct pwl_params));
memset(seg_distr, 0, sizeof(seg_distr));

region_start = -NUM_DEGAMMA_REGIONS;
region_end = 0;


for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
seg_distr[i] = -1;
/* 12 segments
* segments are from 2^-12 to 0
*/
for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++)
seg_distr[i] = 4;

for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
if (seg_distr[k] != -1)
hw_points += (1 << seg_distr[k]);
}

j = 0;
for (k = 0; k < (region_end - region_start); k++) {
increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
start_index = (region_start + k + MAX_LOW_POINT) *
NUMBER_SW_SEGMENTS;
for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
i += increment) {
if (j == hw_points - 1)
break;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
j++;
}
}

/* last point */
start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];

arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
dal_fixed31_32_from_int(region_start));
arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
dal_fixed31_32_from_int(region_end));

y_r = rgb_resulted[0].red;
y_g = rgb_resulted[0].green;
y_b = rgb_resulted[0].blue;

y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));

arr_points[0].y = y1_min;
arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
y_r = rgb_resulted[hw_points - 1].red;
y_g = rgb_resulted[hw_points - 1].green;
y_b = rgb_resulted[hw_points - 1].blue;

/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
*/
y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));

arr_points[1].y = y3_max;

arr_points[1].slope = dal_fixed31_32_zero;

if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
* and the slope to be such that we hit 1.0 at 10000 nits.
*/
const struct fixed31_32 end_value =
dal_fixed31_32_from_int(125);

arr_points[1].slope = dal_fixed31_32_div(
dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
dal_fixed31_32_sub(end_value, arr_points[1].x));
}

lut_params->hw_points_num = hw_points;

i = 1;
for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
if (seg_distr[k] != -1) {
lut_params->arr_curve_points[k].segments_num =
seg_distr[k];
lut_params->arr_curve_points[i].offset =
lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
}
i++;
}

if (seg_distr[k] != -1)
lut_params->arr_curve_points[k].segments_num = seg_distr[k];

rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1;

i = 1;
while (i != hw_points + 1) {
if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
rgb_plus_1->red = rgb->red;
if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
rgb_plus_1->green = rgb->green;
if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
rgb_plus_1->blue = rgb->blue;

rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);

++rgb_plus_1;
++rgb;
++i;
}
cm_helper_convert_to_custom_float(rgb_resulted,
lut_params->arr_points,
hw_points, false);

return true;
}
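Two pieces of arithmetic in cm_helper_translate_curve_to_degamma_hw_format() can be checked by hand. With NUM_DEGAMMA_REGIONS = 12 and seg_distr[i] = 4 for every used region, each region contributes 2^4 = 16 points, so the accumulation loop yields hw_points = 12 * 16 = 192 hardware points spanning [2^-12, 2^0]. For the PQ branch, the end slope works out to:

    /* As in the function above: end_value = 125, x_end = 2^region_end = 1 */
    /* slope = (1.0 - y_end) / (end_value - x_end)                         */

so the extrapolated line reaches 1.0 at x = 125. The stated intent is "hit 1.0 at 10000 nits"; that is consistent with 1.0 on this axis corresponding to 80 nits (125 * 80 = 10000), but that mapping is an inference from the numbers, not something the patch states.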
@@ -106,4 +106,9 @@ bool cm_helper_translate_curve_to_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint);

bool cm_helper_translate_curve_to_degamma_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params);


#endif
@@ -432,14 +432,12 @@ void dpp1_dppclk_control(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

if (enable) {
if (dpp->tf_mask->DPPCLK_RATE_CONTROL) {
if (dpp->tf_mask->DPPCLK_RATE_CONTROL)
REG_UPDATE_2(DPP_CONTROL,
DPPCLK_RATE_CONTROL, dppclk_div,
DPP_CLOCK_ENABLE, 1);
} else {
ASSERT(dppclk_div == false);
else
REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 1);
}
} else
REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
}
@@ -30,6 +30,8 @@

#define CTX \
hubbub->ctx
#define DC_LOGGER \
hubbub->ctx->logger
#define REG(reg)\
hubbub->regs->reg

@@ -100,7 +102,6 @@ bool hubbub1_verify_allow_pstate_change_high(
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */

unsigned int debug_index = 0x7;
unsigned int debug_data;
unsigned int i;

@@ -115,7 +116,9 @@ bool hubbub1_verify_allow_pstate_change_high(
forced_pstate_allow = false;
}

/* description "3-0: Pipe0 cursor0 QOS
/* RV1:
* dchubbubdebugind, at: 0x7
* description "3-0: Pipe0 cursor0 QOS
* 7-4: Pipe1 cursor0 QOS
* 11-8: Pipe2 cursor0 QOS
* 15-12: Pipe3 cursor0 QOS
@@ -137,7 +140,8 @@ bool hubbub1_verify_allow_pstate_change_high(
* 31: SOC pstate change request
*/

REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, debug_index);

REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate);

for (i = 0; i < pstate_wait_timeout_us; i++) {
debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
@@ -145,8 +149,7 @@ bool hubbub1_verify_allow_pstate_change_high(
if (debug_data & (1 << 30)) {

if (i > pstate_wait_expected_timeout_us)
dm_logger_write(hubbub->ctx->logger, LOG_WARNING,
"pstate took longer than expected ~%dus\n",
DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
i);

return true;
@@ -165,8 +168,7 @@ bool hubbub1_verify_allow_pstate_change_high(
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
forced_pstate_allow = true;

dm_logger_write(hubbub->ctx->logger, LOG_WARNING,
"pstate TEST_DEBUG_DATA: 0x%X\n",
DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
debug_data);

return false;
@@ -209,16 +211,14 @@ void hubbub1_program_watermarks(
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"URGENCY_WATERMARK_A calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);

prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.pte_meta_urgent_ns, prog_wm_value);

@@ -227,8 +227,7 @@ void hubbub1_program_watermarks(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);

@@ -237,8 +236,7 @@ void hubbub1_program_watermarks(
watermarks->a.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"SR_EXIT_WATERMARK_A calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
}
@@ -247,8 +245,7 @@ void hubbub1_program_watermarks(
watermarks->a.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);

@@ -257,8 +254,7 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"URGENCY_WATERMARK_B calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);

@@ -267,8 +263,7 @@ void hubbub1_program_watermarks(
watermarks->b.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.pte_meta_urgent_ns, prog_wm_value);

@@ -278,8 +273,7 @@ void hubbub1_program_watermarks(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"SR_ENTER_WATERMARK_B calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);

@@ -288,8 +282,7 @@ void hubbub1_program_watermarks(
watermarks->b.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"SR_EXIT_WATERMARK_B calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
}
@@ -298,8 +291,7 @@ void hubbub1_program_watermarks(
watermarks->b.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);

@@ -307,8 +299,7 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"URGENCY_WATERMARK_C calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);

@@ -317,8 +308,7 @@ void hubbub1_program_watermarks(
watermarks->c.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.pte_meta_urgent_ns, prog_wm_value);

@@ -328,8 +318,7 @@ void hubbub1_program_watermarks(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"SR_ENTER_WATERMARK_C calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);

@@ -338,8 +327,7 @@ void hubbub1_program_watermarks(
watermarks->c.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"SR_EXIT_WATERMARK_C calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
}
@@ -348,8 +336,7 @@ void hubbub1_program_watermarks(
watermarks->c.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);

@@ -357,8 +344,7 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"URGENCY_WATERMARK_D calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);

@@ -366,8 +352,7 @@ void hubbub1_program_watermarks(
watermarks->d.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.pte_meta_urgent_ns, prog_wm_value);

@@ -377,8 +362,7 @@ void hubbub1_program_watermarks(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"SR_ENTER_WATERMARK_D calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);

@@ -387,8 +371,7 @@ void hubbub1_program_watermarks(
watermarks->d.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"SR_EXIT_WATERMARK_D calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
}
@@ -398,8 +381,7 @@ void hubbub1_program_watermarks(
watermarks->d.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
dm_logger_write(hubbub->ctx->logger, LOG_BANDWIDTH_CALCS,
"DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);

@@ -512,5 +494,6 @@ void hubbub1_construct(struct hubbub *hubbub,
hubbub->shifts = hubbub_shift;
hubbub->masks = hubbub_mask;

hubbub->debug_test_index_pstate = 0x7;
}
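Every watermark write above follows the same convert-then-clamp pattern. convert_and_clamp() itself is not shown in these hunks; a plausible reconstruction, assuming watermarks are given in nanoseconds and programmed as refclk cycles saturated to the 21-bit register field (0x1fffff):

    /* Hypothetical sketch -- the real helper lives elsewhere in dcn10_hubbub.c. */
    static unsigned int convert_and_clamp(unsigned int wm_ns,
                                          unsigned int refclk_mhz,
                                          unsigned int clamp_value)
    {
            /* ns * (cycles per microsecond) / 1000 = cycles */
            unsigned int ret_val = wm_ns * refclk_mhz / 1000;

            if (ret_val > clamp_value)
                    ret_val = clamp_value;

            return ret_val;
    }

For example, a 1000 ns watermark at refclk_mhz = 300 would program 300 cycles.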
@@ -185,6 +185,7 @@ struct hubbub {
const struct dcn_hubbub_registers *regs;
const struct dcn_hubbub_shift *shifts;
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
};

void hubbub1_update_dchub(
@@ -45,6 +45,8 @@
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"

#define DC_LOGGER \
ctx->logger
#define CTX \
hws->ctx
#define REG(reg)\
@@ -328,6 +330,7 @@ static void power_on_plane(
struct dce_hwseq *hws,
int plane_id)
{
struct dc_context *ctx = hws->ctx;
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
@@ -335,7 +338,7 @@ static void power_on_plane(
hubp_pg_control(hws, plane_id, true);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
dm_logger_write(hws->ctx->logger, LOG_DEBUG,
DC_LOG_DEBUG(
"Un-gated front end for pipe %d\n", plane_id);
}
}
@@ -526,7 +529,7 @@ static void reset_back_end_for_pipe(
struct dc_state *context)
{
int i;

struct dc_context *ctx = dc->ctx;
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
@@ -536,6 +539,22 @@ static void reset_back_end_for_pipe(
/* DPMS may already disable */
if (!pipe_ctx->stream->dpms_off)
core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
else if (pipe_ctx->stream_res.audio) {
/*
* if stream is already disabled outside of commit streams path,
* audio disable was skipped. Need to do it here
*/
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

if (dc->caps.dynamic_audio == true) {
/*we have to dynamic arbitrate the audio endpoints*/
pipe_ctx->stream_res.audio = NULL;
/*we free the resource, need reset is_audio_acquired*/
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
}

}

}

/* by upper caller loop, parent pipe: pipe0, will be reset last.
@@ -556,8 +575,7 @@ static void reset_back_end_for_pipe(
return;

pipe_ctx->stream = NULL;
dm_logger_write(dc->ctx->logger, LOG_DEBUG,
"Reset back end for pipe %d, tg:%d\n",
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}

@@ -607,6 +625,7 @@ static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_context *ctx = dc->ctx;

if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
@@ -616,7 +635,7 @@ static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
dm_logger_write(dc->ctx->logger, LOG_DEBUG,
DC_LOG_DEBUG(
"Power gated front end %d\n", pipe_ctx->pipe_idx);
}
}
@@ -656,6 +675,8 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)

static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dc_context *ctx = dc->ctx;

if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
return;

@@ -663,8 +684,7 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)

apply_DEGVIDCN10_253_wa(dc);

dm_logger_write(dc->ctx->logger, LOG_DC,
"Power down front end %d\n",
DC_LOG_DC("Power down front end %d\n",
pipe_ctx->pipe_idx);
}

@@ -1086,7 +1106,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
struct dc *core_dc,
struct pipe_ctx *pipe_ctx)
{
dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
"\n============== DML TTU Output parameters [%d] ==============\n"
"qos_level_low_wm: %d, \n"
"qos_level_high_wm: %d, \n"
@@ -1116,7 +1136,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
);

dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
"\n============== DML DLG Output parameters [%d] ==============\n"
"refcyc_h_blank_end: %d, \n"
"dlg_vblank_end: %d, \n"
@@ -1151,7 +1171,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
);

dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
"\ndst_y_per_meta_row_nom_l: %d, \n"
"refcyc_per_meta_chunk_nom_l: %d, \n"
"refcyc_per_line_delivery_pre_l: %d, \n"
@@ -1181,7 +1201,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
);

dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
"\n============== DML RQ Output parameters [%d] ==============\n"
"chunk_size: %d \n"
"min_chunk_size: %d \n"
@@ -1314,7 +1334,7 @@ static void dcn10_enable_plane(

/* TODO: enable/disable in dm as per update type.
if (plane_state) {
dm_logger_write(dc->ctx->logger, LOG_DC,
DC_LOG_DC(dc->ctx->logger,
"Pipe:%d 0x%x: addr hi:0x%x, "
"addr low:0x%x, "
"src: %d, %d, %d,"
@@ -1332,7 +1352,7 @@ static void dcn10_enable_plane(
plane_state->dst_rect.width,
plane_state->dst_rect.height);

dm_logger_write(dc->ctx->logger, LOG_DC,
DC_LOG_DC(dc->ctx->logger,
"Pipe %d: width, height, x, y format:%d\n"
"viewport:%d, %d, %d, %d\n"
"recout: %d, %d, %d, %d\n",
@@ -1568,6 +1588,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}


static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -1667,12 +1688,13 @@ static void update_dchubp_dpp(
if (plane_state->update_flags.bits.full_update) {
dpp->funcs->dpp_dppclk_control(
dpp,
context->bw.dcn.calc_clk.dppclk_div,
context->bw.dcn.calc_clk.max_dppclk_khz <
context->bw.dcn.calc_clk.dispclk_khz,
true);

dc->current_state->bw.dcn.cur_clk.dppclk_div =
context->bw.dcn.calc_clk.dppclk_div;
context->bw.dcn.cur_clk.dppclk_div = context->bw.dcn.calc_clk.dppclk_div;
dc->current_state->bw.dcn.cur_clk.max_dppclk_khz =
context->bw.dcn.calc_clk.max_dppclk_khz;
context->bw.dcn.cur_clk.max_dppclk_khz = context->bw.dcn.calc_clk.max_dppclk_khz;
}

/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -1803,8 +1825,9 @@ static void program_all_pipe_in_tree(
dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
}

if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) {
program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
}
}

static void dcn10_pplib_apply_display_requirements(
@@ -1889,6 +1912,7 @@ static void dcn10_apply_ctx_for_surface(
bool removed_pipe[4] = { false };
unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
bool program_water_mark = false;
struct dc_context *ctx = dc->ctx;

struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
@@ -1924,7 +1948,7 @@ static void dcn10_apply_ctx_for_surface(
if (old_pipe_ctx->stream_res.tg == tg &&
old_pipe_ctx->plane_res.hubp &&
old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
dcn10_disable_plane(dc, pipe_ctx);
dcn10_disable_plane(dc, old_pipe_ctx);
/*
* power down fe will unlock when calling reset, need
* to lock it back here. Messy, need rework.
@@ -1940,7 +1964,7 @@ static void dcn10_apply_ctx_for_surface(
plane_atomic_disconnect(dc, old_pipe_ctx);
removed_pipe[i] = true;

dm_logger_write(dc->ctx->logger, LOG_DC,
DC_LOG_DC(
"Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx);
}
@@ -1983,7 +2007,7 @@ static void dcn10_apply_ctx_for_surface(
dcn10_verify_allow_pstate_change_high(dc);
}
}
/* dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
/* DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\n============== Watermark parameters ==============\n"
"a.urgent_ns: %d \n"
"a.cstate_enter_plus_exit: %d \n"
@@ -2006,7 +2030,7 @@ static void dcn10_apply_ctx_for_surface(
context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
context->bw.dcn.watermarks.b.pte_meta_urgent_ns
);
dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\nc.urgent_ns: %d \n"
"c.cstate_enter_plus_exit: %d \n"
"c.cstate_exit: %d \n"
@@ -2054,22 +2078,24 @@ static void dcn10_set_bandwidth(
dc->res_pool->display_clock->funcs->set_clock(
dc->res_pool->display_clock,
context->bw.dcn.calc_clk.dispclk_khz);
dc->current_state->bw.dcn.cur_clk.dispclk_khz =
context->bw.dcn.cur_clk.dispclk_khz =
context->bw.dcn.calc_clk.dispclk_khz;
}
if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_khz
> dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
context->bw.dcn.cur_clk.dcfclk_khz =
context->bw.dcn.calc_clk.dcfclk_khz;
smu_req.hard_min_dcefclk_khz =
context->bw.dcn.calc_clk.dcfclk_khz;
}
if (decrease_allowed || context->bw.dcn.calc_clk.fclk_khz
> dc->current_state->bw.dcn.cur_clk.fclk_khz) {
context->bw.dcn.cur_clk.fclk_khz =
context->bw.dcn.calc_clk.fclk_khz;
smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
}
if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz
> dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz) {
dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz =
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
}
@@ -2084,15 +2110,11 @@ static void dcn10_set_bandwidth(
/* Decrease in freq is increase in period so opposite comparison for dram_ccm */
if (decrease_allowed || context->bw.dcn.calc_clk.dram_ccm_us
< dc->current_state->bw.dcn.cur_clk.dram_ccm_us) {
dc->current_state->bw.dcn.calc_clk.dram_ccm_us =
context->bw.dcn.calc_clk.dram_ccm_us;
context->bw.dcn.cur_clk.dram_ccm_us =
context->bw.dcn.calc_clk.dram_ccm_us;
}
if (decrease_allowed || context->bw.dcn.calc_clk.min_active_dram_ccm_us
< dc->current_state->bw.dcn.cur_clk.min_active_dram_ccm_us) {
dc->current_state->bw.dcn.calc_clk.min_active_dram_ccm_us =
context->bw.dcn.calc_clk.min_active_dram_ccm_us;
context->bw.dcn.cur_clk.min_active_dram_ccm_us =
context->bw.dcn.calc_clk.min_active_dram_ccm_us;
}
@@ -2251,7 +2273,7 @@ static void dcn10_wait_for_mpcc_disconnect(
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);
/*dm_logger_write(dc->ctx->logger, LOG_ERROR,
/*DC_LOG_ERROR(dc->ctx->logger,
"[debug_mpo: wait_for_mpcc finished waiting on mpcc %d]\n",
i);*/
}
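The dcn10_set_bandwidth() hunks all apply one policy per clock: reprogram when the newly calculated value exceeds the current one (raising is always safe), or when decrease_allowed permits lowering. Restated as an illustrative helper — not part of the patch, and note that the dram_ccm values are periods, so their comparison flips, as the in-tree comment says:

    /* Illustrative only. */
    static bool should_update_clk(bool decrease_allowed, int calc_khz, int cur_khz)
    {
            return decrease_allowed || calc_khz > cur_khz;
    }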
@@ -83,6 +83,8 @@


struct dcn_optc_registers {
uint32_t OTG_GLOBAL_CONTROL1;
uint32_t OTG_GLOBAL_CONTROL2;
uint32_t OTG_VERT_SYNC_CONTROL;
uint32_t OTG_MASTER_UPDATE_MODE;
uint32_t OTG_GSL_CONTROL;
@@ -126,6 +128,7 @@ struct dcn_optc_registers {
uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
uint32_t OPTC_INPUT_CLOCK_CONTROL;
uint32_t OPTC_DATA_SOURCE_SELECT;
uint32_t OPTC_MEMORY_CONFIG;
uint32_t OPTC_INPUT_GLOBAL_CONTROL;
uint32_t CONTROL;
uint32_t OTG_GSL_WINDOW_X;
@@ -325,10 +328,9 @@ struct dcn_optc_registers {
type OPTC_INPUT_CLK_EN;\
type OPTC_INPUT_CLK_ON;\
type OPTC_INPUT_CLK_GATE_DIS;\
type OPTC_SRC_SEL;\
type OPTC_SEG0_SRC_SEL;\
type OPTC_UNDERFLOW_OCCURRED_STATUS;\
type OPTC_UNDERFLOW_CLEAR;\
type OPTC_SRC_SEL;\
type VTG0_ENABLE;\
type VTG0_FP2;\
type VTG0_VCOUNT_INIT;\
@@ -49,6 +49,13 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
struct dp_mst_stream_allocation_table *proposed_table,
bool enable);

/*
* Clear payload allocation table before enable MST DP link.
*/
void dm_helpers_dp_mst_clear_payload_allocation_table(
struct dc_context *ctx,
const struct dc_link *link);

/*
* Polls for ACT (allocation change trigger) handled and
*/
@@ -101,5 +108,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_link *link,
struct dc_sink *sink);

void dm_set_dcn_clocks(
struct dc_context *ctx,
struct dc_clocks *clks);

#endif /* __DM_HELPERS__ */
@@ -29,7 +29,7 @@
#include "os_types.h"
#include "dc_types.h"

#include "dm_pp_smu.h"
struct pp_smu_funcs_rv;

struct dm_pp_clock_range {
int min_khz;
@@ -239,25 +239,8 @@ enum dm_acpi_display_type {
AcpiDisplayType_DFP6 = 12
};

enum dm_pp_power_level {
DM_PP_POWER_LEVEL_INVALID,
DM_PP_POWER_LEVEL_ULTRA_LOW,
DM_PP_POWER_LEVEL_LOW,
DM_PP_POWER_LEVEL_NOMINAL,
DM_PP_POWER_LEVEL_PERFORMANCE,

DM_PP_POWER_LEVEL_0 = DM_PP_POWER_LEVEL_ULTRA_LOW,
DM_PP_POWER_LEVEL_1 = DM_PP_POWER_LEVEL_LOW,
DM_PP_POWER_LEVEL_2 = DM_PP_POWER_LEVEL_NOMINAL,
DM_PP_POWER_LEVEL_3 = DM_PP_POWER_LEVEL_PERFORMANCE,
DM_PP_POWER_LEVEL_4 = DM_PP_CLOCKS_DPM_STATE_LEVEL_3 + 1,
DM_PP_POWER_LEVEL_5 = DM_PP_CLOCKS_DPM_STATE_LEVEL_4 + 1,
DM_PP_POWER_LEVEL_6 = DM_PP_CLOCKS_DPM_STATE_LEVEL_5 + 1,
DM_PP_POWER_LEVEL_7 = DM_PP_CLOCKS_DPM_STATE_LEVEL_6 + 1,
};

struct dm_pp_power_level_change_request {
enum dm_pp_power_level power_level;
enum dm_pp_clocks_state power_level;
};

struct dm_pp_clock_for_voltage_req {
@@ -55,6 +55,8 @@ enum {

#define FROM_ENGINE(ptr) \
container_of((ptr), struct aux_engine, base)
#define DC_LOGGER \
engine->base.ctx->logger

enum i2caux_engine_type dal_aux_engine_get_engine_type(
const struct engine *engine)
@@ -126,20 +128,8 @@ static void process_read_reply(
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
} else if (ctx->returned_byte < ctx->current_read_length) {
ctx->current_read_length -= ctx->returned_byte;

ctx->offset += ctx->returned_byte;

++ctx->invalid_reply_retry_aux_on_ack;

if (ctx->invalid_reply_retry_aux_on_ack >
AUX_INVALID_REPLY_RETRY_COUNTER) {
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
}
} else {
ctx->current_read_length = ctx->returned_byte;
ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
ctx->transaction_complete = true;
ctx->operation_succeeded = true;
@@ -286,12 +276,13 @@ static bool read_command(

if (request->payload.address_space ==
I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
dm_logger_write(engine->base.ctx->logger, LOG_I2C_AUX, "READ: addr:0x%x value:0x%x Result:%d",
DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
request->payload.address,
request->payload.data[0],
ctx.operation_succeeded);
}

request->payload.length = ctx.reply.length;
return ctx.operation_succeeded;
}

@@ -494,7 +485,7 @@ static bool write_command(

if (request->payload.address_space ==
I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
dm_logger_write(engine->base.ctx->logger, LOG_I2C_AUX, "WRITE: addr:0x%x value:0x%x Result:%d",
DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
request->payload.address,
request->payload.data[0],
ctx.operation_succeeded);
@@ -48,6 +48,8 @@
/*
* This unit
*/
#define DC_LOGGER \
hw_engine->base.base.base.ctx->logger

enum dc_i2c_status {
DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
@@ -525,9 +527,7 @@ static void construct(
REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);

if (xtal_ref_div == 0) {
dm_logger_write(
hw_engine->base.base.base.ctx->logger, LOG_WARNING,
"Invalid base timer divider\n",
DC_LOG_WARNING("Invalid base timer divider\n",
__func__);
xtal_ref_div = 2;
}
@@ -253,6 +253,7 @@ bool dal_i2caux_submit_aux_command(
break;
}

cmd->payloads->length = request.payload.length;
++index_of_payload;
}

@@ -177,6 +177,15 @@ struct resource_pool {
	const struct resource_caps *res_cap;
};

struct dcn_fe_clocks {
	int dppclk_khz;
};

struct dcn_fe_bandwidth {
	struct dcn_fe_clocks calc;
	struct dcn_fe_clocks cur;
};
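The new dcn_fe_bandwidth keeps two copies of each front-end clock: calc, what bandwidth validation computed, and cur, what is actually programmed. A hedged sketch of the two-phase update this enables, program only on change and then latch; update_dppclk() is an illustrative stand-in for the hardware-sequencer hook:

#include <stdio.h>

struct dcn_fe_clocks { int dppclk_khz; };

struct dcn_fe_bandwidth {
	struct dcn_fe_clocks calc;	/* validated/desired */
	struct dcn_fe_clocks cur;	/* currently programmed */
};

static void update_dppclk(int khz)	/* stand-in for hw programming */
{
	printf("program DPP clock: %d kHz\n", khz);
}

static void commit_fe_bandwidth(struct dcn_fe_bandwidth *bw)
{
	if (bw->calc.dppclk_khz != bw->cur.dppclk_khz) {
		update_dppclk(bw->calc.dppclk_khz);
		bw->cur = bw->calc;	/* latch the committed state */
	}
}

int main(void)
{
	struct dcn_fe_bandwidth bw = { .calc = { 600000 }, .cur = { 0 } };

	commit_fe_bandwidth(&bw);	/* programs 600000 */
	commit_fe_bandwidth(&bw);	/* no-op: already current */
	return 0;
}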

struct stream_resource {
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
@@ -195,6 +204,8 @@ struct plane_resource {
	struct transform *xfm;
	struct dpp *dpp;
	uint8_t mpcc_inst;

	struct dcn_fe_bandwidth bw;
};

struct pipe_ctx {
@@ -245,20 +256,9 @@ struct dce_bw_output {
	int blackout_recovery_time_us;
};

struct dcn_bw_clocks {
	int dispclk_khz;
	int dppclk_khz;
	bool dppclk_div;
	int dcfclk_khz;
	int dcfclk_deep_sleep_khz;
	int fclk_khz;
	int dram_ccm_us;
	int min_active_dram_ccm_us;
};

struct dcn_bw_output {
	struct dcn_bw_clocks cur_clk;
	struct dcn_bw_clocks calc_clk;
	struct dc_clocks cur_clk;
	struct dc_clocks calc_clk;
	struct dcn_watermark_set watermarks;
};
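Dropping dcn_bw_clocks folds DCN's private clock bundle into the shared dc_clocks type, so DCE and DCN bandwidth code compare and copy one struct. The hunk does not show the dc_clocks layout, so the sketch below only illustrates the consolidation idea with assumed fields:

#include <stdbool.h>

/* Assumed, simplified stand-in for the shared clock bundle. */
struct dc_clocks {
	int dispclk_khz;
	int dppclk_khz;
	int dcfclk_khz;
	int fclk_khz;
};

/* One comparison helper now serves every consumer of the shared type. */
static bool clocks_changed(const struct dc_clocks *a,
			   const struct dc_clocks *b)
{
	return a->dispclk_khz != b->dispclk_khz ||
	       a->dppclk_khz != b->dppclk_khz ||
	       a->dcfclk_khz != b->dcfclk_khz ||
	       a->fclk_khz != b->fclk_khz;
}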

@@ -102,7 +102,7 @@ bool dal_ddc_service_query_ddc_data(
	uint8_t *read_buf,
	uint32_t read_size);

enum ddc_result dal_ddc_service_read_dpcd_data(
ssize_t dal_ddc_service_read_dpcd_data(
	struct ddc_service *ddc,
	bool i2c,
	enum i2c_mot_mode mot,
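Re-typing the return from enum ddc_result to ssize_t moves this helper onto the kernel's usual transfer convention: negative means an error code, non-negative means bytes actually transferred. A generic model of how callers check it (the callback type and the short-read policy are assumptions, not the DC API):

#include <errno.h>
#include <sys/types.h>

/* Illustrative callback type standing in for the DPCD read helper. */
typedef ssize_t (*dpcd_read_fn)(void *ctx, unsigned char *buf, size_t len);

static int read_exact(dpcd_read_fn read_fn, void *ctx,
		      unsigned char *buf, size_t len)
{
	ssize_t ret = read_fn(ctx, buf, len);

	if (ret < 0)
		return (int)ret;	/* negative errno propagates */
	if ((size_t)ret < len)
		return -EPROTO;		/* assumption: short read is a failure */
	return 0;
}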
@@ -35,6 +35,8 @@ struct dpp {
	int inst;
	struct dpp_caps *caps;
	struct pwl_params regamma_params;
	struct pwl_params degamma_params;

};

struct dpp_grph_csc_adjustment {
@@ -36,27 +36,25 @@

#include "dc.h"
#include "core_types.h"
static bool hpd_ack(
	struct irq_service *irq_service,
	const struct irq_source_info *info)
#define DC_LOGGER \
	irq_service->ctx->logger

static bool hpd_ack(struct irq_service *irq_service,
		    const struct irq_source_info *info)
{
	uint32_t addr = info->status_reg;
	uint32_t value = dm_read_reg(irq_service->ctx, addr);
	uint32_t current_status =
		get_reg_field_value(
			value,
			DC_HPD_INT_STATUS,
			DC_HPD_SENSE_DELAYED);
	uint32_t current_status = get_reg_field_value(value,
						      DC_HPD_INT_STATUS,
						      DC_HPD_SENSE_DELAYED);

	dal_irq_service_ack_generic(irq_service, info);

	value = dm_read_reg(irq_service->ctx, info->enable_reg);

	set_reg_field_value(
		value,
		current_status ? 0 : 1,
		DC_HPD_INT_CONTROL,
		DC_HPD_INT_POLARITY);
	set_reg_field_value(value, current_status ? 0 : 1,
			    DC_HPD_INT_CONTROL,
			    DC_HPD_INT_POLARITY);

	dm_write_reg(irq_service->ctx, info->enable_reg, value);
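hpd_ack() re-arms hot-plug detect for the opposite edge of whatever the pad currently senses: the DC_HPD_SENSE_DELAYED status picks the next DC_HPD_INT_POLARITY, so plug and unplug events keep alternating. The polarity flip in isolation:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the 'current_status ? 0 : 1' polarity selection above. */
static unsigned int next_hpd_polarity(bool sense_delayed_high)
{
	return sense_delayed_high ? 0 : 1;
}

int main(void)
{
	printf("sensed high -> watch polarity %u (falling)\n",
	       next_hpd_polarity(true));
	printf("sensed low  -> watch polarity %u (rising)\n",
	       next_hpd_polarity(false));
	return 0;
}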
@@ -176,48 +174,41 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
#define dc_underflow_int_entry(reg_num) \
	[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()

bool dal_irq_service_dummy_set(
	struct irq_service *irq_service,
	const struct irq_source_info *info,
	bool enable)
bool dal_irq_service_dummy_set(struct irq_service *irq_service,
			       const struct irq_source_info *info,
			       bool enable)
{
	dm_logger_write(
		irq_service->ctx->logger, LOG_ERROR,
		"%s: called for non-implemented irq source\n",
		__func__);
	DC_LOG_ERROR("%s: called for non-implemented irq source\n",
		     __func__);
	return false;
}

bool dal_irq_service_dummy_ack(
	struct irq_service *irq_service,
	const struct irq_source_info *info)
bool dal_irq_service_dummy_ack(struct irq_service *irq_service,
			       const struct irq_source_info *info)
{
	dm_logger_write(
		irq_service->ctx->logger, LOG_ERROR,
		"%s: called for non-implemented irq source\n",
		__func__);
	DC_LOG_ERROR("%s: called for non-implemented irq source\n",
		     __func__);
	return false;
}

bool dce110_vblank_set(
	struct irq_service *irq_service,
	const struct irq_source_info *info,
	bool enable)
bool dce110_vblank_set(struct irq_service *irq_service,
		       const struct irq_source_info *info,
		       bool enable)
{
	struct dc_context *dc_ctx = irq_service->ctx;
	struct dc *core_dc = irq_service->ctx->dc;
	enum dc_irq_source dal_irq_src = dc_interrupt_to_irq_source(
		irq_service->ctx->dc,
		info->src_id,
		info->ext_id);
	enum dc_irq_source dal_irq_src =
		dc_interrupt_to_irq_source(irq_service->ctx->dc,
					   info->src_id,
					   info->ext_id);
	uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;

	struct timing_generator *tg =
		core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;

	if (enable) {
		if (!tg->funcs->arm_vert_intr(tg, 2)) {
		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
			DC_ERROR("Failed to get VBLANK!\n");
			return false;
		}
@@ -225,7 +216,6 @@ bool dce110_vblank_set(

	dal_irq_service_set_generic(irq_service, info, enable);
	return true;

}

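The one functional change in this otherwise cosmetic hunk is the added NULL check: when no timing generator is bound to the pipe yet, '!tg' short-circuits before 'tg->funcs->arm_vert_intr(tg, 2)' can dereference a NULL pointer. The guard pattern as a minimal sketch:

#include <stdbool.h>
#include <stddef.h>

struct timing_generator;

struct tg_funcs {
	bool (*arm_vert_intr)(struct timing_generator *tg, int lines);
};

struct timing_generator {
	const struct tg_funcs *funcs;
};

static bool arm_vblank(struct timing_generator *tg)
{
	/* || evaluates left to right: tg is proven non-NULL before use. */
	if (!tg || !tg->funcs->arm_vert_intr(tg, 2))
		return false;
	return true;
}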
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
@@ -406,9 +396,8 @@ static const struct irq_service_funcs irq_service_funcs_dce110 = {
	.to_dal_irq_source = to_dal_irq_source_dce110
};

static void construct(
	struct irq_service *irq_service,
	struct irq_service_init_data *init_data)
static void construct(struct irq_service *irq_service,
		      struct irq_service_init_data *init_data)
{
	dal_irq_service_construct(irq_service, init_data);

@@ -416,8 +405,8 @@ static void construct(
	irq_service->funcs = &irq_service_funcs_dce110;
}

struct irq_service *dal_irq_service_dce110_create(
	struct irq_service_init_data *init_data)
struct irq_service *
dal_irq_service_dce110_create(struct irq_service_init_data *init_data)
{
	struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
					GFP_KERNEL);
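The create path uses the common kernel idiom of sizing the allocation from the pointee, kzalloc(sizeof(*irq_service), GFP_KERNEL), which stays correct if the pointer's type ever changes, and of handing construct() zeroed memory. A hedged completion of the function, since the hunk is cut off before the error path; the NULL check and construct() call are assumed from the pattern, not shown in this diff:

struct irq_service *
dal_irq_service_dce110_create(struct irq_service_init_data *init_data)
{
	struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
						  GFP_KERNEL);

	if (!irq_service)		/* assumed allocation-failure path */
		return NULL;

	construct(irq_service, init_data);
	return irq_service;
}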
Some files were not shown because too many files have changed in this diff.