/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(dev->dev,
				 "SI support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif
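	/*
	 * Illustrative note (not upstream code): the override hinted at by the
	 * messages above is given on the kernel command line, e.g.
	 *
	 *     radeon.si_support=0 amdgpu.si_support=1
	 *     radeon.cik_support=0 amdgpu.cik_support=1
	 *
	 * which hands SI/CIK parts to amdgpu instead of radeon.
	 */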

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
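
/*
 * Example (illustrative only): a query with query_fw->fw_type set to
 * AMDGPU_INFO_FW_GFX_MEC and query_fw->index == 1 reports the MEC2
 * firmware version, while any other index is rejected with -EINVAL.
 */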

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm dev pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, j, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->late_init_work);

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		enum amd_ip_block_type type;
		uint32_t ring_mask = 0;
		uint32_t ib_start_alignment = 0;
		uint32_t ib_size_alignment = 0;

		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			return -EINVAL;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = 32;
			ib_size_alignment = 32;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = 32;
			ib_size_alignment = 32;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			for (i = 0; i < adev->sdma.num_instances; i++)
				ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
			ib_start_alignment = 256;
			ib_size_alignment = 4;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
				ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
			ib_start_alignment = 64;
			ib_size_alignment = 64;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < adev->vce.num_rings; i++)
				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = 4;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
				for (j = 0; j < adev->uvd.num_enc_rings; j++)
					ring_mask |=
					((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
					(j + i * adev->uvd.num_enc_rings));
			ib_start_alignment = 64;
			ib_size_alignment = 64;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
			ib_start_alignment = 16;
			ib_size_alignment = 16;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			for (i = 0; i < adev->vcn.num_enc_rings; i++)
				ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
			ib_start_alignment = 64;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
			ib_start_alignment = 16;
			ib_size_alignment = 16;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid) {
				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
				ip.capabilities_flags = 0;
				ip.available_rings = ring_mask;
				ip.ib_start_alignment = ib_start_alignment;
				ip.ib_size_alignment = ib_size_alignment;
				break;
			}
		}
		return copy_to_user(out, &ip,
				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size;
		vram_gtt.vram_size -= adev->vram_pin_size;
		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= adev->gart_pin_size;
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size =
			adev->gmc.real_vram_size - adev->vram_pin_size;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			adev->gmc.visible_vram_size -
			(adev->vram_pin_size - adev->invisible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size
			- adev->gart_pin_size;
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		uint64_t vm_size;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_VA_HOLE_START);

		if (vm_size > AMDGPU_VA_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
			dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;

		if (amdgpu_ngg) {
			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
		}
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
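
/*
 * Illustrative userspace sketch (not part of this driver): the handler above
 * is normally reached through the AMDGPU info ioctl defined in the uapi
 * header (the DRM_IOCTL_AMDGPU_INFO macro is assumed here); only the request
 * layout below comes from the handler itself.
 *
 *	struct drm_amdgpu_info request = {};
 *	struct drm_amdgpu_info_firmware fw = {};
 *
 *	request.query = AMDGPU_INFO_FW_VERSION;
 *	request.query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
 *	request.query_fw.ip_instance = 0;
 *	request.return_pointer = (uintptr_t)&fw;
 *	request.return_size = sizeof(fw);
 *	ioctl(drm_fd, DRM_IOCTL_AMDGPU_INFO, &request);
 */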

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (adev->asic_type != CHIP_RAVEN) {
		amdgpu_uvd_free_handles(adev, file_priv);
		amdgpu_vce_free_handles(adev, file_priv);
	}

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_vm_fini(adev, &fpriv->vm);
	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
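	/*
	 * Worked example (illustration only): if the raw hw counter reads N
	 * while the scanout position lies between start of vblank and start
	 * of vsync (vpos >= 0), the value reported to DRM core is N + 1;
	 * once the hw counter itself increments at vsync, vpos goes negative
	 * and the unmodified value is reported, so the caller sees a single,
	 * monotonic step at start of vblank.
	 */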
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}
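
/*
 * Note: the +1 adjustment above makes the hardware frame counter appear to
 * increment at start of vblank, in sync with the vblank timestamp updates,
 * which is what the DRM core's drm_update_vblank_count() expects since
 * Linux 4.4.
 */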

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
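
/*
 * amdgpu-specific ioctls. DRM_AUTH entries require an authenticated client,
 * DRM_RENDER_ALLOW additionally exposes the ioctl on render nodes, and
 * DRM_MASTER (used for AMDGPU_SCHED) restricts it to the current DRM master.
 */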
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
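
/*
 * amdgpu_debugfs_firmware_info - dump firmware versions
 *
 * Prints the feature and firmware version of every microcode image the
 * driver knows about (VCE, UVD, MC, GFX ME/PFP/CE, RLC and its save/restore
 * lists, MEC/MEC2, PSP SOS/ASD, SMC, SDMA, VCN) plus the VBIOS version,
 * one per line, through the debugfs file registered by
 * amdgpu_debugfs_firmware_init() below.
 */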
static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

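	/* Only ASICs with a second compute micro engine carry MEC2 firmware:
	 * Kaveri, and everything newer than Topaz except Stoney.
	 */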
	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

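/**
 * amdgpu_debugfs_firmware_init - register the firmware info debugfs file
 *
 * @adev: amdgpu device pointer
 *
 * Registers the amdgpu_firmware_info entry with the DRM debugfs
 * infrastructure when CONFIG_DEBUG_FS is enabled; otherwise this is a
 * no-op that returns 0.
 */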
int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}
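
/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug and the GPU is
 * DRM minor 0):
 *
 *   cat /sys/kernel/debug/dri/0/amdgpu_firmware_info
 *
 * which prints one "<block> feature version: N, firmware version: 0x..."
 * line per firmware image queried in amdgpu_debugfs_firmware_info() above.
 */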