/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_amdkfd_device_fini(adev);

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(dev->dev,
				 "SI support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n");
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n");
			return -ENODEV;
		}
	}
#endif
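
	/*
	 * Example (illustrative): with both radeon and amdgpu built for
	 * SI/CIK, handing a Bonaire board to amdgpu instead of radeon means
	 * booting with the module parameters named in the messages above,
	 * e.g. on the kernel command line:
	 *
	 *	radeon.cik_support=0 amdgpu.cik_support=1
	 */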

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init() should report only fatal errors (memory
	 * allocation, iomapping or memory manager initialization failures);
	 * it must properly initialize the GPU MC controller and permit
	 * VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: they require modeset init,
	 * but failure is not fatal.
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	amdgpu_amdkfd_device_probe(adev);
	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->mc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
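
/*
 * Illustrative sketch (not part of this file): userspace reaches
 * amdgpu_firmware_info() through the AMDGPU_INFO_FW_VERSION query of the
 * info ioctl below. Assuming fd is an open amdgpu device node, a minimal
 * UVD firmware-version query might look like:
 *
 *	struct drm_amdgpu_info_firmware fw = {};
 *	struct drm_amdgpu_info request = {};
 *
 *	request.return_pointer = (uintptr_t)&fw;
 *	request.return_size = sizeof(fw);
 *	request.query = AMDGPU_INFO_FW_VERSION;
 *	request.query_fw.fw_type = AMDGPU_INFO_FW_UVD;
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
 *		printf("UVD fw 0x%08x\n", fw.ver);
 */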

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm dev pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		enum amd_ip_block_type type;
		uint32_t ring_mask = 0;
		uint32_t ib_start_alignment = 0;
		uint32_t ib_size_alignment = 0;

		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			return -EINVAL;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			for (i = 0; i < adev->sdma.num_instances; i++)
				ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			ring_mask = adev->uvd.ring.ready ? 1 : 0;
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 16;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < adev->vce.num_rings; i++)
				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			for (i = 0; i < adev->uvd.num_enc_rings; i++)
				ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 16;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			for (i = 0; i < adev->vcn.num_enc_rings; i++)
				ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid) {
				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
				ip.capabilities_flags = 0;
				ip.available_rings = ring_mask;
				ip.ib_start_alignment = ib_start_alignment;
				ip.ib_size_alignment = ib_size_alignment;
				break;
			}
		}
		return copy_to_user(out, &ip,
				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
	}
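	/*
	 * Worked example for the HW_IP_INFO query above: available_rings is
	 * a bitmask with one bit per ring index, set when that ring's
	 * ->ready flag is set. With compute rings 0 and 2 ready out of
	 * four, the mask reads 0b0101 = 0x5.
	 */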
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->mc.real_vram_size;
		vram_gtt.vram_size -= adev->vram_pin_size;
		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= adev->gart_pin_size;
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->mc.real_vram_size;
		mem.vram.usable_heap_size =
			adev->mc.real_vram_size - adev->vram_pin_size;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->mc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			adev->mc.visible_vram_size -
			(adev->vram_pin_size - adev->invisible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size
			- adev->gart_pin_size;
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if userspace set all bits
		 * in the bitfields
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->mc.vram_type;
		dev_info.vram_bit_width = adev->mc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;

		if (amdgpu_ngg) {
			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
		}
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		struct pp_gpu_power query = {0};
		int query_size = sizeof(query);

		if (amdgpu_dpm == 0)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&query, &query_size)) {
				return -EINVAL;
			}
			ui32 = query.average_gpu_power >> 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
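
/*
 * Illustrative sketch (not part of this file): the AMDGPU_INFO_SENSOR
 * query returns a single uint32_t. Assuming fd is an open amdgpu device
 * node, reading the GPU temperature might look like:
 *
 *	uint32_t temp = 0;
 *	struct drm_amdgpu_info request = {};
 *
 *	request.return_pointer = (uintptr_t)&temp;
 *	request.return_size = sizeof(temp);
 *	request.query = AMDGPU_INFO_SENSOR;
 *	request.sensor_info.type = AMDGPU_INFO_SENSOR_GPU_TEMP;
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
 *		printf("%u millidegrees C\n", temp);
 */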

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_fbdev_restore_mode(adev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	r = amdgpu_vm_init(adev, &fpriv->vm,
			   AMDGPU_VM_CONTEXT_GFX, 0);
	if (r) {
		kfree(fpriv);
		goto out_suspend;
	}

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		amdgpu_vm_fini(adev, &fpriv->vm);
		kfree(fpriv);
		goto out_suspend;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
		if (r) {
			amdgpu_vm_fini(adev, &fpriv->vm);
			kfree(fpriv);
			goto out_suspend;
		}
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
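
/*
 * Note: the per-file state allocated above (fpriv, its VM, the PRT VA and
 * the bo_list idr) is torn down again in amdgpu_driver_postclose_kms()
 * below; the two callbacks are mirror images of each other.
 */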

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

	if (adev->asic_type != CHIP_RAVEN) {
		amdgpu_uvd_free_handles(adev, file_priv);
		amdgpu_vce_free_handles(adev, file_priv);
	}

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	amdgpu_vm_fini(adev, &fpriv->vm);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = amdgpu_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}
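
/*
 * Worked example for the cooking above (illustrative numbers): if the hw
 * frame counter reads 100 while the scanout sits between start of vblank
 * and start of vsync (vpos >= 0), the hw has not yet incremented for this
 * frame, so 101 is reported. Once vsync starts, vpos goes negative and
 * the hw counter itself ticks to 101, so the reported value stays
 * consistent across the boundary.
 */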

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
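
/*
 * Note (context, not defined in this file): this table and
 * amdgpu_max_kms_ioctl are expected to be wired into the driver's
 * struct drm_driver (.ioctls / .num_ioctls) elsewhere in the module,
 * which is how the DRM core dispatches DRM_IOCTL_AMDGPU_* calls to the
 * handlers above.
 */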

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}
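
/*
 * Illustrative usage (assuming debugfs is mounted at /sys/kernel/debug and
 * the GPU is DRM minor 0): the file registered above can then be read with
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_firmware_info
 *
 * which prints one "feature version / firmware version" line per firmware
 * block queried in amdgpu_debugfs_firmware_info().
 */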