drm/amdgpu: export function to flush TLB via pasid

This can be called directly from amdgpu and amdkfd to invalidate
the TLB by pasid.
It is implemented for gmc v7, v8, v9 and v10.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
5 changed files with 223 additions and 0 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h

@@ -92,6 +92,9 @@ struct amdgpu_gmc_funcs {
/* flush the vm tlb via mmio */
void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type);
/* flush the vm tlb via pasid */
int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub);
/* flush the vm tlb via ring */
uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
@@ -216,6 +219,9 @@ struct amdgpu_gmc {
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \
((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
((adev), (pasid), (type), (allhub)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
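
As a rough usage sketch (the helper name and the -EOPNOTSUPP fallback are assumptions for illustration, not part of this patch), a caller in amdgpu or amdkfd could reach the new callback through the wrapper macro:

/* Hypothetical caller: flush any TLB entries held for a given pasid.
 * flush_type 0 matches the MMIO fallback paths below; all_hub = true
 * asks the implementation to flush every vmhub, not just the GFX hub.
 */
static int example_flush_process_tlb(struct amdgpu_device *adev,
				     uint16_t pasid)
{
	if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid)
		return -EOPNOTSUPP; /* no pasid flush hook on this ASIC */

	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, 0, true);
}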

drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c

@@ -30,6 +30,8 @@
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
@@ -37,6 +39,7 @@
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v2_3.h"
@@ -234,6 +237,19 @@ static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
(!amdgpu_sriov_vf(adev)));
}
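
/* Read the ATC register that maps @vmid to a pasid; returns true and
 * stores the pasid in @p_pasid when the mapping is valid.
 */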
static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -380,6 +396,63 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}
/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all vmhubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
bool all_hub)
{
int vmid, i;
signed long r;
uint32_t seq;
uint16_t queried_pasid;
bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
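
	/* On real hardware, prefer the KIQ: it can invalidate by pasid
	 * directly, without first resolving which vmid the pasid maps to.
	 */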
if (amdgpu_emu_mode == 0 && ring->sched.ready) {
spin_lock(&adev->gfx.kiq.ring_lock);
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
if (r < 1) {
DRM_ERROR("wait for kiq fence error: %ld.\n", r);
return -ETIME;
}
return 0;
}
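
	/* MMIO fallback: look up the vmid this pasid is mapped to and
	 * flush it through the regular per-vmid path.
	 */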
for (vmid = 1; vmid < 16; vmid++) {
ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
&queried_pasid);
if (ret && queried_pasid == pasid) {
if (all_hub) {
for (i = 0; i < adev->num_vmhubs; i++)
gmc_v10_0_flush_gpu_tlb(adev, vmid,
i, 0);
} else {
gmc_v10_0_flush_gpu_tlb(adev, vmid,
AMDGPU_GFXHUB_0, 0);
}
break;
}
}
return 0;
}
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
@@ -531,6 +604,7 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
.map_mtype = gmc_v10_0_map_mtype,

drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -418,6 +418,38 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
return 0;
}
/**
 * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all vmhubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
bool all_hub)
{
int vmid;
unsigned int tmp;
if (adev->in_gpu_reset)
return -EIO;
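
	/* Find the vmid this pasid is mapped to and invalidate its TLB
	 * entries directly through the VM_INVALIDATE registers.
	 */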
for (vmid = 1; vmid < 16; vmid++) {
tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
RREG32(mmVM_INVALIDATE_RESPONSE);
break;
}
}
return 0;
}
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -1333,6 +1365,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,

drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -620,6 +620,39 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
return 0;
}
/**
 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all vmhubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
bool all_hub)
{
int vmid;
unsigned int tmp;
if (adev->in_gpu_reset)
return -EIO;
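
	/* Find the vmid this pasid is mapped to and invalidate its TLB
	 * entries directly through the VM_INVALIDATE registers.
	 */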
for (vmid = 1; vmid < 16; vmid++) {
tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
RREG32(mmVM_INVALIDATE_RESPONSE);
break;
}
}
return 0;
}
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -1700,6 +1733,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,

drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@@ -38,10 +38,12 @@
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"
@@ -441,6 +443,18 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
adev->pdev->device == 0x15d8)));
}
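
/* Read the ATC register that maps @vmid to a pasid; returns true and
 * stores the pasid in @p_pasid when the mapping is valid.
 */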
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -539,6 +553,67 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all vmhubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
bool all_hub)
{
int vmid, i;
signed long r;
uint32_t seq;
uint16_t queried_pasid;
bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
if (adev->in_gpu_reset)
return -EIO;
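
	/* Prefer the KIQ when its ring is ready: it invalidates by pasid
	 * directly, without the pasid-to-vmid lookup below.
	 */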
if (ring->sched.ready) {
spin_lock(&adev->gfx.kiq.ring_lock);
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
if (r < 1) {
DRM_ERROR("wait for kiq fence error: %ld.\n", r);
return -ETIME;
}
return 0;
}
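
	/* MMIO fallback: look up the vmid this pasid is mapped to and
	 * flush it through the regular per-vmid path.
	 */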
for (vmid = 1; vmid < 16; vmid++) {
ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
&queried_pasid);
if (ret && queried_pasid == pasid) {
if (all_hub) {
for (i = 0; i < adev->num_vmhubs; i++)
gmc_v9_0_flush_gpu_tlb(adev, vmid,
i, 0);
} else {
gmc_v9_0_flush_gpu_tlb(adev, vmid,
AMDGPU_GFXHUB_0, 0);
}
break;
}
}
return 0;
}
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
@@ -700,6 +775,7 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
.map_mtype = gmc_v9_0_map_mtype,