/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
/* Include guard: interface for amdgpu's MMU-notifier / HMM-mirror support. */
#ifndef __AMDGPU_MN_H__
#define __AMDGPU_MN_H__
/*
 * HMM mirror
 */
/* Opaque handles: only pointers to these are passed through this interface. */
struct amdgpu_mn;
struct hmm_range;
/*
 * Which flavour of amdgpu_mn instance amdgpu_mn_get() should return:
 * one for GFX (command submission) and one for HSA (KFD) usage.
 */
enum amdgpu_mn_type {
	AMDGPU_MN_TYPE_GFX,
	AMDGPU_MN_TYPE_HSA,
};
#if defined(CONFIG_HMM_MIRROR)
/* Take/release the notifier's lock; NOTE(review): lock scope is defined in
 * the .c implementation — confirm against amdgpu_mn.c before relying on it. */
void amdgpu_mn_lock(struct amdgpu_mn *mn);
void amdgpu_mn_unlock(struct amdgpu_mn *mn);
/* Return the amdgpu_mn instance of @type for @adev (the !HMM_MIRROR stub
 * below returns NULL, so callers must handle a NULL result). */
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
				enum amdgpu_mn_type type);
/* Register @bo for notification at userptr address @addr; 0 on success,
 * negative errno on failure (stub returns -ENODEV). */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
/* Prepare an hmm_range before mirroring userptr pages through HMM. */
void amdgpu_hmm_init_range(struct hmm_range *range);
#else
/*
 * CONFIG_HMM_MIRROR disabled: provide no-op fallbacks so callers still
 * compile.  Only amdgpu_mn_register() reports the missing support, by
 * warning once and failing with -ENODEV.
 */
static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
					      enum amdgpu_mn_type type)
{
	/* No notifier exists without HMM mirror support. */
	return NULL;
}
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	/* Userptr BOs cannot work without HMM; tell the user how to fix it. */
	DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, "
		      "add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
	return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif
#endif /* __AMDGPU_MN_H__ */