Merge tag 'amd-drm-next-5.10-2020-09-03' of git://people.freedesktop.org/~agd5f/linux into drm-next

amd-drm-next-5.10-2020-09-03:

amdgpu:
- RAS fixes
- Sienna Cichlid updates
- Navy Flounder updates
- DCE6 (SI) support in DC
- Enable plane rotation
- Rework pre-OS vram reservation handling during driver init
- Add standard interface to dump GPU metrics table from SMU
- Rework tiling and tmz state handling in atomic commits
- Pstate fixes
- Add voltage and power hwmon interfaces for renoir
- SW CTF fixes
- S/G display fix for Raven
- Print client strings for vmfaults for vega and newer
- Manual fan control fixes
- Display updates
- Reorg power management directory structure
- Misc bug fixes
- Misc code cleanups

amdkfd:
- Topology fixes
- Add SMI events for thermal throttling and GPU resets

radeon:
- switch from pci_* to dma_* for dma allocations
- PLL fix

Scheduler:
- Clean up priority levels

UAPI:
- amdgpu INFO IOCTL query update for TMZ state (usage sketch below)
  https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6049
- amdkfd SMI event interface updates
  https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/therm_thrott
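
For context, the TMZ query rides the existing amdgpu INFO ioctl. A minimal
userspace sketch using the libdrm amdgpu wrapper (AMDGPU_IDS_FLAGS_TMZ is the
bit exposed by this series; the render-node path and the trimmed error
handling are illustrative assumptions, not part of this changelog):

    /* Sketch: detect TMZ from userspace via the amdgpu INFO ioctl. */
    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <amdgpu.h>      /* libdrm amdgpu wrapper */
    #include <amdgpu_drm.h>  /* AMDGPU_IDS_FLAGS_TMZ */

    int main(void)
    {
        amdgpu_device_handle dev;
        struct amdgpu_gpu_info info;
        uint32_t major, minor;
        int fd = open("/dev/dri/renderD128", O_RDWR);  /* example node */

        if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
            return 1;

        /* ids_flags carries the TMZ bit filled in by the kernel. */
        if (amdgpu_query_gpu_info(dev, &info) == 0)
            printf("TMZ: %s\n", (info.ids_flags & AMDGPU_IDS_FLAGS_TMZ) ?
                   "enabled" : "disabled");

        amdgpu_device_deinitialize(dev);
        return 0;
    }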

From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200903222921.4152-1-alexander.deucher@amd.com
Committed by Dave Airlie on 2020-09-08 16:40:13 +10:00 in commit 0c8d22fcae.
455 changed files with 14098 additions and 3210 deletions.

diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst

@@ -153,7 +153,7 @@ This section covers hwmon and power/thermal controls.
 HWMON Interfaces
 ----------------
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: hwmon
 
 GPU sysfs Power State Interfaces
@@ -164,48 +164,54 @@ GPU power controls are exposed via sysfs files.
 power_dpm_state
 ~~~~~~~~~~~~~~~
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: power_dpm_state
 
 power_dpm_force_performance_level
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: power_dpm_force_performance_level
 
 pp_table
 ~~~~~~~~
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: pp_table
 
 pp_od_clk_voltage
 ~~~~~~~~~~~~~~~~~
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: pp_od_clk_voltage
 
 pp_dpm_*
 ~~~~~~~~
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 
 pp_power_profile_mode
 ~~~~~~~~~~~~~~~~~~~~~
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: pp_power_profile_mode
 
 *_busy_percent
 ~~~~~~~~~~~~~~
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: gpu_busy_percent
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: mem_busy_percent
 
+gpu_metrics
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: gpu_metrics
+
 GPU Product Information
 =======================
@@ -233,7 +239,7 @@ serial_number
 unique_id
 ---------
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: unique_id
 
 GPU Memory Usage Information
@@ -283,7 +289,7 @@ PCIe Accounting Information
 pcie_bw
 -------
 
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: pcie_bw
 
 pcie_replay_count

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile

@@ -30,7 +30,7 @@ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
 ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
 	-I$(FULL_AMD_PATH)/include \
 	-I$(FULL_AMD_PATH)/amdgpu \
-	-I$(FULL_AMD_PATH)/powerplay/inc \
+	-I$(FULL_AMD_PATH)/pm/inc \
 	-I$(FULL_AMD_PATH)/acp/include \
 	-I$(FULL_AMD_DISPLAY_PATH) \
 	-I$(FULL_AMD_DISPLAY_PATH)/include \
@@ -47,7 +47,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
 	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
 	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
-	amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
+	atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
@@ -55,15 +55,15 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
 	amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
 	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
-	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o
+	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o
 
 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
 # add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \
 	dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
 
-amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \
 	uvd_v3_1.o
 
 amdgpu-y += \
@@ -85,7 +85,7 @@ amdgpu-y += \
 # add UMC block
 amdgpu-y += \
-	umc_v6_1.o umc_v6_0.o
+	umc_v6_1.o umc_v6_0.o umc_v8_7.o
 
 # add IH block
 amdgpu-y += \
@@ -105,10 +105,6 @@ amdgpu-y += \
 	psp_v11_0.o \
 	psp_v12_0.o
 
-# add SMC block
-amdgpu-y += \
-	amdgpu_dpm.o
-
 # add DCE block
 amdgpu-y += \
 	dce_v10_0.o \
@@ -212,7 +208,7 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
 amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o
 
-include $(FULL_AMD_PATH)/powerplay/Makefile
+include $(FULL_AMD_PATH)/pm/Makefile
 
 amdgpu-y += $(AMD_POWERPLAY_FILES)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -178,6 +178,7 @@ extern uint amdgpu_dm_abm_level;
 extern struct amdgpu_mgpu_info mgpu_info;
 extern int amdgpu_ras_enable;
 extern uint amdgpu_ras_mask;
+extern int amdgpu_bad_page_threshold;
 extern int amdgpu_async_gfx_ring;
 extern int amdgpu_mcbp;
 extern int amdgpu_discovery;
@@ -187,9 +188,11 @@ extern int amdgpu_force_asic_type;
 #ifdef CONFIG_HSA_AMD
 extern int sched_policy;
 extern bool debug_evictions;
+extern bool no_system_mem_limit;
 #else
 static const int sched_policy = KFD_SCHED_POLICY_HWS;
 static const bool debug_evictions; /* = false */
+static const bool no_system_mem_limit;
 #endif
 
 extern int amdgpu_tmz;
@@ -201,6 +204,7 @@ extern int amdgpu_si_support;
 #ifdef CONFIG_DRM_AMDGPU_CIK
 extern int amdgpu_cik_support;
 #endif
+extern int amdgpu_num_kcq;
 
 #define AMDGPU_VM_MAX_NUM_CTX 4096
 #define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -212,6 +216,8 @@ extern int amdgpu_cik_support;
 #define AMDGPUFB_CONN_LIMIT 4
 #define AMDGPU_BIOS_NUM_SCRATCH 16
 
+#define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */
+
 /* hard reset data */
 #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
@@ -245,6 +251,7 @@ struct amdgpu_fpriv;
 struct amdgpu_bo_va_mapping;
 struct amdgpu_atif;
 struct kfd_vm_fault_info;
+struct amdgpu_hive_info;
 
 enum amdgpu_cp_irq {
 	AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
@@ -611,6 +618,8 @@ struct amdgpu_asic_funcs {
 	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
 	/* device supports BACO */
 	bool (*supports_baco)(struct amdgpu_device *adev);
+	/* pre asic_init quirks */
+	void (*pre_asic_init)(struct amdgpu_device *adev);
 };
 
 /*
@@ -647,16 +656,6 @@ struct amdgpu_atcs {
 	struct amdgpu_atcs_functions functions;
 };
 
-/*
- * Firmware VRAM reservation
- */
-struct amdgpu_fw_vram_usage {
-	u64 start_offset;
-	u64 size;
-	struct amdgpu_bo *reserved_bo;
-	void *va;
-};
-
 /*
  * CGS
  */
@@ -725,13 +724,13 @@ struct amd_powerplay {
 #define AMDGPU_MAX_DF_PERFMONS 4
 struct amdgpu_device {
 	struct device *dev;
-	struct drm_device *ddev;
 	struct pci_dev *pdev;
+	struct drm_device ddev;
 
 #ifdef CONFIG_DRM_AMD_ACP
 	struct amdgpu_acp acp;
 #endif
+	struct amdgpu_hive_info *hive;
 	/* ASIC */
 	enum amd_asic_type asic_type;
 	uint32_t family;
@@ -765,7 +764,6 @@ struct amdgpu_device {
 	bool is_atom_fw;
 	uint8_t *bios;
 	uint32_t bios_size;
-	struct amdgpu_bo *stolen_vga_memory;
 	uint32_t bios_scratch_reg_offset;
 	uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -917,11 +915,6 @@ struct amdgpu_device {
 	/* display related functionality */
 	struct amdgpu_display_manager dm;
 
-	/* discovery */
-	uint8_t *discovery_bin;
-	uint32_t discovery_tmr_size;
-	struct amdgpu_bo *discovery_memory;
-
 	/* mes */
 	bool enable_mes;
 	struct amdgpu_mes mes;
@@ -946,8 +939,6 @@ struct amdgpu_device {
 	struct delayed_work delayed_init_work;
 
 	struct amdgpu_virt virt;
-	/* firmware VRAM reservation */
-	struct amdgpu_fw_vram_usage fw_vram_usage;
 
 	/* link all shadow bo */
 	struct list_head shadow_list;
@@ -961,9 +952,9 @@ struct amdgpu_device {
 	bool in_suspend;
 	bool in_hibernate;
 
-	bool in_gpu_reset;
+	atomic_t in_gpu_reset;
 	enum pp_mp1_state mp1_state;
-	struct mutex lock_reset;
+	struct rw_semaphore reset_sem;
 	struct amdgpu_doorbell_index doorbell_index;
 
 	struct mutex notifier_lock;
@@ -995,16 +986,25 @@ struct amdgpu_device {
 	atomic_t throttling_logging_enabled;
 	struct ratelimit_state throttling_logging_rs;
+	uint32_t ras_features;
 };
 
+static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
+{
+	return container_of(ddev, struct amdgpu_device, ddev);
+}
+
+static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
+{
+	return &adev->ddev;
+}
+
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
 {
 	return container_of(bdev, struct amdgpu_device, mman.bdev);
 }
 
 int amdgpu_device_init(struct amdgpu_device *adev,
-		       struct drm_device *ddev,
-		       struct pci_dev *pdev,
 		       uint32_t flags);
 void amdgpu_device_fini(struct amdgpu_device *adev);
 int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
@@ -1141,10 +1141,12 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
 #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
 #define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
+#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
 
 #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
 
 /* Common functions */
+bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job* job);
@@ -1194,7 +1196,7 @@ static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
 extern const int amdgpu_max_kms_ioctl;
 
-int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
+int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
 void amdgpu_driver_unload_kms(struct drm_device *dev);
 void amdgpu_driver_lastclose_kms(struct drm_device *dev);
 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
@@ -1278,4 +1280,8 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
 	return adev->gmc.tmz_enabled;
 }
 
+static inline int amdgpu_in_reset(struct amdgpu_device *adev)
+{
+	return atomic_read(&adev->in_gpu_reset);
+}
 #endif
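
The amdgpu.h hunks above replace the dev_private pointer with an embedded
drm_device plus container_of-based helpers. A self-contained sketch of that
round trip (struct members trimmed to a minimum; only the names mirror the
header, the rest is illustrative):

    /* Standalone illustration of drm_to_adev()/adev_to_drm(). */
    #include <stdio.h>
    #include <stddef.h>

    struct drm_device { int registered; };

    struct amdgpu_device {
        void *dev;
        void *pdev;
        struct drm_device ddev;   /* embedded, no longer a pointer */
    };

    /* Same offset math the kernel's container_of performs. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
    {
        return container_of(ddev, struct amdgpu_device, ddev);
    }

    static struct drm_device *adev_to_drm(struct amdgpu_device *adev)
    {
        return &adev->ddev;
    }

    int main(void)
    {
        struct amdgpu_device adev = {0};

        /* Round trip in both directions: both lines print 1. */
        printf("%d\n", drm_to_adev(&adev.ddev) == &adev);
        printf("%d\n", adev_to_drm(&adev) == &adev.ddev);
        return 0;
    }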

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c

@@ -136,9 +136,7 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
 		 * 2. power off the acp tiles
 		 * 3. check and enter ulv state
 		 */
-		if (adev->powerplay.pp_funcs &&
-		    adev->powerplay.pp_funcs->set_powergating_by_smu)
-			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
 	}
 	return 0;
 }
@@ -157,8 +155,7 @@ static int acp_poweron(struct generic_pm_domain *genpd)
 		 * 2. turn on acp clock
 		 * 3. power on acp tiles
 		 */
-		if (adev->powerplay.pp_funcs->set_powergating_by_smu)
-			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
 	}
 	return 0;
 }
@@ -529,9 +526,7 @@ static int acp_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_PG_STATE_GATE);
 
-	if (adev->powerplay.pp_funcs &&
-	    adev->powerplay.pp_funcs->set_powergating_by_smu)
-		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
+	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
 
 	return 0;
 }

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c

@@ -463,11 +463,11 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
 		if (adev->flags & AMD_IS_PX) {
-			pm_runtime_get_sync(adev->ddev->dev);
+			pm_runtime_get_sync(adev_to_drm(adev)->dev);
 			/* Just fire off a uevent and let userspace tell us what to do */
-			drm_helper_hpd_irq_event(adev->ddev);
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+			drm_helper_hpd_irq_event(adev_to_drm(adev));
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		}
 	}
 	/* TODO: check other events */
@@ -817,7 +817,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 		struct drm_encoder *tmp;
 
 		/* Find the encoder controlling the brightness */
-		list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
+		list_for_each_entry(tmp, &adev_to_drm(adev)->mode_config.encoder_list,
 				    head) {
 			struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c

@@ -119,7 +119,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 			.gpuvm_size = min(adev->vm_manager.max_pfn
 					  << AMDGPU_GPU_PAGE_SHIFT,
 					  AMDGPU_GMC_HOLE_START),
-			.drm_render_minor = adev->ddev->render->index,
+			.drm_render_minor = adev_to_drm(adev)->render->index,
 			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
 		};
@@ -160,7 +160,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				adev->doorbell_index.last_non_cp;
 		}
 
-		kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
+		kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);
 	}
 }
@@ -479,11 +479,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
 		goto out_put;
 
 	obj = dma_buf->priv;
-	if (obj->dev->driver != adev->ddev->driver)
+	if (obj->dev->driver != adev_to_drm(adev)->driver)
 		/* Can't handle buffers from different drivers */
 		goto out_put;
 
-	adev = obj->dev->dev_private;
+	adev = drm_to_adev(obj->dev);
 	bo = gem_to_amdgpu_bo(obj);
 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
 				       AMDGPU_GEM_DOMAIN_GTT)))
@@ -613,6 +613,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 	job->vmid = vmid;
+
 	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
 	if (ret) {
 		DRM_ERROR("amdgpu: failed to schedule IB.\n");
 		goto err_ib_sched;
@@ -756,4 +757,8 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
 {
 }
+
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+{
+}
 #endif

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h

@@ -270,5 +270,6 @@ int kgd2kfd_resume_mm(struct mm_struct *mm);
 int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
 					       struct dma_fence *fence);
 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
 
 #endif /* AMDGPU_AMDKFD_H_INCLUDED */

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c

@@ -283,22 +283,6 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 	return 0;
 }
 
-static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-		uint64_t page_table_base)
-{
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
-	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
-		pr_err("trying to set page table base for wrong VMID %u\n",
-		       vmid);
-		return;
-	}
-
-	mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
-	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
-}
-
 const struct kfd2kgd_calls arcturus_kfd2kgd = {
 	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -317,7 +301,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
 	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
 	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
 	.get_atc_vmid_pasid_mapping_info =
 				kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
-	.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
-	.get_hive_id = amdgpu_amdkfd_get_hive_id,
+	.set_vm_context_page_table_base =
+				kgd_gfx_v9_set_vm_context_page_table_base,
 };

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c

@@ -542,7 +542,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
 	uint32_t temp;
 	struct v10_compute_mqd *m = get_mqd(mqd);
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EIO;
 
 #if 0
@@ -776,6 +776,4 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
 	.get_atc_vmid_pasid_mapping_info =
 			get_atc_vmid_pasid_mapping_info,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.get_hive_id = amdgpu_amdkfd_get_hive_id,
-	.get_unique_id = amdgpu_amdkfd_get_unique_id,
 };

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c

@@ -822,7 +822,6 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
 	.address_watch_get_offset = address_watch_get_offset_v10_3,
 	.get_atc_vmid_pasid_mapping_info = NULL,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
-	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 #if 0
 	.enable_debug_trap = enable_debug_trap_v10_3,
 	.disable_debug_trap = disable_debug_trap_v10_3,

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c

@@ -423,7 +423,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
 	unsigned long flags, end_jiffies;
 	int retry;
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EIO;
 
 	acquire_queue(kgd, pipe_id, queue_id);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c

@@ -419,7 +419,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
 	int retry;
 	struct vi_mqd *m = get_mqd(mqd);
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EIO;
 
 	acquire_queue(kgd, pipe_id, queue_id);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c

@@ -552,7 +552,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
 	uint32_t temp;
 	struct v9_mqd *m = get_mqd(mqd);
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EIO;
 
 	acquire_queue(kgd, pipe_id, queue_id);
@@ -690,7 +690,7 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
 	return 0;
 }
 
-static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
 		uint32_t vmid, uint64_t page_table_base)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -701,7 +701,7 @@ static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
 		return;
 	}
 
-	mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+	adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 
 	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 }
@@ -726,6 +726,4 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
 	.get_atc_vmid_pasid_mapping_info =
 			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
-	.get_hive_id = amdgpu_amdkfd_get_hive_id,
-	.get_unique_id = amdgpu_amdkfd_get_unique_id,
 };

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h

@@ -60,3 +60,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
 
 bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 		uint8_t vmid, uint16_t *p_pasid);
+
+void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+		uint32_t vmid, uint64_t page_table_base);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c

@@ -148,8 +148,12 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 
 	spin_lock(&kfd_mem_limit.mem_limit_lock);
 
+	if (kfd_mem_limit.system_mem_used + system_mem_needed >
+	    kfd_mem_limit.max_system_mem_limit)
+		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
+
 	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
-	     kfd_mem_limit.max_system_mem_limit) ||
+	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
 	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
 	     kfd_mem_limit.max_ttm_mem_limit) ||
 	    (adev->kfd.vram_used + vram_needed >
@@ -1668,7 +1672,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 		return -EINVAL;
 
 	obj = dma_buf->priv;
-	if (obj->dev->dev_private != adev)
+	if (drm_to_adev(obj->dev) != adev)
 		/* Can't handle buffers from other devices */
 		return -EINVAL;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c

@@ -148,7 +148,7 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
 
 		if (i2c.valid) {
 			sprintf(stmp, "0x%x", i2c.i2c_id);
-			adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
+			adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);
 		}
 		gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
 			((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
@@ -541,7 +541,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 		}
 	}
 
-	amdgpu_link_encoder_connector(adev->ddev);
+	amdgpu_link_encoder_connector(adev_to_drm(adev));
 
 	return true;
 }
@@ -1786,9 +1786,9 @@ static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
 		    (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
 			ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
 			/* Firmware request VRAM reservation for SR-IOV */
-			adev->fw_vram_usage.start_offset = (start_addr &
+			adev->mman.fw_vram_usage_start_offset = (start_addr &
 				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
-			adev->fw_vram_usage.size = size << 10;
+			adev->mman.fw_vram_usage_size = size << 10;
 			/* Use the default scratch size */
 			usage_bytes = 0;
 		} else {
@@ -1882,7 +1882,7 @@ static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
  */
 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
 {
-	struct amdgpu_device *adev = info->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(info->dev);
 
 	WREG32(reg, val);
 }
@@ -1898,7 +1898,7 @@ static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
  */
 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
 {
-	struct amdgpu_device *adev = info->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(info->dev);
 	uint32_t r;
 
 	r = RREG32(reg);
@@ -1916,7 +1916,7 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
  */
 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
 {
-	struct amdgpu_device *adev = info->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(info->dev);
 
 	WREG32_IO(reg, val);
 }
@@ -1932,7 +1932,7 @@ static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
  */
 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
 {
-	struct amdgpu_device *adev = info->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(info->dev);
 	uint32_t r;
 
 	r = RREG32_IO(reg);
@@ -1944,7 +1944,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
 		char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);
 	struct atom_context *ctx = adev->mode_info.atom_context;
 
 	return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
@@ -1995,7 +1995,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
 		return -ENOMEM;
 
 	adev->mode_info.atom_card_info = atom_card_info;
-	atom_card_info->dev = adev->ddev;
+	atom_card_info->dev = adev_to_drm(adev);
 	atom_card_info->reg_read = cail_reg_read;
 	atom_card_info->reg_write = cail_reg_write;
 	/* needed for iio ops */

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c

@@ -89,9 +89,9 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
 			(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
 			ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
 			/* Firmware request VRAM reservation for SR-IOV */
-			adev->fw_vram_usage.start_offset = (start_addr &
+			adev->mman.fw_vram_usage_start_offset = (start_addr &
 				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
-			adev->fw_vram_usage.size = size << 10;
+			adev->mman.fw_vram_usage_size = size << 10;
 			/* Use the default scratch size */
 			usage_bytes = 0;
 		} else {

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c

@@ -417,26 +417,40 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
 
 bool amdgpu_get_bios(struct amdgpu_device *adev)
 {
-	if (amdgpu_atrm_get_bios(adev))
+	if (amdgpu_atrm_get_bios(adev)) {
+		dev_info(adev->dev, "Fetched VBIOS from ATRM\n");
 		goto success;
+	}
 
-	if (amdgpu_acpi_vfct_bios(adev))
+	if (amdgpu_acpi_vfct_bios(adev)) {
+		dev_info(adev->dev, "Fetched VBIOS from VFCT\n");
 		goto success;
+	}
 
-	if (igp_read_bios_from_vram(adev))
+	if (igp_read_bios_from_vram(adev)) {
+		dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n");
 		goto success;
+	}
 
-	if (amdgpu_read_bios(adev))
+	if (amdgpu_read_bios(adev)) {
+		dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
 		goto success;
+	}
 
-	if (amdgpu_read_bios_from_rom(adev))
+	if (amdgpu_read_bios_from_rom(adev)) {
+		dev_info(adev->dev, "Fetched VBIOS from ROM\n");
 		goto success;
+	}
 
-	if (amdgpu_read_disabled_bios(adev))
+	if (amdgpu_read_disabled_bios(adev)) {
+		dev_info(adev->dev, "Fetched VBIOS from disabled ROM BAR\n");
 		goto success;
+	}
 
-	if (amdgpu_read_platform_bios(adev))
+	if (amdgpu_read_platform_bios(adev)) {
+		dev_info(adev->dev, "Fetched VBIOS from platform\n");
 		goto success;
+	}
 
 	DRM_ERROR("Unable to locate a BIOS ROM\n");
 	return false;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c

@@ -265,7 +265,7 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	union drm_amdgpu_bo_list *args = data;
 	uint32_t handle = args->in.list_handle;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c

@@ -42,7 +42,7 @@
 void amdgpu_connector_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 	/* bail if the connector does not have hpd pin, e.g.,
@@ -280,7 +280,7 @@ amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
 static void amdgpu_connector_get_edid(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 	if (amdgpu_connector->edid)
@@ -464,7 +464,7 @@ static int amdgpu_connector_set_property(struct drm_connector *connector,
 				    uint64_t val)
 {
 	struct drm_device *dev = connector->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_encoder *encoder;
 	struct amdgpu_encoder *amdgpu_encoder;
@@ -835,7 +835,7 @@ static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector
 					    struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	/* XXX check mode bandwidth */
@@ -942,7 +942,7 @@ static bool
 amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 	enum drm_connector_status status;
@@ -973,7 +973,7 @@ static enum drm_connector_status
 amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 	const struct drm_encoder_helper_funcs *encoder_funcs;
 	int r;
@@ -1160,7 +1160,7 @@ static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector
 					    struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
 	/* XXX check mode bandwidth */
@@ -1312,7 +1312,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
 bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	if ((adev->clock.default_dispclk >= 53900) &&
 	    amdgpu_connector_encoder_is_hbr2(connector)) {
@@ -1326,7 +1326,7 @@ static enum drm_connector_status
 amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 	enum drm_connector_status ret = connector_status_disconnected;
 	struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
@@ -1526,7 +1526,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 		     struct amdgpu_hpd *hpd,
 		     struct amdgpu_router *router)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	struct amdgpu_connector *amdgpu_connector;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -1275,13 +1275,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	return r;
 }
 
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
+{
+	int i;
+
+	if (!trace_amdgpu_cs_enabled())
+		return;
+
+	for (i = 0; i < parser->job->num_ibs; i++)
+		trace_amdgpu_cs(parser, i);
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	union drm_amdgpu_cs *cs = data;
 	struct amdgpu_cs_parser parser = {};
 	bool reserved_buffers = false;
-	int i, r;
+	int r;
 
 	if (amdgpu_ras_intr_triggered())
 		return -EHWPOISON;
@@ -1294,7 +1305,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	r = amdgpu_cs_parser_init(&parser, data);
 	if (r) {
-		DRM_ERROR("Failed to initialize parser %d!\n", r);
+		if (printk_ratelimit())
+			DRM_ERROR("Failed to initialize parser %d!\n", r);
 		goto out;
 	}
 
@@ -1319,8 +1331,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	reserved_buffers = true;
 
-	for (i = 0; i < parser.job->num_ibs; i++)
-		trace_amdgpu_cs(&parser, i);
+	trace_amdgpu_cs_ibs(&parser);
 
 	r = amdgpu_cs_vm_handling(&parser);
 	if (r)
@@ -1421,7 +1432,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	union drm_amdgpu_fence_to_handle *info = data;
 	struct dma_fence *fence;
 	struct drm_syncobj *syncobj;
@@ -1597,7 +1608,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	union drm_amdgpu_wait_fences *wait = data;
 	uint32_t fence_count = wait->in.fence_count;
 	struct drm_amdgpu_fence *fences_user;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 				      enum drm_sched_priority priority)
 {
-	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
 		return -EINVAL;
 
 	/* NORMAL and below are accessible by everyone */
@@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
 {
 	switch (prio) {
-	case DRM_SCHED_PRIORITY_HIGH_HW:
+	case DRM_SCHED_PRIORITY_HIGH:
 	case DRM_SCHED_PRIORITY_KERNEL:
 		return AMDGPU_GFX_PIPE_PRIO_HIGH;
 	default:
@@ -114,7 +114,11 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
 	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
 
-	if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+	/* disable load balance if the hw engine retains context among dependent jobs */
+	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
+	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
+	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
+	    hw_ip == AMDGPU_HW_IP_UVD) {
 		sched = drm_sched_pick_best(scheds, num_scheds);
 		scheds = &sched;
 		num_scheds = 1;
@@ -385,16 +389,15 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 	enum drm_sched_priority priority;
 
 	union drm_amdgpu_ctx *args = data;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 
-	r = 0;
 	id = args->in.ctx_id;
-	priority = amdgpu_to_sched_priority(args->in.priority);
+	r = amdgpu_to_sched_priority(args->in.priority, &priority);
 
 	/* For backwards compatibility reasons, we need to accept
 	 * ioctls with garbage in the priority field */
-	if (priority == DRM_SCHED_PRIORITY_INVALID)
+	if (r == -EINVAL)
 		priority = DRM_SCHED_PRIORITY_NORMAL;
 
 	switch (args->in.op) {
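
The last hunk above is the caller side of the scheduler priority cleanup:
amdgpu_to_sched_priority() now reports bad input through its return value
instead of a DRM_SCHED_PRIORITY_INVALID sentinel. A hand-rolled sketch of
that contract (simplified names and enum, not the kernel code):

    #include <errno.h>
    #include <stdio.h>

    enum sched_priority { PRIO_MIN, PRIO_NORMAL, PRIO_HIGH, PRIO_COUNT };

    /* Returns 0 and writes *out on success, -EINVAL on garbage input. */
    static int to_sched_priority(int user_prio, enum sched_priority *out)
    {
        if (user_prio < 0 || user_prio >= PRIO_COUNT)
            return -EINVAL;
        *out = (enum sched_priority)user_prio;
        return 0;
    }

    int main(void)
    {
        enum sched_priority prio;

        /* Backwards compatibility: garbage falls back to NORMAL,
         * mirroring the amdgpu_ctx_ioctl() hunk above. */
        if (to_sched_priority(42, &prio) == -EINVAL)
            prio = PRIO_NORMAL;
        printf("%d\n", prio);
        return 0;
    }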

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c

@ -34,6 +34,7 @@
#include "amdgpu_pm.h" #include "amdgpu_pm.h"
#include "amdgpu_dm_debugfs.h" #include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h" #include "amdgpu_ras.h"
#include "amdgpu_rap.h"
/** /**
* amdgpu_debugfs_add_files - Add simple debugfs entries * amdgpu_debugfs_add_files - Add simple debugfs entries
@ -68,8 +69,8 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
adev->debugfs_count = i; adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles, drm_debugfs_create_files(files, nfiles,
adev->ddev->primary->debugfs_root, adev_to_drm(adev)->primary->debugfs_root,
adev->ddev->primary); adev_to_drm(adev)->primary);
#endif #endif
return 0; return 0;
} }
@ -100,14 +101,18 @@ static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
file->private_data = adev; file->private_data = adev;
mutex_lock(&adev->lock_reset); ret = down_read_killable(&adev->reset_sem);
if (ret)
return ret;
if (adev->autodump.dumping.done) { if (adev->autodump.dumping.done) {
reinit_completion(&adev->autodump.dumping); reinit_completion(&adev->autodump.dumping);
ret = 0; ret = 0;
} else { } else {
ret = -EBUSY; ret = -EBUSY;
} }
mutex_unlock(&adev->lock_reset);
up_read(&adev->reset_sem);
return ret; return ret;
} }
@ -126,7 +131,7 @@ static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_
poll_wait(file, &adev->autodump.gpu_hang, poll_table); poll_wait(file, &adev->autodump.gpu_hang, poll_table);
if (adev->in_gpu_reset) if (amdgpu_in_reset(adev))
return POLLIN | POLLRDNORM | POLLWRNORM; return POLLIN | POLLRDNORM | POLLWRNORM;
return 0; return 0;
@ -146,7 +151,7 @@ static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
init_waitqueue_head(&adev->autodump.gpu_hang); init_waitqueue_head(&adev->autodump.gpu_hang);
debugfs_create_file("amdgpu_autodump", 0600, debugfs_create_file("amdgpu_autodump", 0600,
adev->ddev->primary->debugfs_root, adev_to_drm(adev)->primary->debugfs_root,
adev, &autodump_debug_fops); adev, &autodump_debug_fops);
} }
@ -222,23 +227,23 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
*pos &= (1UL << 22) - 1; *pos &= (1UL << 22) - 1;
r = pm_runtime_get_sync(adev->ddev->dev); r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) { if (r < 0) {
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r; return r;
} }
r = amdgpu_virt_enable_access_debugfs(adev); r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) { if (r < 0) {
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r; return r;
} }
if (use_bank) { if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev); amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL; return -EINVAL;
} }
@ -287,8 +292,8 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
if (pm_pg_lock) if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex); mutex_unlock(&adev->pm.mutex);
pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev); amdgpu_virt_disable_access_debugfs(adev);
return result; return result;
@ -335,15 +340,15 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;
r = pm_runtime_get_sync(adev->ddev->dev); r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) { if (r < 0) {
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r; return r;
} }
r = amdgpu_virt_enable_access_debugfs(adev); r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) { if (r < 0) {
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r; return r;
} }
@ -353,8 +358,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
value = RREG32_PCIE(*pos >> 2); value = RREG32_PCIE(*pos >> 2);
r = put_user(value, (uint32_t *)buf); r = put_user(value, (uint32_t *)buf);
if (r) { if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev); amdgpu_virt_disable_access_debugfs(adev);
return r; return r;
} }
@ -365,8 +370,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size -= 4; size -= 4;
} }
pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev); amdgpu_virt_disable_access_debugfs(adev);
return result; return result;
@ -394,15 +399,15 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;
r = pm_runtime_get_sync(adev->ddev->dev); r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) { if (r < 0) {
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r; return r;
} }
r = amdgpu_virt_enable_access_debugfs(adev); r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) { if (r < 0) {
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r; return r;
} }
@ -411,8 +416,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
r = get_user(value, (uint32_t *)buf); r = get_user(value, (uint32_t *)buf);
if (r) { if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev); amdgpu_virt_disable_access_debugfs(adev);
return r; return r;
} }
@ -425,8 +430,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
size -= 4; size -= 4;
} }
pm_runtime_mark_last_busy(adev->ddev->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev->ddev->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev); amdgpu_virt_disable_access_debugfs(adev);
return result; return result;
@@ -454,15 +459,15 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

 	r = amdgpu_virt_enable_access_debugfs(adev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

@@ -472,8 +477,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 		value = RREG32_DIDT(*pos >> 2);
 		r = put_user(value, (uint32_t *)buf);
 		if (r) {
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 			amdgpu_virt_disable_access_debugfs(adev);
 			return r;
 		}
@@ -484,8 +489,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 		size -= 4;
 	}

-	pm_runtime_mark_last_busy(adev->ddev->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

 	amdgpu_virt_disable_access_debugfs(adev);
 	return result;
@@ -513,15 +518,15 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

 	r = amdgpu_virt_enable_access_debugfs(adev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

@@ -530,8 +535,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 		r = get_user(value, (uint32_t *)buf);
 		if (r) {
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 			amdgpu_virt_disable_access_debugfs(adev);
 			return r;
 		}
@@ -544,8 +549,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 		size -= 4;
 	}

-	pm_runtime_mark_last_busy(adev->ddev->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

 	amdgpu_virt_disable_access_debugfs(adev);
 	return result;
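Every debugfs handler touched above follows the same runtime-PM discipline: take a usage reference before touching registers, and on every exit path, success or error, mark the device busy and drop the reference asynchronously. Note that pm_runtime_get_sync() keeps the reference even when it fails, which is why the error paths still call pm_runtime_put_autosuspend(). A condensed sketch of the pattern (hypothetical handler, for illustration only):

    static ssize_t example_reg_read(struct file *f, char __user *buf,
    				size_t size, loff_t *pos)
    {
    	struct amdgpu_device *adev = file_inode(f)->i_private;
    	int r;

    	/* Wake the GPU; the reference is held even on failure. */
    	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
    	if (r < 0) {
    		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
    		return r;
    	}

    	/* ... register access goes here ... */

    	/* Reset the autosuspend timer, then drop the reference. */
    	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
    	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
    	return size;
    }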
@@ -573,15 +578,15 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

 	r = amdgpu_virt_enable_access_debugfs(adev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

@@ -591,8 +596,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 		value = RREG32_SMC(*pos);
 		r = put_user(value, (uint32_t *)buf);
 		if (r) {
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 			amdgpu_virt_disable_access_debugfs(adev);
 			return r;
 		}
@@ -603,8 +608,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 		size -= 4;
 	}

-	pm_runtime_mark_last_busy(adev->ddev->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

 	amdgpu_virt_disable_access_debugfs(adev);
 	return result;
@@ -632,15 +637,15 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

 	r = amdgpu_virt_enable_access_debugfs(adev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

@@ -649,8 +654,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 		r = get_user(value, (uint32_t *)buf);
 		if (r) {
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 			amdgpu_virt_disable_access_debugfs(adev);
 			return r;
 		}
@@ -663,8 +668,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 		size -= 4;
 	}

-	pm_runtime_mark_last_busy(adev->ddev->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

 	amdgpu_virt_disable_access_debugfs(adev);
 	return result;
@@ -791,22 +796,22 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	valuesize = sizeof(values);

-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

 	r = amdgpu_virt_enable_access_debugfs(adev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

 	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);

-	pm_runtime_mark_last_busy(adev->ddev->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

 	if (r) {
 		amdgpu_virt_disable_access_debugfs(adev);
@@ -873,15 +878,15 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
 	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

 	r = amdgpu_virt_enable_access_debugfs(adev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

@@ -896,8 +901,8 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
 	mutex_unlock(&adev->grbm_idx_mutex);

-	pm_runtime_mark_last_busy(adev->ddev->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

 	if (!x) {
 		amdgpu_virt_disable_access_debugfs(adev);
@@ -971,7 +976,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	if (!data)
 		return -ENOMEM;

-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0)
 		goto err;

@@ -994,8 +999,8 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
 	mutex_unlock(&adev->grbm_idx_mutex);

-	pm_runtime_mark_last_busy(adev->ddev->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

 	while (size) {
 		uint32_t value;
@@ -1017,7 +1022,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	return result;

 err:
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 	kfree(data);
 	return r;
 }
@@ -1042,9 +1047,9 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

@@ -1053,8 +1058,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
 		r = get_user(value, (uint32_t *)buf);
 		if (r) {
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 			return r;
 		}
@@ -1066,8 +1071,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
 		size -= 4;
 	}

-	pm_runtime_mark_last_busy(adev->ddev->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

 	return result;
 }
@@ -1091,7 +1096,7 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0)
 		return r;

@@ -1100,15 +1105,15 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
 		r = amdgpu_get_gfx_off_status(adev, &value);
 		if (r) {
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 			return r;
 		}

 		r = put_user(value, (uint32_t *)buf);
 		if (r) {
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 			return r;
 		}
@@ -1118,8 +1123,8 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
 		size -= 4;
 	}

-	pm_runtime_mark_last_busy(adev->ddev->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

 	return result;
 }
@@ -1211,7 +1216,7 @@ static const char *debugfs_regs_names[] = {
  */
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
-	struct drm_minor *minor = adev->ddev->primary;
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
 	struct dentry *ent, *root = minor->debugfs_root;
 	unsigned int i;

@@ -1231,17 +1236,19 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	int r = 0, i;

 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}

 	/* Avoid accidently unparking the sched thread during GPU reset */
-	mutex_lock(&adev->lock_reset);
+	r = down_read_killable(&adev->reset_sem);
+	if (r)
+		return r;

 	/* hold on the scheduler */
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -1268,7 +1275,7 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 		kthread_unpark(ring->sched.thread);
 	}

-	mutex_unlock(&adev->lock_reset);
+	up_read(&adev->reset_sem);

 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
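The lock_reset mutex is gone; reset_sem is a read-write semaphore, so paths like this one take it for reading and can run concurrently with one another, while a GPU reset takes it for writing and excludes them all. Unlike mutex_lock(), down_read_killable() can fail (it returns -EINTR when the task receives a fatal signal), so its result must be checked. A sketch of the reader side:

    /* Reader side: keep a concurrent GPU reset out while the scheduler
     * threads are parked; may fail with -EINTR on a fatal signal. */
    r = down_read_killable(&adev->reset_sem);
    if (r)
    	return r;

    /* ... park the rings, run the test IBs, unpark the rings ... */

    up_read(&adev->reset_sem);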
@@ -1280,7 +1287,7 @@ static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);

 	seq_write(m, adev->bios, adev->bios_size);
 	return 0;
@@ -1290,12 +1297,12 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	int r;

 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}
@@ -1311,12 +1318,12 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	int r;

 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return r;
 	}
@@ -1458,7 +1465,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 		return -ENOMEM;

 	/* Avoid accidently unparking the sched thread during GPU reset */
-	mutex_lock(&adev->lock_reset);
+	r = down_read_killable(&adev->reset_sem);
+	if (r)
+		goto pro_end;

 	/* stop the scheduler */
 	kthread_park(ring->sched.thread);
@@ -1499,13 +1508,14 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 	/* restart the scheduler */
 	kthread_unpark(ring->sched.thread);

-	mutex_unlock(&adev->lock_reset);
+	up_read(&adev->reset_sem);

 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

+pro_end:
 	kfree(fences);

-	return 0;
+	return r;
 }

 static int amdgpu_debugfs_sclk_set(void *data, u64 val)
@@ -1517,9 +1527,9 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
 		return -EINVAL;

-	ret = pm_runtime_get_sync(adev->ddev->dev);
+	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (ret < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		return ret;
 	}
@@ -1532,8 +1542,8 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
 		return 0;
 	}

-	pm_runtime_mark_last_busy(adev->ddev->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

 	if (ret)
 		return -EINVAL;
@@ -1553,7 +1563,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	adev->debugfs_preempt =
 		debugfs_create_file("amdgpu_preempt_ib", 0600,
-				    adev->ddev->primary->debugfs_root, adev,
+				    adev_to_drm(adev)->primary->debugfs_root, adev,
 				    &fops_ib_preempt);
 	if (!(adev->debugfs_preempt)) {
 		DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
@@ -1562,7 +1572,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	adev->smu.debugfs_sclk =
 		debugfs_create_file("amdgpu_force_sclk", 0200,
-				    adev->ddev->primary->debugfs_root, adev,
+				    adev_to_drm(adev)->primary->debugfs_root, adev,
 				    &fops_sclk_set);
 	if (!(adev->smu.debugfs_sclk)) {
 		DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
@@ -1623,6 +1633,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	amdgpu_debugfs_autodump_init(adev);

+	amdgpu_rap_debugfs_init(adev);
+
 	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
 					ARRAY_SIZE(amdgpu_debugfs_list));

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -132,7 +132,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);
 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

 	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
@@ -157,7 +157,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);

 	return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
 }
@@ -179,7 +179,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);

 	return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
 }
@@ -201,7 +201,7 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);

 	return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
 }
@@ -219,7 +219,7 @@ static DEVICE_ATTR(serial_number, S_IRUGO,
  */
 bool amdgpu_device_supports_boco(struct drm_device *dev)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);

 	if (adev->flags & AMD_IS_PX)
 		return true;
@@ -236,7 +236,7 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
 */
 bool amdgpu_device_supports_baco(struct drm_device *dev)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);

 	return amdgpu_asic_supports_baco(adev);
 }
@@ -319,8 +319,12 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 {
 	uint32_t ret;

-	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
-		return amdgpu_kiq_rreg(adev, reg);
+	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
+	    down_read_trylock(&adev->reset_sem)) {
+		ret = amdgpu_kiq_rreg(adev, reg);
+		up_read(&adev->reset_sem);
+		return ret;
+	}

 	if ((reg * 4) < adev->rmmio_size)
 		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -332,6 +336,7 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 	}
+
 	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
 	return ret;
 }
@@ -378,7 +383,9 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 		BUG();
 }

-void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev,
+				       uint32_t reg, uint32_t v,
+				       uint32_t acc_flags)
 {
 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

@@ -407,8 +414,12 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg,
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		    uint32_t acc_flags)
 {
-	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
-		return amdgpu_kiq_wreg(adev, reg, v);
+	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
+	    down_read_trylock(&adev->reset_sem)) {
+		amdgpu_kiq_wreg(adev, reg, v);
+		up_read(&adev->reset_sem);
+		return;
+	}

 	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 }
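Under SR-IOV, register access normally detours through the KIQ ring, but while a reset holds reset_sem for writing the KIQ cannot be trusted; down_read_trylock() lets the access fall back to plain MMIO instead of blocking behind the reset. The read-side decision, consolidated from the split lines above into one readable sketch:

    /* Take the KIQ path only when no reset currently owns reset_sem. */
    if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
        down_read_trylock(&adev->reset_sem)) {
    	ret = amdgpu_kiq_rreg(adev, reg);
    	up_read(&adev->reset_sem);
    	return ret;
    }
    /* Otherwise fall through to the direct MMIO readl() path. */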
@@ -653,6 +664,20 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 	BUG();
 }

+/**
+ * amdgpu_device_asic_init - Wrapper for atom asic_init
+ *
+ * @dev: drm_device pointer
+ *
+ * Does any asic specific work and then calls atom asic init.
+ */
+static int amdgpu_device_asic_init(struct amdgpu_device *adev)
+{
+	amdgpu_asic_pre_asic_init(adev);
+
+	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
+}
+
 /**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
@@ -1199,6 +1224,11 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 	amdgpu_gmc_tmz_set(adev);

+	if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
+		amdgpu_num_kcq = 8;
+		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
+	}
+
 	return 0;
 }

@@ -1211,7 +1241,8 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 * Callback for the switcheroo driver. Suspends or resumes the
 * the asics before or after it is powered up using ACPI methods.
 */
-static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
+					enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	int r;
@@ -1504,7 +1535,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 	adev->enable_virtual_display = false;

 	if (amdgpu_virtual_display) {
-		struct drm_device *ddev = adev->ddev;
+		struct drm_device *ddev = adev_to_drm(adev);
 		const char *pci_address_name = pci_name(ddev->pdev);
 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

@@ -1563,7 +1594,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	adev->firmware.gpu_info_fw = NULL;

-	if (adev->discovery_bin) {
+	if (adev->mman.discovery_bin) {
 		amdgpu_discovery_get_gfx_info(adev);

 		/*
@@ -1935,7 +1966,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 			if (adev->ip_blocks[i].status.hw == true)
 				break;

-			if (adev->in_gpu_reset || adev->in_suspend) {
+			if (amdgpu_in_reset(adev) || adev->in_suspend) {
 				r = adev->ip_blocks[i].version->funcs->resume(adev);
 				if (r) {
 					DRM_ERROR("resume of IP block <%s> failed %d\n",
@@ -2055,13 +2086,19 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
 	 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
 	 * for I2C communication which only true at this point.
-	 * recovery_init may fail, but it can free all resources allocated by
-	 * itself and its failure should not stop amdgpu init process.
+	 *
+	 * amdgpu_ras_recovery_init may fail, but the upper only cares the
+	 * failure from bad gpu situation and stop amdgpu init process
+	 * accordingly. For other failed cases, it will still release all
+	 * the resource and print error message, rather than returning one
+	 * negative value to upper level.
 	 *
 	 * Note: theoretically, this should be called before all vram allocations
 	 * to protect retired page from abusing
 	 */
-	amdgpu_ras_recovery_init(adev);
+	r = amdgpu_ras_recovery_init(adev);
+	if (r)
+		goto init_failed;

 	if (adev->gmc.xgmi.num_physical_nodes > 1)
 		amdgpu_xgmi_add_device(adev);
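The rewritten comment above defines the new contract: amdgpu_ras_recovery_init() cleans up after itself and returns 0 for ordinary failures, reserving a nonzero return for the genuinely bad-GPU case, which is the only one allowed to abort driver init. A sketch of a function honouring that contract; all names below are hypothetical, not the actual RAS implementation:

    int example_recovery_init(struct amdgpu_device *adev)
    {
    	int r = example_load_bad_page_table(adev);	/* hypothetical */

    	/* hypothetical "GPU exceeded the bad page threshold" code;
    	 * only this failure is allowed to stop amdgpu init */
    	if (r == -EHWPOISON)
    		return r;

    	if (r) {
    		/* ordinary failure: release everything, log, continue */
    		example_release_resources(adev);	/* hypothetical */
    		dev_err(adev->dev, "recovery init failed (%d), continuing\n", r);
    	}
    	return 0;
    }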
@@ -2106,7 +2143,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
 			AMDGPU_RESET_MAGIC_NUM))
 		return true;

-	if (!adev->in_gpu_reset)
+	if (!amdgpu_in_reset(adev))
 		return false;

 	/*
@@ -2217,9 +2254,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void)
 		gpu_ins = &(mgpu_info.gpu_ins[i]);
 		adev = gpu_ins->adev;
 		if (!(adev->flags & AMD_IS_APU) &&
-		    !gpu_ins->mgpu_fan_enabled &&
-		    adev->powerplay.pp_funcs &&
-		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
+		    !gpu_ins->mgpu_fan_enabled) {
 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
 			if (ret)
 				break;
@@ -2574,17 +2609,16 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 		AMD_IP_BLOCK_TYPE_IH,
 	};

-	for (i = 0; i < adev->num_ip_blocks; i++)
-		adev->ip_blocks[i].status.hw = false;
-
 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
 		int j;
 		struct amdgpu_ip_block *block;

-		for (j = 0; j < adev->num_ip_blocks; j++) {
-			block = &adev->ip_blocks[j];
-			if (block->version->type != ip_order[i] ||
+		block = &adev->ip_blocks[i];
+		block->status.hw = false;
+
+		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
+			if (block->version->type != ip_order[j] ||
 				!block->status.valid)
 				continue;
@@ -2777,6 +2811,12 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 {
 	switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+#endif
 	case CHIP_BONAIRE:
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
@@ -2831,7 +2871,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 */
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
 		return false;

 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
@@ -2842,7 +2882,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 {
 	struct amdgpu_device *adev =
 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
-	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

 	/* It's a bug to not have a hive within this function */
 	if (WARN_ON(!hive))
@@ -2857,13 +2897,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {

 		task_barrier_enter(&hive->tb);
-		adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));

 		if (adev->asic_reset_res)
 			goto fail;

 		task_barrier_exit(&hive->tb);
-		adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));

 		if (adev->asic_reset_res)
 			goto fail;
@@ -2879,7 +2919,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 fail:
 	if (adev->asic_reset_res)
 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
-			 adev->asic_reset_res, adev->ddev->unique);
+			 adev->asic_reset_res, adev_to_drm(adev)->unique);
+	amdgpu_put_xgmi_hive(hive);
 }

 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
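For a BACO reset across an XGMI hive, every node has to move through the reset phases in lockstep, and that is what the task_barrier calls in the hunk above provide: each per-device work item waits at the barrier until all hive members arrive. The flow, restated with the synchronization points annotated (a readability aid only, assuming the barrier semantics the helper names suggest):

    /* Phase 1: wait until every device in the hive is ready, then all
     * enter BACO together. */
    task_barrier_enter(&hive->tb);
    adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
    if (adev->asic_reset_res)
    	goto fail;

    /* Phase 2: wait until every device has entered BACO, then all
     * leave it together. */
    task_barrier_exit(&hive->tb);
    adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));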
@@ -2962,8 +3003,6 @@ static const struct attribute *amdgpu_dev_attributes[] = {
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
- * @ddev: drm dev pointer
- * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
@@ -2971,18 +3010,15 @@ static const struct attribute *amdgpu_dev_attributes[] = {
 * Called at driver startup.
 */
 int amdgpu_device_init(struct amdgpu_device *adev,
-		       struct drm_device *ddev,
-		       struct pci_dev *pdev,
 		       uint32_t flags)
 {
+	struct drm_device *ddev = adev_to_drm(adev);
+	struct pci_dev *pdev = adev->pdev;
 	int r, i;
 	bool boco = false;
 	u32 max_MBps;

 	adev->shutdown = false;
-	adev->dev = &pdev->dev;
-	adev->ddev = ddev;
-	adev->pdev = pdev;
 	adev->flags = flags;

 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
@@ -3038,7 +3074,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->mn_lock);
 	mutex_init(&adev->virt.vf_errors.lock);
 	hash_init(adev->mn_hash);
-	mutex_init(&adev->lock_reset);
+	atomic_set(&adev->in_gpu_reset, 0);
+	init_rwsem(&adev->reset_sem);
 	mutex_init(&adev->psp.mutex);
 	mutex_init(&adev->notifier_lock);

@@ -3188,7 +3225,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 			goto failed;
 		}
 		DRM_INFO("GPU posting now...\n");
-		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		r = amdgpu_device_asic_init(adev);
 		if (r) {
 			dev_err(adev->dev, "gpu post error!\n");
 			goto failed;
@@ -3226,7 +3263,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	}

 	/* init the mode config */
-	drm_mode_config_init(adev->ddev);
+	drm_mode_config_init(adev_to_drm(adev));

 	r = amdgpu_device_ip_init(adev);
 	if (r) {
@@ -3352,9 +3389,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 */
 void amdgpu_device_fini(struct amdgpu_device *adev)
 {
-	int r;
-
-	DRM_INFO("amdgpu: finishing device.\n");
+	dev_info(adev->dev, "amdgpu: finishing device.\n");
 	flush_delayed_work(&adev->delayed_init_work);
 	adev->shutdown = true;

@@ -3368,15 +3403,15 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	amdgpu_irq_disable_all(adev);
 	if (adev->mode_info.mode_config_initialized){
 		if (!amdgpu_device_has_dc_support(adev))
-			drm_helper_force_disable_all(adev->ddev);
+			drm_helper_force_disable_all(adev_to_drm(adev));
 		else
-			drm_atomic_helper_shutdown(adev->ddev);
+			drm_atomic_helper_shutdown(adev_to_drm(adev));
 	}
 	amdgpu_fence_driver_fini(adev);
 	if (adev->pm_sysfs_en)
 		amdgpu_pm_sysfs_fini(adev);
 	amdgpu_fbdev_fini(adev);
-	r = amdgpu_device_ip_fini(adev);
+	amdgpu_device_ip_fini(adev);
 	release_firmware(adev->firmware.gpu_info_fw);
 	adev->firmware.gpu_info_fw = NULL;
 	adev->accel_working = false;
@@ -3394,7 +3429,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	    amdgpu_has_atpx_dgpu_power_cntl()) &&
 	    !pci_is_thunderbolt_attached(adev->pdev))
 		vga_switcheroo_unregister_client(adev->pdev);
-	if (amdgpu_device_supports_boco(adev->ddev))
+	if (amdgpu_device_supports_boco(adev_to_drm(adev)))
 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 	vga_client_register(adev->pdev, NULL, NULL, NULL);
 	if (adev->rio_mem)
@@ -3410,7 +3445,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
 		amdgpu_pmu_fini(adev);
-	if (adev->discovery_bin)
+	if (adev->mman.discovery_bin)
 		amdgpu_discovery_fini(adev);
 }

@@ -3436,11 +3471,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 	struct drm_connector_list_iter iter;
 	int r;

-	if (dev == NULL || dev->dev_private == NULL) {
-		return -ENODEV;
-	}
-
-	adev = dev->dev_private;
+	adev = drm_to_adev(dev);

 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
@@ -3528,7 +3559,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_crtc *crtc;
 	int r = 0;

@@ -3537,14 +3568,14 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 	/* post card */
 	if (amdgpu_device_need_post(adev)) {
-		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		r = amdgpu_device_asic_init(adev);
 		if (r)
-			DRM_ERROR("amdgpu asic init failed\n");
+			dev_err(adev->dev, "amdgpu asic init failed\n");
 	}

 	r = amdgpu_device_ip_resume(adev);
 	if (r) {
-		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
+		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
 		return r;
 	}
 	amdgpu_fence_driver_resume(adev);
@@ -3568,7 +3599,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 			if (r == 0) {
 				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 				if (r != 0)
-					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+					dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
 				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 				amdgpu_bo_unreserve(aobj);
 			}
@@ -3658,7 +3689,7 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
 			adev->ip_blocks[i].status.hang =
 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
 			if (adev->ip_blocks[i].status.hang) {
-				DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
+				dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
 				asic_hang = true;
 			}
 		}
@@ -3719,7 +3750,7 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
 			if (adev->ip_blocks[i].status.hang) {
-				DRM_INFO("Some block need full reset!\n");
+				dev_info(adev->dev, "Some block need full reset!\n");
 				return true;
 			}
 		}
@@ -3807,7 +3838,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 	else
 		tmo = msecs_to_jiffies(100);

-	DRM_INFO("recover vram bo from shadow start\n");
+	dev_info(adev->dev, "recover vram bo from shadow start\n");
 	mutex_lock(&adev->shadow_list_lock);
 	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {

@@ -3843,11 +3874,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 		dma_fence_put(fence);

 	if (r < 0 || tmo <= 0) {
-		DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
+		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
 		return -EIO;
 	}

-	DRM_INFO("recover vram bo from shadow done\n");
+	dev_info(adev->dev, "recover vram bo from shadow done\n");
 	return 0;
 }

@@ -3907,6 +3938,34 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	return r;
 }

+/**
+ * amdgpu_device_has_job_running - check if there is any job in mirror list
+ *
+ * @adev: amdgpu device pointer
+ *
+ * check if there is any job in mirror list
+ */
+bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
+{
+	int i;
+	struct drm_sched_job *job;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		if (!ring || !ring->sched.thread)
+			continue;
+
+		spin_lock(&ring->sched.job_list_lock);
+		job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
+				struct drm_sched_job, node);
+		spin_unlock(&ring->sched.job_list_lock);
+		if (job)
+			return true;
+	}
+	return false;
+}
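The new amdgpu_device_has_job_running() gives reset logic a cheap way to ask whether any scheduler still has work in flight; the ring_mirror_list holds jobs that have been handed to the hardware but have not signalled yet. A hypothetical caller (not part of this patch), deciding whether a timed-out device actually needs a reset:

    /* Hypothetical use: skip a disruptive reset when no ring has
     * anything in flight. */
    if (!amdgpu_device_has_job_running(adev)) {
    	dev_info(adev->dev, "no job running, skipping GPU reset\n");
    	return 0;
    }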
 /**
 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
 *
@@ -3918,7 +3977,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 {
 	if (!amdgpu_device_ip_check_soft_reset(adev)) {
-		DRM_INFO("Timeout, but no hardware hang detected.\n");
+		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
 		return false;
 	}
@@ -3958,7 +4017,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 	return true;

 disabled:
-		DRM_INFO("GPU recovery disabled.\n");
+		dev_info(adev->dev, "GPU recovery disabled.\n");
 		return false;
 }
@@ -3997,7 +4056,7 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 		r = amdgpu_device_ip_soft_reset(adev);
 		amdgpu_device_ip_post_soft_reset(adev);
 		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
-			DRM_INFO("soft reset failed, will fallback to full reset!\n");
+			dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
 			need_full_reset = true;
 		}
 	}
@@ -4033,8 +4092,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 				r = amdgpu_asic_reset(tmp_adev);

 				if (r) {
-					DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
-						  r, tmp_adev->ddev->unique);
+					dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
+						 r, adev_to_drm(tmp_adev)->unique);
 					break;
 				}
 			}
@@ -4066,8 +4125,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 		if (need_full_reset) {
 			/* post card */
-			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
-				DRM_WARN("asic atom init failed!");
+			if (amdgpu_device_asic_init(tmp_adev))
+				dev_warn(tmp_adev->dev, "asic atom init failed!");

 			if (!r) {
 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
@@ -4108,8 +4167,23 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 				amdgpu_fbdev_set_suspend(tmp_adev, 0);

-				/* must succeed. */
-				amdgpu_ras_resume(tmp_adev);
+				/*
+				 * The GPU enters bad state once faulty pages
+				 * by ECC has reached the threshold, and ras
+				 * recovery is scheduled next. So add one check
+				 * here to break recovery if it indeed exceeds
+				 * bad page threshold, and remind user to
+				 * retire this GPU or setting one bigger
+				 * bad_page_threshold value to fix this once
+				 * probing driver again.
+				 */
+				if (!amdgpu_ras_check_err_threshold(tmp_adev)) {
+					/* must succeed. */
+					amdgpu_ras_resume(tmp_adev);
+				} else {
+					r = -EINVAL;
+					goto out;
+				}

 				/* Update PSP FW topology after reset */
 				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
@@ -4117,7 +4191,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 		}
 	}

 out:
-
 	if (!r) {
 		amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
@@ -4142,16 +4215,19 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 	return r;
 }

-static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
+static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
+				    struct amdgpu_hive_info *hive)
 {
-	if (trylock) {
-		if (!mutex_trylock(&adev->lock_reset))
-			return false;
-	} else
-		mutex_lock(&adev->lock_reset);
+	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
+		return false;
+
+	if (hive) {
+		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
+	} else {
+		down_write(&adev->reset_sem);
+	}

 	atomic_inc(&adev->gpu_reset_counter);
-	adev->in_gpu_reset = true;
 	switch (amdgpu_asic_reset_method(adev)) {
 	case AMD_RESET_METHOD_MODE1:
 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
@@ -4171,8 +4247,8 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 {
 	amdgpu_vf_error_trans_all(adev);
 	adev->mp1_state = PP_MP1_STATE_NONE;
-	adev->in_gpu_reset = false;
-	mutex_unlock(&adev->lock_reset);
+	atomic_set(&adev->in_gpu_reset, 0);
+	up_write(&adev->reset_sem);
 }

 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
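Reset ownership is now split into two pieces of state: the in_gpu_reset atomic admits exactly one reset at a time (atomic_cmpxchg() either wins the 0-to-1 transition or reports that someone else already did), while the reset_sem write lock fences off readers such as the MMIO and debugfs paths shown earlier. down_write_nest_lock() additionally tells lockdep that taking the per-device write lock under hive->hive_lock is intentional when a whole XGMI hive is reset. The intended pairing, in sketch form:

    /* Claim the reset; exactly one caller wins. */
    if (!amdgpu_device_lock_adev(adev, hive))
    	return 0;	/* another reset is already in flight */

    /* ... perform the reset with all reset_sem readers excluded ... */

    amdgpu_device_unlock_adev(adev);	/* release in reverse order */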
@ -4282,12 +4358,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* We always reset all schedulers for device and all devices for XGMI * We always reset all schedulers for device and all devices for XGMI
* hive so that should take care of them too. * hive so that should take care of them too.
*/ */
hive = amdgpu_get_xgmi_hive(adev, true); hive = amdgpu_get_xgmi_hive(adev);
if (hive && !mutex_trylock(&hive->reset_lock)) { if (hive) {
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress", if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
job ? job->base.id : -1, hive->hive_id); DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
mutex_unlock(&hive->hive_lock); job ? job->base.id : -1, hive->hive_id);
return 0; amdgpu_put_xgmi_hive(hive);
return 0;
}
mutex_lock(&hive->hive_lock);
} }
/* /*
@ -4309,11 +4388,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* block all schedulers and reset given job's ring */ /* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
if (!amdgpu_device_lock_adev(tmp_adev, !hive)) { if (!amdgpu_device_lock_adev(tmp_adev, hive)) {
DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress", dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
job ? job->base.id : -1); job ? job->base.id : -1);
mutex_unlock(&hive->hive_lock); r = 0;
return 0; goto skip_recovery;
} }
/* /*
@ -4385,8 +4464,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
&need_full_reset); &need_full_reset);
/*TODO Should we stop ?*/ /*TODO Should we stop ?*/
if (r) { if (r) {
DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
r, tmp_adev->ddev->unique); r, adev_to_drm(tmp_adev)->unique);
tmp_adev->asic_reset_res = r; tmp_adev->asic_reset_res = r;
} }
} }
@ -4422,7 +4501,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
} }
if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
drm_helper_resume_force_mode(tmp_adev->ddev); drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
} }
tmp_adev->asic_reset_res = 0; tmp_adev->asic_reset_res = 0;
@ -4446,9 +4525,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_device_unlock_adev(tmp_adev); amdgpu_device_unlock_adev(tmp_adev);
} }
skip_recovery:
if (hive) { if (hive) {
mutex_unlock(&hive->reset_lock); atomic_set(&hive->in_reset, 0);
mutex_unlock(&hive->hive_lock); mutex_unlock(&hive->hive_lock);
amdgpu_put_xgmi_hive(hive);
} }
if (r) if (r)
@ -4594,10 +4675,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
int amdgpu_device_baco_enter(struct drm_device *dev) int amdgpu_device_baco_enter(struct drm_device *dev)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
if (!amdgpu_device_supports_baco(adev->ddev)) if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
return -ENOTSUPP; return -ENOTSUPP;
if (ras && ras->supported) if (ras && ras->supported)
@ -4608,11 +4689,11 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
int amdgpu_device_baco_exit(struct drm_device *dev) int amdgpu_device_baco_exit(struct drm_device *dev)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0; int ret = 0;
if (!amdgpu_device_supports_baco(adev->ddev)) if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
return -ENOTSUPP; return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev); ret = amdgpu_dpm_baco_exit(adev);

View File

@@ -136,7 +136,7 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin
 	uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

 	amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
-				  adev->discovery_tmr_size, false);
+				  adev->mman.discovery_tmr_size, false);
 	return 0;
 }

@@ -168,18 +168,18 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
 	uint16_t checksum;
 	int r;

-	adev->discovery_tmr_size = DISCOVERY_TMR_SIZE;
-	adev->discovery_bin = kzalloc(adev->discovery_tmr_size, GFP_KERNEL);
-	if (!adev->discovery_bin)
+	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
+	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
+	if (!adev->mman.discovery_bin)
 		return -ENOMEM;

-	r = amdgpu_discovery_read_binary(adev, adev->discovery_bin);
+	r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
 	if (r) {
 		DRM_ERROR("failed to read ip discovery binary\n");
 		goto out;
 	}

-	bhdr = (struct binary_header *)adev->discovery_bin;
+	bhdr = (struct binary_header *)adev->mman.discovery_bin;

 	if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
 		DRM_ERROR("invalid ip discovery binary signature\n");
@@ -192,7 +192,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
 	size = bhdr->binary_size - offset;
 	checksum = bhdr->binary_checksum;

-	if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
+	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 					      size, checksum)) {
 		DRM_ERROR("invalid ip discovery binary checksum\n");
 		r = -EINVAL;
@@ -202,7 +202,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
 	info = &bhdr->table_list[IP_DISCOVERY];
 	offset = le16_to_cpu(info->offset);
 	checksum = le16_to_cpu(info->checksum);
-	ihdr = (struct ip_discovery_header *)(adev->discovery_bin + offset);
+	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);

 	if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
 		DRM_ERROR("invalid ip discovery data table signature\n");
@@ -210,7 +210,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
 		goto out;
 	}

-	if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
+	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 					      ihdr->size, checksum)) {
 		DRM_ERROR("invalid ip discovery data table checksum\n");
 		r = -EINVAL;
@@ -220,9 +220,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
 	info = &bhdr->table_list[GC];
 	offset = le16_to_cpu(info->offset);
 	checksum = le16_to_cpu(info->checksum);
-	ghdr = (struct gpu_info_header *)(adev->discovery_bin + offset);
+	ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);

-	if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
+	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
 					      ghdr->size, checksum)) {
 		DRM_ERROR("invalid gc data table checksum\n");
 		r = -EINVAL;
@@ -232,16 +232,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
 	return 0;

 out:
-	kfree(adev->discovery_bin);
-	adev->discovery_bin = NULL;
+	kfree(adev->mman.discovery_bin);
+	adev->mman.discovery_bin = NULL;

 	return r;
 }

 void amdgpu_discovery_fini(struct amdgpu_device *adev)
 {
-	kfree(adev->discovery_bin);
-	adev->discovery_bin = NULL;
+	kfree(adev->mman.discovery_bin);
+	adev->mman.discovery_bin = NULL;
 }

 int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
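Each table in the discovery binary carries its own checksum, verified before the table is trusted. The verifier itself is not part of this excerpt; a plausible shape, assuming a simple byte-wise sum compared against the stored 16-bit value (the real algorithm in amdgpu_discovery.c may differ):

    /* Sketch only: a byte-sum checksum is an assumption here, not the
     * confirmed implementation of amdgpu_discovery_verify_checksum(). */
    static bool example_verify_checksum(uint8_t *data, uint32_t size,
    				    uint16_t expected)
    {
    	uint16_t checksum = 0;
    	uint32_t i;

    	for (i = 0; i < size; i++)
    		checksum += data[i];

    	return checksum == expected;
    }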
@@ -265,8 +265,8 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 		return r;
 	}

-	bhdr = (struct binary_header *)adev->discovery_bin;
-	ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
+	bhdr = (struct binary_header *)adev->mman.discovery_bin;
+	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
 	num_dies = le16_to_cpu(ihdr->num_dies);

@@ -274,7 +274,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 	for (i = 0; i < num_dies; i++) {
 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
-		dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
+		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
 		num_ips = le16_to_cpu(dhdr->num_ips);
 		ip_offset = die_offset + sizeof(*dhdr);

@@ -288,7 +288,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 			  le16_to_cpu(dhdr->die_id), num_ips);

 		for (j = 0; j < num_ips; j++) {
-			ip = (struct ip *)(adev->discovery_bin + ip_offset);
+			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
 			num_base_address = ip->num_base_address;

 			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -337,24 +337,24 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
 	uint16_t num_ips;
 	int i, j;

-	if (!adev->discovery_bin) {
+	if (!adev->mman.discovery_bin) {
 		DRM_ERROR("ip discovery uninitialized\n");
 		return -EINVAL;
 	}

-	bhdr = (struct binary_header *)adev->discovery_bin;
-	ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
+	bhdr = (struct binary_header *)adev->mman.discovery_bin;
+	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
 	num_dies = le16_to_cpu(ihdr->num_dies);

 	for (i = 0; i < num_dies; i++) {
 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
-		dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
+		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
 		num_ips = le16_to_cpu(dhdr->num_ips);
 		ip_offset = die_offset + sizeof(*dhdr);

 		for (j = 0; j < num_ips; j++) {
-			ip = (struct ip *)(adev->discovery_bin + ip_offset);
+			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

 			if (le16_to_cpu(ip->hw_id) == hw_id) {
 				if (major)
@@ -377,13 +377,13 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 	struct binary_header *bhdr;
 	struct gc_info_v1_0 *gc_info;

-	if (!adev->discovery_bin) {
+	if (!adev->mman.discovery_bin) {
 		DRM_ERROR("ip discovery uninitialized\n");
 		return -EINVAL;
 	}

-	bhdr = (struct binary_header *)adev->discovery_bin;
-	gc_info = (struct gc_info_v1_0 *)(adev->discovery_bin +
+	bhdr = (struct binary_header *)adev->mman.discovery_bin;
+	gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
 			le16_to_cpu(bhdr->table_list[GC].offset));

 	adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);

drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -93,7 +93,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
 * targeted by the flip
 */
 if (amdgpu_crtc->enabled &&
-(amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+(amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
 &vpos, &hpos, NULL, NULL,
 &crtc->hwmode)
 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
@@ -152,7 +152,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 struct drm_modeset_acquire_ctx *ctx)
 {
 struct drm_device *dev = crtc->dev;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 struct drm_gem_object *obj;
 struct amdgpu_flip_work *work;
@@ -292,7 +292,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
 pm_runtime_mark_last_busy(dev->dev);
-adev = dev->dev_private;
+adev = drm_to_adev(dev);
 /* if we have active crtcs and we don't have a power ref,
 take the current one */
 if (active && !adev->have_disp_power_ref) {
@@ -619,51 +619,51 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
 int sz;
 adev->mode_info.coherent_mode_property =
-drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
+drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
 if (!adev->mode_info.coherent_mode_property)
 return -ENOMEM;
 adev->mode_info.load_detect_property =
-drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
+drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
 if (!adev->mode_info.load_detect_property)
 return -ENOMEM;
-drm_mode_create_scaling_mode_property(adev->ddev);
+drm_mode_create_scaling_mode_property(adev_to_drm(adev));
 sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
 adev->mode_info.underscan_property =
-drm_property_create_enum(adev->ddev, 0,
+drm_property_create_enum(adev_to_drm(adev), 0,
 "underscan",
 amdgpu_underscan_enum_list, sz);
 adev->mode_info.underscan_hborder_property =
-drm_property_create_range(adev->ddev, 0,
+drm_property_create_range(adev_to_drm(adev), 0,
 "underscan hborder", 0, 128);
 if (!adev->mode_info.underscan_hborder_property)
 return -ENOMEM;
 adev->mode_info.underscan_vborder_property =
-drm_property_create_range(adev->ddev, 0,
+drm_property_create_range(adev_to_drm(adev), 0,
 "underscan vborder", 0, 128);
 if (!adev->mode_info.underscan_vborder_property)
 return -ENOMEM;
 sz = ARRAY_SIZE(amdgpu_audio_enum_list);
 adev->mode_info.audio_property =
-drm_property_create_enum(adev->ddev, 0,
+drm_property_create_enum(adev_to_drm(adev), 0,
 "audio",
 amdgpu_audio_enum_list, sz);
 sz = ARRAY_SIZE(amdgpu_dither_enum_list);
 adev->mode_info.dither_property =
-drm_property_create_enum(adev->ddev, 0,
+drm_property_create_enum(adev_to_drm(adev), 0,
 "dither",
 amdgpu_dither_enum_list, sz);
 if (amdgpu_device_has_dc_support(adev)) {
 adev->mode_info.abm_level_property =
-drm_property_create_range(adev->ddev, 0,
+drm_property_create_range(adev_to_drm(adev), 0,
 "abm level", 0, 4);
 if (!adev->mode_info.abm_level_property)
 return -ENOMEM;
 }
@@ -813,7 +813,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
 int vbl_start, vbl_end, vtotal, ret = 0;
 bool in_vbl = true;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
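
Nearly every hunk in this series swaps dev->dev_private and adev->ddev for drm_to_adev()/adev_to_drm(). A minimal sketch of what those helpers amount to, assuming struct drm_device is embedded in struct amdgpu_device as this series does (the fields shown are illustrative, not the full struct):

/* Sketch: container_of-based accessors over an embedded drm_device. */
#include <linux/kernel.h>
#include <drm/drm_device.h>

struct amdgpu_device {
	struct device *dev;
	struct drm_device ddev;	/* embedded, no longer a pointer */
	/* ... many more fields ... */
};

static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{
	return container_of(ddev, struct amdgpu_device, ddev);
}

static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{
	return &adev->ddev;
}

The payoff is that neither direction needs the untyped dev_private pointer, so both conversions are compile-time checked.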


@@ -35,6 +35,7 @@
 #include "amdgpu_display.h"
 #include "amdgpu_gem.h"
 #include "amdgpu_dma_buf.h"
+#include "amdgpu_xgmi.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-fence-array.h>
@@ -454,7 +455,7 @@ static struct drm_gem_object *
 amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
 {
 struct dma_resv *resv = dma_buf->resv;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_bo *bo;
 struct amdgpu_bo_param bp;
 int ret;
@@ -595,3 +596,36 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
 obj->import_attach = attach;
 return obj;
 }
+
+/**
+ * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi available for P2P transfer
+ *
+ * @adev: amdgpu_device pointer of the importer
+ * @bo: amdgpu buffer object
+ *
+ * Returns:
+ * True if dmabuf accessible over xgmi, false otherwise.
+ */
+bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
+struct amdgpu_bo *bo)
+{
+struct drm_gem_object *obj = &bo->tbo.base;
+struct drm_gem_object *gobj;
+
+if (obj->import_attach) {
+struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+
+if (dma_buf->ops != &amdgpu_dmabuf_ops)
+/* No XGMI with non AMD GPUs */
+return false;
+
+gobj = dma_buf->priv;
+bo = gem_to_amdgpu_bo(gobj);
+}
+
+if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
+(bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
+return true;
+
+return false;
+}
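
A hedged sketch of how a caller might use the new helper when picking the placement domain for an imported buffer; pick_import_domain is just a plausible call site for illustration, not a claim about the exact upstream wiring:

/* Hypothetical call site: decide where an imported BO may live. */
static unsigned int pick_import_domain(struct amdgpu_device *adev,
				       struct amdgpu_bo *bo)
{
	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo))
		return AMDGPU_GEM_DOMAIN_VRAM;	/* peer reaches VRAM over XGMI */

	return AMDGPU_GEM_DOMAIN_GTT;		/* fall back to system memory */
}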


@@ -29,6 +29,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
 int flags);
 struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
 struct dma_buf *dma_buf);
+bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
+struct amdgpu_bo *bo);
 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,


@@ -26,6 +26,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_vblank.h>
+#include <drm/drm_managed.h>
 #include "amdgpu_drv.h"
 #include <drm/drm_pciids.h>
@@ -88,9 +89,10 @@
 * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
 * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
 * - 3.39.0 - DMABUF implicit sync does a full pipeline sync
+ * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
 */
 #define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 39
+#define KMS_DRIVER_MINOR 40
 #define KMS_DRIVER_PATCHLEVEL 0
 int amdgpu_vram_limit = 0;
@@ -150,12 +152,14 @@ int amdgpu_noretry;
 int amdgpu_force_asic_type = -1;
 int amdgpu_tmz = 0;
 int amdgpu_reset_method = -1; /* auto */
+int amdgpu_num_kcq = -1;
 struct amdgpu_mgpu_info mgpu_info = {
 .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
 };
 int amdgpu_ras_enable = -1;
 uint amdgpu_ras_mask = 0xffffffff;
+int amdgpu_bad_page_threshold = -1;
 /**
 * DOC: vramlimit (int)
@@ -676,11 +680,14 @@ MODULE_PARM_DESC(debug_largebar,
 * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
 * table to get information about AMD APUs. This option can serve as a workaround on
 * systems with a broken CRAT table.
+ *
+ * Default is auto (whether to use the CRAT table is decided from the asic type,
+ * iommu_v2 support and the CRAT table itself).
 */
 int ignore_crat;
 module_param(ignore_crat, int, 0444);
 MODULE_PARM_DESC(ignore_crat,
-"Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+"Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)");
 /**
 * DOC: halt_if_hws_hang (int)
@@ -715,6 +722,15 @@ MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1
 bool debug_evictions;
 module_param(debug_evictions, bool, 0644);
 MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)");
+
+/**
+ * DOC: no_system_mem_limit(bool)
+ * Disable the system memory limit, to support multiple processes sharing memory.
+ */
+bool no_system_mem_limit;
+module_param(no_system_mem_limit, bool, 0644);
+MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");
+
 #endif
 /**
@@ -765,6 +781,19 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
 MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)");
 module_param_named(reset_method, amdgpu_reset_method, int, 0444);
+
+/**
+ * DOC: bad_page_threshold (int)
+ * Bad page threshold specifies the threshold value of faulty pages detected
+ * by RAS ECC; once the total number of faulty pages exceeds it, the GPU may
+ * enter a bad state, which is left for the user's further check.
+ */
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold (-1 = auto (default), 0 = disable bad page retirement)");
+module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
+
+MODULE_PARM_DESC(num_kcq, "number of kernel compute queues to set up (clamped to 8 if set to a value greater than 8 or less than 0; only affects gfx8+)");
+module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_SI
 {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1057,7 +1086,7 @@ static struct drm_driver kms_driver;
 static int amdgpu_pci_probe(struct pci_dev *pdev,
 const struct pci_device_id *ent)
 {
-struct drm_device *dev;
+struct drm_device *ddev;
 struct amdgpu_device *adev;
 unsigned long flags = ent->driver_data;
 int ret, retry = 0;
@@ -1113,36 +1142,44 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 if (ret)
 return ret;
-dev = drm_dev_alloc(&kms_driver, &pdev->dev);
-if (IS_ERR(dev))
-return PTR_ERR(dev);
+adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+if (!adev)
+return -ENOMEM;
+
+adev->dev  = &pdev->dev;
+adev->pdev = pdev;
+ddev = adev_to_drm(adev);
+ret = drm_dev_init(ddev, &kms_driver, &pdev->dev);
+if (ret)
+goto err_free;
+
+drmm_add_final_kfree(ddev, adev);
 if (!supports_atomic)
-dev->driver_features &= ~DRIVER_ATOMIC;
+ddev->driver_features &= ~DRIVER_ATOMIC;
 ret = pci_enable_device(pdev);
 if (ret)
 goto err_free;
-dev->pdev = pdev;
-
-pci_set_drvdata(pdev, dev);
+ddev->pdev = pdev;
+pci_set_drvdata(pdev, ddev);
-ret = amdgpu_driver_load_kms(dev, ent->driver_data);
+ret = amdgpu_driver_load_kms(adev, ent->driver_data);
 if (ret)
 goto err_pci;
 retry_init:
-ret = drm_dev_register(dev, ent->driver_data);
+ret = drm_dev_register(ddev, ent->driver_data);
 if (ret == -EAGAIN && ++retry <= 3) {
 DRM_INFO("retry init %d\n", retry);
 /* Don't request EX mode too frequently which is attacking */
 msleep(5000);
 goto retry_init;
-} else if (ret)
+} else if (ret) {
 goto err_pci;
+}
-adev = dev->dev_private;
 ret = amdgpu_debugfs_init(adev);
 if (ret)
 DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
@@ -1152,7 +1189,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 err_pci:
 pci_disable_device(pdev);
 err_free:
-drm_dev_put(dev);
+drm_dev_put(ddev);
 return ret;
 }
@@ -1176,7 +1213,7 @@ static void
 amdgpu_pci_shutdown(struct pci_dev *pdev)
 {
 struct drm_device *dev = pci_get_drvdata(pdev);
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 if (amdgpu_ras_intr_triggered())
 return;
@@ -1209,7 +1246,7 @@ static int amdgpu_pmops_resume(struct device *dev)
 static int amdgpu_pmops_freeze(struct device *dev)
 {
 struct drm_device *drm_dev = dev_get_drvdata(dev);
-struct amdgpu_device *adev = drm_dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(drm_dev);
 int r;
 adev->in_hibernate = true;
@@ -1245,7 +1282,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 {
 struct pci_dev *pdev = to_pci_dev(dev);
 struct drm_device *drm_dev = pci_get_drvdata(pdev);
-struct amdgpu_device *adev = drm_dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(drm_dev);
 int ret, i;
 if (!adev->runpm) {
@@ -1296,7 +1333,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 {
 struct pci_dev *pdev = to_pci_dev(dev);
 struct drm_device *drm_dev = pci_get_drvdata(pdev);
-struct amdgpu_device *adev = drm_dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(drm_dev);
 int ret;
 if (!adev->runpm)
@@ -1332,7 +1369,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
 {
 struct drm_device *drm_dev = dev_get_drvdata(dev);
-struct amdgpu_device *adev = drm_dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(drm_dev);
 /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
 int ret = 1;
@@ -1500,8 +1537,6 @@ static struct pci_driver amdgpu_kms_pci_driver = {
 .driver.pm = &amdgpu_pm_ops,
 };
 
 static int __init amdgpu_init(void)
 {
 int r;
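
The probe rewrite above is the drm_managed allocation pattern: the driver allocates its own device struct with the drm_device embedded, initializes it with drm_dev_init(), then hands the final kfree to DRM so the memory lives as long as the last drm_dev_put() reference. A condensed sketch of that pattern under the same assumptions (kms_driver as in this file, error handling trimmed):

/* Sketch of the embedded-device probe pattern used above. */
static int probe_sketch(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct amdgpu_device *adev;
	int ret;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	ret = drm_dev_init(adev_to_drm(adev), &kms_driver, &pdev->dev);
	if (ret) {
		kfree(adev);	/* not yet managed, caller still owns it */
		return ret;
	}

	/* From here on, DRM frees adev when the last reference drops. */
	drmm_add_final_kfree(adev_to_drm(adev), adev);
	return 0;
}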


@@ -35,7 +35,7 @@
 void
 amdgpu_link_encoder_connector(struct drm_device *dev)
 {
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct drm_connector *connector;
 struct drm_connector_list_iter iter;
 struct amdgpu_connector *amdgpu_connector;


@@ -135,7 +135,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 AMDGPU_GEM_CREATE_VRAM_CLEARED;
-info = drm_get_format_info(adev->ddev, mode_cmd);
+info = drm_get_format_info(adev_to_drm(adev), mode_cmd);
 cpp = info->cpp[0];
 /* need to align pitch with crtc limits */
@@ -231,7 +231,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 goto out;
 }
-ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
+ret = amdgpu_display_framebuffer_init(adev_to_drm(adev), &rfbdev->rfb,
 &mode_cmd, gobj);
 if (ret) {
 DRM_ERROR("failed to initialize framebuffer %d\n", ret);
@@ -254,7 +254,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
 /* setup aperture base/size for vesafb takeover */
-info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
+info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base;
 info->apertures->ranges[0].size = adev->gmc.aper_size;
 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -270,7 +270,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 DRM_INFO("fb depth is %d\n", fb->format->depth);
 DRM_INFO("   pitch is %d\n", fb->pitches[0]);
-vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
+vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);
 return 0;
 out:
@@ -318,7 +318,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
 return 0;
 /* don't init fbdev if there are no connectors */
-if (list_empty(&adev->ddev->mode_config.connector_list))
+if (list_empty(&adev_to_drm(adev)->mode_config.connector_list))
 return 0;
 /* select 8 bpp console on low vram cards */
@@ -332,10 +332,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
 rfbdev->adev = adev;
 adev->mode_info.rfbdev = rfbdev;
-drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
+drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper,
 &amdgpu_fb_helper_funcs);
-ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
+ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper);
 if (ret) {
 kfree(rfbdev);
 return ret;
@@ -343,7 +343,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
 /* disable all the possible outputs/crtcs before entering KMS mode */
 if (!amdgpu_device_has_dc_support(adev))
-drm_helper_disable_unused_functions(adev->ddev);
+drm_helper_disable_unused_functions(adev_to_drm(adev));
 drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
 return 0;
@@ -354,7 +354,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
 if (!adev->mode_info.rfbdev)
 return;
-amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
+amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev);
 kfree(adev->mode_info.rfbdev);
 adev->mode_info.rfbdev = NULL;
 }


@@ -155,7 +155,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
 seq);
 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 seq, flags | AMDGPU_FENCE_FLAG_INT);
-pm_runtime_get_noresume(adev->ddev->dev);
+pm_runtime_get_noresume(adev_to_drm(adev)->dev);
 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 if (unlikely(rcu_dereference_protected(*ptr, 1))) {
 struct dma_fence *old;
@@ -284,8 +284,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
 BUG();
 dma_fence_put(fence);
-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 } while (last_seq != seq);
 return true;
@@ -700,7 +700,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
 {
 struct drm_info_node *node = (struct drm_info_node *)m->private;
 struct drm_device *dev = node->minor->dev;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 int i;
 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -749,7 +749,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
 {
 struct drm_info_node *node = (struct drm_info_node *) m->private;
 struct drm_device *dev = node->minor->dev;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 int r;
 r = pm_runtime_get_sync(dev->dev);


@@ -93,7 +93,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 void amdgpu_gem_force_release(struct amdgpu_device *adev)
 {
-struct drm_device *ddev = adev->ddev;
+struct drm_device *ddev = adev_to_drm(adev);
 struct drm_file *file;
 mutex_lock(&ddev->filelist_mutex);
@@ -217,7 +217,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 struct drm_file *filp)
 {
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_fpriv *fpriv = filp->driver_priv;
 struct amdgpu_vm *vm = &fpriv->vm;
 union drm_amdgpu_gem_create *args = data;
@@ -298,7 +298,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 struct drm_file *filp)
 {
 struct ttm_operation_ctx ctx = { true, false };
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct drm_amdgpu_gem_userptr *args = data;
 struct drm_gem_object *gobj;
 struct amdgpu_bo *bo;
@@ -587,7 +587,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 struct drm_amdgpu_gem_va *args = data;
 struct drm_gem_object *gobj;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_fpriv *fpriv = filp->driver_priv;
 struct amdgpu_bo *abo;
 struct amdgpu_bo_va *bo_va;
@@ -711,7 +711,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 struct drm_file *filp)
 {
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct drm_amdgpu_gem_op *args = data;
 struct drm_gem_object *gobj;
 struct amdgpu_vm_bo_base *base;
@@ -788,7 +788,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 struct drm_device *dev,
 struct drm_mode_create_dumb *args)
 {
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct drm_gem_object *gobj;
 uint32_t handle;
 u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |


@@ -202,40 +202,29 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 {
-int i, queue, pipe, mec;
+int i, queue, pipe;
 bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
+int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
+adev->gfx.mec.num_queue_per_pipe,
+adev->gfx.num_compute_rings);
 
-/* policy for amdgpu compute queue ownership */
-for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-queue = i % adev->gfx.mec.num_queue_per_pipe;
-pipe = (i / adev->gfx.mec.num_queue_per_pipe)
-% adev->gfx.mec.num_pipe_per_mec;
-mec = (i / adev->gfx.mec.num_queue_per_pipe)
-/ adev->gfx.mec.num_pipe_per_mec;
-
-/* we've run out of HW */
-if (mec >= adev->gfx.mec.num_mec)
-break;
-
-if (multipipe_policy) {
-/* policy: amdgpu owns the first two queues of the first MEC */
-if (mec == 0 && queue < 2)
-set_bit(i, adev->gfx.mec.queue_bitmap);
-} else {
-/* policy: amdgpu owns all queues in the first pipe */
-if (mec == 0 && pipe == 0)
-set_bit(i, adev->gfx.mec.queue_bitmap);
+if (multipipe_policy) {
+/* policy: make queues evenly cross all pipes on MEC1 only */
+for (i = 0; i < max_queues_per_mec; i++) {
+pipe = i % adev->gfx.mec.num_pipe_per_mec;
+queue = (i / adev->gfx.mec.num_pipe_per_mec) %
+adev->gfx.mec.num_queue_per_pipe;
+
+set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
+adev->gfx.mec.queue_bitmap);
 }
+} else {
+/* policy: amdgpu owns all queues in the given pipe */
+for (i = 0; i < max_queues_per_mec; ++i)
+set_bit(i, adev->gfx.mec.queue_bitmap);
 }
 
-/* update the number of active compute rings */
-adev->gfx.num_compute_rings =
-bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
-/* If you hit this case and edited the policy, you probably just
- * need to increase AMDGPU_MAX_COMPUTE_RINGS */
-if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
 }
 
 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
@@ -571,8 +560,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
 schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
 } else if (!enable && adev->gfx.gfx_off_state) {
-if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false))
+if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
 adev->gfx.gfx_off_state = false;
+
+if (adev->gfx.funcs->init_spm_golden) {
+dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
+amdgpu_gfx_init_spm_golden(adev);
+}
+}
 }
 mutex_unlock(&adev->gfx.gfx_off_mutex);
@@ -724,7 +719,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 *
 * also don't wait anymore for IRQ context
 * */
-if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
 goto failed_kiq_read;
 might_sleep();
@@ -748,7 +743,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 failed_kiq_read:
 if (reg_val_offs)
 amdgpu_device_wb_free(adev, reg_val_offs);
-pr_err("failed to read reg:%x\n", reg);
+dev_err(adev->dev, "failed to read reg:%x\n", reg);
 return ~0;
 }
@@ -782,7 +777,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 *
 * also don't wait anymore for IRQ context
 * */
-if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
 goto failed_kiq_write;
 might_sleep();
@@ -801,5 +796,5 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 amdgpu_ring_undo(ring);
 spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq_write:
-pr_err("failed to write reg:%x\n", reg);
+dev_err(adev->dev, "failed to write reg:%x\n", reg);
 }
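
The new multipipe policy spreads the kernel's compute queues round-robin across MEC1 pipes instead of packing them into pipe 0. A small worked example of the mapping, assuming 4 pipes per MEC and 8 queues per pipe (typical gfx9/gfx10 values, stated here as an assumption):

/* Worked example of the new pipe/queue mapping; plain C, runnable. */
#include <stdio.h>

int main(void)
{
	const int num_pipe_per_mec = 4, num_queue_per_pipe = 8; /* assumed */

	for (int i = 0; i < 8; i++) {
		int pipe  = i % num_pipe_per_mec;
		int queue = (i / num_pipe_per_mec) % num_queue_per_pipe;

		printf("ring %d -> pipe %d queue %d (bitmap bit %d)\n",
		       i, pipe, queue, pipe * num_queue_per_pipe + queue);
	}
	return 0;
}

Rings 0..3 land on queue 0 of pipes 0..3, ring 4 wraps back to pipe 0 queue 1, and so on, which is exactly the "evenly cross all pipes" behavior the comment in the hunk describes.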


@@ -216,6 +216,7 @@ struct amdgpu_gfx_funcs {
 int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
 int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
 void (*reset_ras_error_count) (struct amdgpu_device *adev);
+void (*init_spm_golden)(struct amdgpu_device *adev);
 };
 struct sq_work {
@@ -324,6 +325,7 @@ struct amdgpu_gfx {
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
 #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
 #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid))
+#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
 /**
 * amdgpu_gfx_create_bitmask - create a bitmask


@@ -27,6 +27,7 @@
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include "amdgpu.h"
+#include "amdgpu_gmc.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_xgmi.h"
@@ -411,3 +412,64 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 break;
 }
 }
+
+void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
+bool enable)
+{
+struct amdgpu_vmhub *hub;
+u32 tmp, reg, i;
+
+hub = &adev->vmhub[hub_type];
+for (i = 0; i < 16; i++) {
+reg = hub->vm_context0_cntl + hub->ctx_distance * i;
+
+tmp = RREG32(reg);
+if (enable)
+tmp |= hub->vm_cntx_cntl_vm_fault;
+else
+tmp &= ~hub->vm_cntx_cntl_vm_fault;
+
+WREG32(reg, tmp);
+}
+}
+
+void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
+{
+unsigned size;
+
+/*
+ * TODO:
+ * Currently there is a bug where some memory client outside
+ * of the driver writes to the first 8M of VRAM on S3 resume.
+ * This overrides GART, which by default gets placed in the first 8M,
+ * and causes VM_FAULTS once GTT is accessed.
+ * Keep the stolen memory reservation until this is solved.
+ */
+switch (adev->asic_type) {
+case CHIP_VEGA10:
+case CHIP_RAVEN:
+case CHIP_RENOIR:
+adev->mman.keep_stolen_vga_memory = true;
+break;
+default:
+adev->mman.keep_stolen_vga_memory = false;
+break;
+}
+
+if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE))
+size = 0;
+else
+size = amdgpu_gmc_get_vbios_fb_size(adev);
+
+/* set to 0 if the pre-OS buffer uses up most of vram */
+if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+size = 0;
+
+if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
+adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
+adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
+} else {
+adev->mman.stolen_vga_size = size;
+adev->mman.stolen_extended_size = 0;
+}
+}
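
A worked example of the split at the end of amdgpu_gmc_get_vbios_allocations, assuming for illustration that AMDGPU_VBIOS_VGA_ALLOCATION is 16 MiB (the real constant lives in amdgpu_gmc.h and may differ): if the vbios reports a 24 MiB pre-OS framebuffer, stolen_vga_size becomes 16 MiB and stolen_extended_size the remaining 8 MiB; if it reports 4 MiB, stolen_vga_size is 4 MiB and stolen_extended_size is 0. The VGA portion is the part that may be kept around permanently on the ASICs flagged with keep_stolen_vga_memory, while the extended portion can be released once the driver has taken over the display.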


@@ -74,6 +74,12 @@ struct amdgpu_gmc_fault {
 /*
 * VMHUB structures, functions & helpers
 */
+struct amdgpu_vmhub_funcs {
+void (*print_l2_protection_fault_status)(struct amdgpu_device *adev,
+uint32_t status);
+uint32_t (*get_invalidate_req)(unsigned int vmid, uint32_t flush_type);
+};
+
 struct amdgpu_vmhub {
 uint32_t ctx0_ptb_addr_lo32;
 uint32_t ctx0_ptb_addr_hi32;
@@ -92,6 +98,10 @@ struct amdgpu_vmhub {
 uint32_t ctx_addr_distance; /* include LO32/HI32 */
 uint32_t eng_distance;
 uint32_t eng_addr_distance; /* include LO32/HI32 */
+
+uint32_t vm_cntx_cntl_vm_fault;
+
+const struct amdgpu_vmhub_funcs *vmhub_funcs;
 };
 /*
@@ -121,6 +131,8 @@ struct amdgpu_gmc_funcs {
 void (*get_vm_pte)(struct amdgpu_device *adev,
 struct amdgpu_bo_va_mapping *mapping,
 uint64_t *flags);
+/* get the amount of memory used by the vbios for pre-OS console */
+unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
 };
 struct amdgpu_xgmi {
@@ -203,7 +215,6 @@ struct amdgpu_gmc {
 uint8_t vram_vendor;
 uint32_t srbm_soft_reset;
 bool prt_warning;
-uint64_t stolen_size;
 uint32_t sdpif_register;
 /* apertures */
 u64 shared_aperture_start;
@@ -239,6 +250,7 @@ struct amdgpu_gmc {
 #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
 #define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev))
 /**
 * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
@@ -289,4 +301,10 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
 extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+
+extern void
+amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
+bool enable);
+
+void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
+
 #endif
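
The new get_vbios_fb_size hook follows amdgpu's usual per-ASIC function-table dispatch: each gmc IP version fills in the pointer and common code calls it through the macro above. A hedged sketch of how a backend might wire it up; the v10 names and empty body are purely illustrative:

/* Sketch: per-ASIC hook wiring; gmc_v10_0_get_vbios_fb_size is illustrative. */
static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	/* e.g. decode the pre-OS scanout size from the VGA/display registers */
	return 0;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	/* ... existing hooks elided ... */
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

/* Common code then dispatches via:
 *	size = amdgpu_gmc_get_vbios_fb_size(adev);
 */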


@@ -46,8 +46,9 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
 struct device_attribute *attr, char *buf)
 {
 struct drm_device *ddev = dev_get_drvdata(dev);
-struct amdgpu_device *adev = ddev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(ddev);
 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+
 return snprintf(buf, PAGE_SIZE, "%llu\n",
 man->size * PAGE_SIZE);
 }
@@ -64,8 +65,9 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
 struct device_attribute *attr, char *buf)
 {
 struct drm_device *ddev = dev_get_drvdata(dev);
-struct amdgpu_device *adev = ddev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(ddev);
 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+
 return snprintf(buf, PAGE_SIZE, "%llu\n",
 amdgpu_gtt_mgr_usage(man));
 }


@@ -40,7 +40,7 @@
 static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
 {
 struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
-struct amdgpu_device *adev = i2c->dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(i2c->dev);
 struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
 uint32_t temp;
@@ -82,7 +82,7 @@ static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
 static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
 {
 struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
-struct amdgpu_device *adev = i2c->dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(i2c->dev);
 struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
 uint32_t temp;
@@ -101,7 +101,7 @@ static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
 static int amdgpu_i2c_get_clock(void *i2c_priv)
 {
 struct amdgpu_i2c_chan *i2c = i2c_priv;
-struct amdgpu_device *adev = i2c->dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(i2c->dev);
 struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
 uint32_t val;
@@ -116,7 +116,7 @@ static int amdgpu_i2c_get_clock(void *i2c_priv)
 static int amdgpu_i2c_get_data(void *i2c_priv)
 {
 struct amdgpu_i2c_chan *i2c = i2c_priv;
-struct amdgpu_device *adev = i2c->dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(i2c->dev);
 struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
 uint32_t val;
@@ -130,7 +130,7 @@ static int amdgpu_i2c_get_data(void *i2c_priv)
 static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
 {
 struct amdgpu_i2c_chan *i2c = i2c_priv;
-struct amdgpu_device *adev = i2c->dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(i2c->dev);
 struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
 uint32_t val;
@@ -143,7 +143,7 @@ static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
 static void amdgpu_i2c_set_data(void *i2c_priv, int data)
 {
 struct amdgpu_i2c_chan *i2c = i2c_priv;
-struct amdgpu_device *adev = i2c->dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(i2c->dev);
 struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
 uint32_t val;
@@ -253,7 +253,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev,
 const struct amdgpu_i2c_bus_rec *rec,
 const char *name)
 {
-struct drm_device *dev = adev->ddev;
+struct drm_device *dev = adev_to_drm(adev);
 int i;
 for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {


@@ -445,7 +445,7 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
 {
 struct drm_info_node *node = (struct drm_info_node *) m->private;
 struct drm_device *dev = node->minor->dev;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 seq_printf(m, "--------------------- DELAYED --------------------- \n");
 amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],


@@ -85,7 +85,7 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
 {
 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
 hotplug_work);
-struct drm_device *dev = adev->ddev;
+struct drm_device *dev = adev_to_drm(adev);
 struct drm_mode_config *mode_config = &dev->mode_config;
 struct drm_connector *connector;
 struct drm_connector_list_iter iter;
@@ -151,7 +151,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 irqreturn_t amdgpu_irq_handler(int irq, void *arg)
 {
 struct drm_device *dev = (struct drm_device *) arg;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 irqreturn_t ret;
 ret = amdgpu_ih_process(adev, &adev->irq.ih);
@@ -268,9 +268,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 if (!adev->enable_virtual_display)
 /* Disable vblank IRQs aggressively for power-saving */
 /* XXX: can this be enabled for DC? */
-adev->ddev->vblank_disable_immediate = true;
+adev_to_drm(adev)->vblank_disable_immediate = true;
-r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
 if (r)
 return r;
@@ -284,14 +284,14 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 adev->irq.installed = true;
 /* Use vector 0 for MSI-X */
-r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
+r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
 if (r) {
 adev->irq.installed = false;
 if (!amdgpu_device_has_dc_support(adev))
 flush_work(&adev->hotplug_work);
 return r;
 }
-adev->ddev->max_vblank_count = 0x00ffffff;
+adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
 DRM_DEBUG("amdgpu: irq initialized.\n");
 return 0;
@@ -311,7 +311,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 unsigned i, j;
 if (adev->irq.installed) {
-drm_irq_uninstall(adev->ddev);
+drm_irq_uninstall(adev_to_drm(adev));
 adev->irq.installed = false;
 if (adev->irq.msi_enabled)
 pci_free_irq_vectors(adev->pdev);
@@ -522,7 +522,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 unsigned type)
 {
-if (!adev->ddev->irq_enabled)
+if (!adev_to_drm(adev)->irq_enabled)
 return -ENOENT;
 if (type >= src->num_types)
@@ -552,7 +552,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 unsigned type)
 {
-if (!adev->ddev->irq_enabled)
+if (!adev_to_drm(adev)->irq_enabled)
 return -ENOENT;
 if (type >= src->num_types)
@@ -583,7 +583,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 unsigned type)
 {
-if (!adev->ddev->irq_enabled)
+if (!adev_to_drm(adev)->irq_enabled)
 return false;
 if (type >= src->num_types)


@@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 int i;
 /* Signal all jobs not yet scheduled */
-for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
 struct drm_sched_rq *rq = &sched->sched_rq[i];
 if (!rq)


@@ -78,7 +78,7 @@ void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
 */
 void amdgpu_driver_unload_kms(struct drm_device *dev)
 {
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 if (adev == NULL)
 return;
@@ -86,7 +86,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
 amdgpu_unregister_gpu_instance(adev);
 if (adev->rmmio == NULL)
-goto done_free;
+return;
 if (adev->runpm) {
 pm_runtime_get_sync(dev->dev);
@@ -94,12 +94,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
 }
 amdgpu_acpi_fini(adev);
 amdgpu_device_fini(adev);
-
-done_free:
-kfree(adev);
-dev->dev_private = NULL;
 }
 void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
@@ -130,22 +125,18 @@
 /**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
- * @dev: drm dev pointer
+ * @adev: pointer to struct amdgpu_device
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
-int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
+int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 {
-struct amdgpu_device *adev;
+struct drm_device *dev;
 int r, acpi_status;
 
-adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
-if (adev == NULL) {
-return -ENOMEM;
-}
-dev->dev_private = (void *)adev;
+dev = adev_to_drm(adev);
 
 if (amdgpu_has_atpx() &&
 (amdgpu_is_atpx_hybrid() ||
@@ -160,7 +151,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 * properly initialize the GPU MC controller and permit
 * VRAM allocation
 */
-r = amdgpu_device_init(adev, dev, dev->pdev, flags);
+r = amdgpu_device_init(adev, flags);
 if (r) {
 dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
 goto out;
@@ -480,7 +471,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 */
 static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct drm_amdgpu_info *info = data;
 struct amdgpu_mode_info *minfo = &adev->mode_info;
 void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@@ -745,6 +736,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
 if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
+if (amdgpu_is_tmz(adev))
+dev_info.ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
 vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 vm_size -= AMDGPU_VA_RESERVED_SIZE;
@@ -998,7 +991,7 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 */
 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 {
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_fpriv *fpriv;
 int r, pasid;
@@ -1083,7 +1076,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 void amdgpu_driver_postclose_kms(struct drm_device *dev,
 struct drm_file *file_priv)
 {
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 struct amdgpu_bo_list *list;
 struct amdgpu_bo *pd;
@@ -1148,7 +1141,7 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
 {
 struct drm_device *dev = crtc->dev;
 unsigned int pipe = crtc->index;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 int vpos, hpos, stat;
 u32 count;
@@ -1216,7 +1209,7 @@ int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
 {
 struct drm_device *dev = crtc->dev;
 unsigned int pipe = crtc->index;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
 return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
@@ -1233,7 +1226,7 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
 {
 struct drm_device *dev = crtc->dev;
 unsigned int pipe = crtc->index;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
 amdgpu_irq_put(adev, &adev->crtc_irq, idx);
@@ -1269,7 +1262,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
 {
 struct drm_info_node *node = (struct drm_info_node *) m->private;
 struct drm_device *dev = node->minor->dev;
-struct amdgpu_device *adev = dev->dev_private;
+struct amdgpu_device *adev = drm_to_adev(dev);
 struct drm_amdgpu_info_firmware fw_info;
 struct drm_amdgpu_query_fw query_fw;
 struct atom_context *ctx = adev->mode_info.atom_context;
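
Userspace can consume the new TMZ bit through the existing DEV_INFO query; a hedged sketch of the raw ioctl path, using the field names from the amdgpu UAPI headers this commit extends (include paths may vary between distros):

/* Sketch: query AMDGPU_IDS_FLAGS_TMZ from userspace via DRM_IOCTL_AMDGPU_INFO. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int gpu_has_tmz(int fd)
{
	struct drm_amdgpu_info_device dev_info;
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)&dev_info;
	request.return_size = sizeof(dev_info);
	request.query = AMDGPU_INFO_DEV_INFO;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_INFO, &request))
		return -1;	/* query failed */

	return !!(dev_info.ids_flags & AMDGPU_IDS_FLAGS_TMZ);
}

The linked Mesa merge request (6049) is the in-tree consumer of this flag.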


@@ -27,6 +27,19 @@ struct amdgpu_mmhub_funcs {
 void (*query_ras_error_count)(struct amdgpu_device *adev,
 void *ras_error_status);
 void (*reset_ras_error_count)(struct amdgpu_device *adev);
+u64 (*get_fb_location)(struct amdgpu_device *adev);
+void (*init)(struct amdgpu_device *adev);
+int (*gart_enable)(struct amdgpu_device *adev);
+void (*set_fault_enable_default)(struct amdgpu_device *adev,
+bool value);
+void (*gart_disable)(struct amdgpu_device *adev);
+int (*set_clockgating)(struct amdgpu_device *adev,
+enum amd_clockgating_state state);
+void (*get_clockgating)(struct amdgpu_device *adev, u32 *flags);
+void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
+uint64_t page_table_base);
+void (*update_power_gating)(struct amdgpu_device *adev,
+bool enable);
 };
 struct amdgpu_mmhub {


@@ -374,6 +374,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 if (r)
 return r;
+if ((*bo_ptr) == NULL)
+return 0;
+
 /*
 * Remove the original mem node and create a new one at the request
 * position.
@@ -552,7 +555,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
 if (bo == NULL)
 return -ENOMEM;
-drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
+drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
 INIT_LIST_HEAD(&bo->shadow_list);
 bo->vm_bo = NULL;
 bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
@@ -1299,7 +1302,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 }
 /**
- * amdgpu_bo_move_notify - notification about a BO being released
+ * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the


@@ -226,7 +226,7 @@ static int init_pmu_by_type(struct amdgpu_device *adev,
 pmu_entry->pmu.attr_groups = attr_groups;
 pmu_entry->pmu_perf_type = pmu_perf_type;
 snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
-pmu_file_prefix, adev->ddev->primary->index);
+pmu_file_prefix, adev_to_drm(adev)->primary->index);
 ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);

View File

@ -1429,6 +1429,168 @@ static int psp_dtm_terminate(struct psp_context *psp)
} }
// DTM end // DTM end
// RAP start
static int psp_rap_init_shared_buf(struct psp_context *psp)
{
int ret;
/*
* Allocate 16k of memory, aligned to 4k, from the frame buffer (local
* physical) for the RAP TA <-> driver interface
*/
ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&psp->rap_context.rap_shared_bo,
&psp->rap_context.rap_shared_mc_addr,
&psp->rap_context.rap_shared_buf);
return ret;
}
static int psp_rap_load(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
psp->ta_rap_ucode_size,
psp->rap_context.rap_shared_mc_addr,
PSP_RAP_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
psp->rap_context.rap_initialized = true;
psp->rap_context.session_id = cmd->resp.session_id;
mutex_init(&psp->rap_context.mutex);
}
kfree(cmd);
return ret;
}
static int psp_rap_unload(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
kfree(cmd);
return ret;
}
static int psp_rap_initialize(struct psp_context *psp)
{
int ret;
/*
* TODO: bypass initialization under SR-IOV for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->adev->psp.ta_rap_ucode_size ||
!psp->adev->psp.ta_rap_start_addr) {
dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
return 0;
}
if (!psp->rap_context.rap_initialized) {
ret = psp_rap_init_shared_buf(psp);
if (ret)
return ret;
}
ret = psp_rap_load(psp);
if (ret)
return ret;
ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE);
if (ret != TA_RAP_STATUS__SUCCESS) {
psp_rap_unload(psp);
amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
&psp->rap_context.rap_shared_mc_addr,
&psp->rap_context.rap_shared_buf);
psp->rap_context.rap_initialized = false;
dev_warn(psp->adev->dev, "RAP TA initialization failed.\n");
return -EINVAL;
}
return 0;
}
static int psp_rap_terminate(struct psp_context *psp)
{
int ret;
if (!psp->rap_context.rap_initialized)
return 0;
ret = psp_rap_unload(psp);
psp->rap_context.rap_initialized = false;
/* free rap shared memory */
amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
&psp->rap_context.rap_shared_mc_addr,
&psp->rap_context.rap_shared_buf);
return ret;
}
int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
struct ta_rap_shared_memory *rap_cmd;
int ret;
if (!psp->rap_context.rap_initialized)
return -EINVAL;
if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
return -EINVAL;
mutex_lock(&psp->rap_context.mutex);
rap_cmd = (struct ta_rap_shared_memory *)
psp->rap_context.rap_shared_buf;
memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
rap_cmd->cmd_id = ta_cmd_id;
rap_cmd->validation_method_id = METHOD_A;
ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id);
if (ret) {
mutex_unlock(&psp->rap_context.mutex);
return ret;
}
mutex_unlock(&psp->rap_context.mutex);
return rap_cmd->rap_status;
}
// RAP end
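The RAP TA thus follows the same lifecycle as the HDCP and DTM TAs above: allocate the shared buffer, load, invoke, terminate. A minimal sketch of that flow, assuming a psp_context inside amdgpu_psp.c where the static helpers are visible (the wrapper function itself is illustrative, not part of this patch):
/* Illustrative only: drive the RAP TA through one L0 validation
 * using the helpers added above. psp_rap_initialize() allocates
 * the shared buffer on first use, loads the TA and issues
 * TA_CMD_RAP__INITIALIZE internally.
 */
static int psp_rap_validate_l0_once(struct psp_context *psp)
{
	int status;

	status = psp_rap_initialize(psp);
	if (status)
		return status;

	/* psp_rap_invoke() returns the TA's rap_status on the
	 * success path, TA_RAP_STATUS__SUCCESS when the check passed. */
	status = psp_rap_invoke(psp, TA_CMD_RAP__VALIDATE_L0);
	if (status != TA_RAP_STATUS__SUCCESS)
		return -EINVAL;

	return psp_rap_terminate(psp);
}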
static int psp_hw_start(struct psp_context *psp) static int psp_hw_start(struct psp_context *psp)
{ {
struct amdgpu_device *adev = psp->adev; struct amdgpu_device *adev = psp->adev;
@ -1706,7 +1868,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
return 0; return 0;
if (adev->in_gpu_reset && ras && ras->supported) { if (amdgpu_in_reset(adev) && ras && ras->supported) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret) { if (ret) {
DRM_WARN("Failed to set MP1 state prepare for reload\n"); DRM_WARN("Failed to set MP1 state prepare for reload\n");
@ -1821,7 +1983,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret; int ret;
struct psp_context *psp = &adev->psp; struct psp_context *psp = &adev->psp;
if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) { if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */ psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
goto skip_memalloc; goto skip_memalloc;
} }
@ -1891,6 +2053,11 @@ static int psp_load_fw(struct amdgpu_device *adev)
if (ret) if (ret)
dev_err(psp->adev->dev, dev_err(psp->adev->dev,
"DTM: Failed to initialize DTM\n"); "DTM: Failed to initialize DTM\n");
ret = psp_rap_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"RAP: Failed to initialize RAP\n");
} }
return 0; return 0;
@ -1941,6 +2108,7 @@ static int psp_hw_fini(void *handle)
if (psp->adev->psp.ta_fw) { if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp); psp_ras_terminate(psp);
psp_rap_terminate(psp);
psp_dtm_terminate(psp); psp_dtm_terminate(psp);
psp_hdcp_terminate(psp); psp_hdcp_terminate(psp);
} }
@ -1999,6 +2167,11 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate dtm ta\n"); DRM_ERROR("Failed to terminate dtm ta\n");
return ret; return ret;
} }
ret = psp_rap_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate rap ta\n");
return ret;
}
} }
ret = psp_asd_unload(psp); ret = psp_asd_unload(psp);
@ -2077,6 +2250,11 @@ static int psp_resume(void *handle)
if (ret) if (ret)
dev_err(psp->adev->dev, dev_err(psp->adev->dev,
"DTM: Failed to initialize DTM\n"); "DTM: Failed to initialize DTM\n");
ret = psp_rap_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"RAP: Failed to initialize RAP\n");
} }
mutex_unlock(&adev->firmware.mutex); mutex_unlock(&adev->firmware.mutex);
@ -2342,6 +2520,11 @@ int parse_ta_bin_descriptor(struct psp_context *psp,
psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes); psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_dtm_start_addr = ucode_start_addr; psp->ta_dtm_start_addr = ucode_start_addr;
break; break;
case TA_FW_TYPE_PSP_RAP:
psp->ta_rap_ucode_version = le32_to_cpu(desc->fw_version);
psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_rap_start_addr = ucode_start_addr;
break;
default: default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
break; break;
@ -2420,7 +2603,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
char *buf) char *buf)
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t fw_ver; uint32_t fw_ver;
int ret; int ret;
@ -2447,7 +2630,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
size_t count) size_t count)
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = drm_to_adev(ddev);
void *cpu_addr; void *cpu_addr;
dma_addr_t dma_addr; dma_addr_t dma_addr;
int ret; int ret;

View File

@ -29,6 +29,7 @@
#include "psp_gfx_if.h" #include "psp_gfx_if.h"
#include "ta_xgmi_if.h" #include "ta_xgmi_if.h"
#include "ta_ras_if.h" #include "ta_ras_if.h"
#include "ta_rap_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000 #define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000 #define PSP_CMD_BUFFER_SIZE 0x1000
@ -38,6 +39,7 @@
#define PSP_TMR_SIZE 0x400000 #define PSP_TMR_SIZE 0x400000
#define PSP_HDCP_SHARED_MEM_SIZE 0x4000 #define PSP_HDCP_SHARED_MEM_SIZE 0x4000
#define PSP_DTM_SHARED_MEM_SIZE 0x4000 #define PSP_DTM_SHARED_MEM_SIZE 0x4000
#define PSP_RAP_SHARED_MEM_SIZE 0x4000
#define PSP_SHARED_MEM_SIZE 0x4000 #define PSP_SHARED_MEM_SIZE 0x4000
struct psp_context; struct psp_context;
@ -159,6 +161,15 @@ struct psp_dtm_context {
struct mutex mutex; struct mutex mutex;
}; };
struct psp_rap_context {
bool rap_initialized;
uint32_t session_id;
struct amdgpu_bo *rap_shared_bo;
uint64_t rap_shared_mc_addr;
void *rap_shared_buf;
struct mutex mutex;
};
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000 #define GDDR6_MEM_TRAINING_OFFSET 0x8000
@ -277,11 +288,16 @@ struct psp_context
uint32_t ta_dtm_ucode_size; uint32_t ta_dtm_ucode_size;
uint8_t *ta_dtm_start_addr; uint8_t *ta_dtm_start_addr;
uint32_t ta_rap_ucode_version;
uint32_t ta_rap_ucode_size;
uint8_t *ta_rap_start_addr;
struct psp_asd_context asd_context; struct psp_asd_context asd_context;
struct psp_xgmi_context xgmi_context; struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras; struct psp_ras_context ras;
struct psp_hdcp_context hdcp_context; struct psp_hdcp_context hdcp_context;
struct psp_dtm_context dtm_context; struct psp_dtm_context dtm_context;
struct psp_rap_context rap_context;
struct mutex mutex; struct mutex mutex;
struct psp_memory_training_context mem_train_ctx; struct psp_memory_training_context mem_train_ctx;
}; };
@ -357,6 +373,7 @@ int psp_ras_trigger_error(struct psp_context *psp,
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp); int psp_rlc_autoload_start(struct psp_context *psp);

View File

@ -0,0 +1,127 @@
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*
*/
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include "amdgpu.h"
#include "amdgpu_rap.h"
/**
* DOC: AMDGPU RAP debugfs test interface
*
* Usage:
* echo opcode > <debugfs_dir>/dri/xxx/rap_test
*
* opcode:
* Currently only opcode 2 is supported by the Linux host driver.
* Opcode 2 stands for TA_CMD_RAP__VALIDATE_L0 and triggers the L0
* policy validation; see the header file ta_rap_if.h for more
* detail.
*
*/
static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
struct ta_rap_shared_memory *rap_shared_mem;
struct ta_rap_cmd_output_data *rap_cmd_output;
struct drm_device *dev = adev_to_drm(adev);
uint32_t op;
int ret;
if (*pos || size != 2)
return -EINVAL;
ret = kstrtouint_from_user(buf, size, *pos, &op);
if (ret)
return ret;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
/* make sure the gfx core is on; the RAP TA can't handle the
* GFX OFF case currently.
*/
amdgpu_gfx_off_ctrl(adev, false);
switch (op) {
case 2:
ret = psp_rap_invoke(&adev->psp, op);
if (ret == TA_RAP_STATUS__SUCCESS) {
dev_info(adev->dev, "RAP L0 validate test success.\n");
} else {
rap_shared_mem = (struct ta_rap_shared_memory *)
adev->psp.rap_context.rap_shared_buf;
rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
dev_info(adev->dev, "RAP test failed, the output is:\n");
dev_info(adev->dev, "\tlast_subsection: 0x%08x.\n",
rap_cmd_output->last_subsection);
dev_info(adev->dev, "\tnum_total_validate: 0x%08x.\n",
rap_cmd_output->num_total_validate);
dev_info(adev->dev, "\tnum_valid: 0x%08x.\n",
rap_cmd_output->num_valid);
dev_info(adev->dev, "\tlast_validate_addr: 0x%08x.\n",
rap_cmd_output->last_validate_addr);
dev_info(adev->dev, "\tlast_validate_val: 0x%08x.\n",
rap_cmd_output->last_validate_val);
dev_info(adev->dev, "\tlast_validate_val_exptd: 0x%08x.\n",
rap_cmd_output->last_validate_val_exptd);
}
break;
default:
dev_info(adev->dev, "Unsupported op id: %d, ", op);
dev_info(adev->dev, "Only support op 2(L0 validate test).\n");
}
amdgpu_gfx_off_ctrl(adev, true);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
}
static const struct file_operations amdgpu_rap_debugfs_ops = {
.owner = THIS_MODULE,
.read = NULL,
.write = amdgpu_rap_debugfs_write,
.llseek = default_llseek
};
void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
if (!adev->psp.rap_context.rap_initialized)
return;
debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
adev, &amdgpu_rap_debugfs_ops);
#endif
}
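From userspace the node is exercised by writing the opcode as text; `echo 2 > <debugfs_dir>/dri/xxx/rap_test` is the canonical form. An equivalent hedged C sketch (the DRM minor index 0 and the /sys/kernel/debug mount point are assumptions, not values from this patch):
/* Illustrative userspace test: trigger RAP L0 validation.
 * Assumes debugfs is mounted at /sys/kernel/debug and the GPU is
 * DRM minor 0; adjust the path for the actual system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/rap_test", O_WRONLY);

	if (fd < 0) {
		perror("open rap_test");
		return 1;
	}
	/* The handler insists on exactly two bytes, e.g. "2\n". */
	if (write(fd, "2\n", 2) != 2)
		perror("write rap_test");
	close(fd);
	return 0;
}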

View File

@ -0,0 +1,30 @@
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*
*/
#ifndef _AMDGPU_RAP_H
#define _AMDGPU_RAP_H
#include "amdgpu.h"
void amdgpu_rap_debugfs_init(struct amdgpu_device *adev);
#endif

View File

@ -34,6 +34,8 @@
#include "amdgpu_xgmi.h" #include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
static const char *RAS_FS_NAME = "ras";
const char *ras_error_string[] = { const char *ras_error_string[] = {
"none", "none",
"parity", "parity",
@ -62,13 +64,14 @@ const char *ras_block_string[] = {
#define ras_err_str(i) (ras_error_string[ffs(i)]) #define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i]) #define ras_block_str(i) (ras_block_string[i])
#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
#define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
/* inject address is 52 bits */ /* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
/* typical ECC bad page rate (1 bad page per 100MB VRAM) */
#define RAS_BAD_PAGE_RATE (100 * 1024 * 1024ULL)
enum amdgpu_ras_retire_page_reservation { enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED, AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING, AMDGPU_RAS_RETIRE_PAGE_PENDING,
@ -367,12 +370,19 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf, static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos) size_t size, loff_t *pos)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; struct amdgpu_device *adev =
(struct amdgpu_device *)file_inode(f)->i_private;
int ret; int ret;
ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control); ret = amdgpu_ras_eeprom_reset_table(
&(amdgpu_ras_get_context(adev)->eeprom_control));
return ret == 1 ? size : -EIO; if (ret == 1) {
amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
return size;
} else {
return -EIO;
}
} }
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
@ -1017,29 +1027,13 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features); return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
} }
static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev) static void amdgpu_ras_sysfs_add_bad_page_node(struct amdgpu_device *adev)
{ {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct attribute *attrs[] = { struct attribute_group group;
&con->features_attr.attr,
NULL
};
struct bin_attribute *bin_attrs[] = { struct bin_attribute *bin_attrs[] = {
&con->badpages_attr, &con->badpages_attr,
NULL NULL,
};
struct attribute_group group = {
.name = "ras",
.attrs = attrs,
.bin_attrs = bin_attrs,
};
con->features_attr = (struct device_attribute) {
.attr = {
.name = "features",
.mode = S_IRUGO,
},
.show = amdgpu_ras_sysfs_features_read,
}; };
con->badpages_attr = (struct bin_attribute) { con->badpages_attr = (struct bin_attribute) {
@ -1052,12 +1046,48 @@ static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
.read = amdgpu_ras_sysfs_badpages_read, .read = amdgpu_ras_sysfs_badpages_read,
}; };
sysfs_attr_init(attrs[0]); group.name = RAS_FS_NAME;
group.bin_attrs = bin_attrs;
sysfs_bin_attr_init(bin_attrs[0]); sysfs_bin_attr_init(bin_attrs[0]);
sysfs_update_group(&adev->dev->kobj, &group);
}
static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct attribute *attrs[] = {
&con->features_attr.attr,
NULL
};
struct attribute_group group = {
.name = RAS_FS_NAME,
.attrs = attrs,
};
con->features_attr = (struct device_attribute) {
.attr = {
.name = "features",
.mode = S_IRUGO,
},
.show = amdgpu_ras_sysfs_features_read,
};
sysfs_attr_init(attrs[0]);
return sysfs_create_group(&adev->dev->kobj, &group); return sysfs_create_group(&adev->dev->kobj, &group);
} }
static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
sysfs_remove_file_from_group(&adev->dev->kobj,
&con->badpages_attr.attr,
RAS_FS_NAME);
}
static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{ {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@ -1065,14 +1095,9 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
&con->features_attr.attr, &con->features_attr.attr,
NULL NULL
}; };
struct bin_attribute *bin_attrs[] = {
&con->badpages_attr,
NULL
};
struct attribute_group group = { struct attribute_group group = {
.name = "ras", .name = RAS_FS_NAME,
.attrs = attrs, .attrs = attrs,
.bin_attrs = bin_attrs,
}; };
sysfs_remove_group(&adev->dev->kobj, &group); sysfs_remove_group(&adev->dev->kobj, &group);
@ -1105,7 +1130,7 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
if (sysfs_add_file_to_group(&adev->dev->kobj, if (sysfs_add_file_to_group(&adev->dev->kobj,
&obj->sysfs_attr.attr, &obj->sysfs_attr.attr,
"ras")) { RAS_FS_NAME)) {
put_obj(obj); put_obj(obj);
return -EINVAL; return -EINVAL;
} }
@ -1125,7 +1150,7 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
sysfs_remove_file_from_group(&adev->dev->kobj, sysfs_remove_file_from_group(&adev->dev->kobj,
&obj->sysfs_attr.attr, &obj->sysfs_attr.attr,
"ras"); RAS_FS_NAME);
obj->attr_inuse = 0; obj->attr_inuse = 0;
put_obj(obj); put_obj(obj);
@ -1141,6 +1166,9 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_sysfs_remove(adev, &obj->head); amdgpu_ras_sysfs_remove(adev, &obj->head);
} }
if (amdgpu_bad_page_threshold != 0)
amdgpu_ras_sysfs_remove_bad_page_node(adev);
amdgpu_ras_sysfs_remove_feature_node(adev); amdgpu_ras_sysfs_remove_feature_node(adev);
return 0; return 0;
@ -1169,9 +1197,9 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{ {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct drm_minor *minor = adev->ddev->primary; struct drm_minor *minor = adev_to_drm(adev)->primary;
con->dir = debugfs_create_dir("ras", minor->debugfs_root); con->dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
adev, &amdgpu_ras_debugfs_ctrl_ops); adev, &amdgpu_ras_debugfs_ctrl_ops);
debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir, debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
@ -1187,6 +1215,13 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
*/ */
debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir, debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
&con->reboot); &con->reboot);
/*
* The user can set this to skip cleaning up the hardware error count
* registers of RAS IPs during RAS recovery.
*/
debugfs_create_bool("disable_ras_err_cnt_harvest", 0644,
con->dir, &con->disable_ras_err_cnt_harvest);
} }
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
@ -1211,6 +1246,7 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{ {
#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj; struct ras_manager *obj;
struct ras_fs_if fs_info; struct ras_fs_if fs_info;
@ -1233,6 +1269,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_create(adev, &fs_info); amdgpu_ras_debugfs_create(adev, &fs_info);
} }
} }
#endif
} }
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
@ -1249,6 +1286,7 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
{ {
#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj, *tmp; struct ras_manager *obj, *tmp;
@ -1257,6 +1295,7 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
} }
con->dir = NULL; con->dir = NULL;
#endif
} }
/* debugfs end */ /* debugfs end */
@ -1266,6 +1305,9 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{ {
amdgpu_ras_sysfs_create_feature_node(adev); amdgpu_ras_sysfs_create_feature_node(adev);
if (amdgpu_bad_page_threshold != 0)
amdgpu_ras_sysfs_add_bad_page_node(adev);
return 0; return 0;
} }
@ -1512,23 +1554,28 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_device *remote_adev = NULL; struct amdgpu_device *remote_adev = NULL;
struct amdgpu_device *adev = ras->adev; struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle = NULL; struct list_head device_list, *device_list_handle = NULL;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
/* Build list of devices to query RAS related errors */ if (!ras->disable_ras_err_cnt_harvest) {
if (hive && adev->gmc.xgmi.num_physical_nodes > 1) struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
device_list_handle = &hive->device_list;
else {
INIT_LIST_HEAD(&device_list);
list_add_tail(&adev->gmc.xgmi.head, &device_list);
device_list_handle = &device_list;
}
list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) { /* Build list of devices to query RAS related errors */
amdgpu_ras_log_on_err_counter(remote_adev); if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
device_list_handle = &hive->device_list;
} else {
INIT_LIST_HEAD(&device_list);
list_add_tail(&adev->gmc.xgmi.head, &device_list);
device_list_handle = &device_list;
}
list_for_each_entry(remote_adev,
device_list_handle, gmc.xgmi.head)
amdgpu_ras_log_on_err_counter(remote_adev);
amdgpu_put_xgmi_hive(hive);
} }
if (amdgpu_device_should_recover_gpu(ras->adev)) if (amdgpu_device_should_recover_gpu(ras->adev))
amdgpu_device_gpu_recover(ras->adev, 0); amdgpu_device_gpu_recover(ras->adev, NULL);
atomic_set(&ras->in_recovery, 0); atomic_set(&ras->in_recovery, 0);
} }
@ -1643,7 +1690,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
int ret = 0; int ret = 0;
/* no bad page record, skip eeprom access */ /* no bad page record, skip eeprom access */
if (!control->num_recs) if (!control->num_recs || (amdgpu_bad_page_threshold == 0))
return ret; return ret;
bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL); bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
@ -1697,6 +1744,47 @@ static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
return ret; return ret;
} }
static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
uint32_t max_length)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int tmp_threshold = amdgpu_bad_page_threshold;
u64 val;
/*
* Justification of the bad_page_cnt_threshold value in the ras
* structure:
*
* Generally -1 <= amdgpu_bad_page_threshold <= max record length
* in eeprom, which gives two scenarios:
*
* Bad page retirement enabled:
* - If amdgpu_bad_page_threshold = -1, bad_page_cnt_threshold is
* derived from the typical-rate formula below.
*
* - If the user supplies 0 < amdgpu_bad_page_threshold < max
* record length in eeprom, use that value directly.
*
* Bad page retirement disabled:
* - If amdgpu_bad_page_threshold = 0, bad page retirement is
* disabled and bad_page_cnt_threshold has no effect.
*/
if (tmp_threshold < -1)
tmp_threshold = -1;
else if (tmp_threshold > max_length)
tmp_threshold = max_length;
if (tmp_threshold == -1) {
val = adev->gmc.mc_vram_size;
do_div(val, RAS_BAD_PAGE_RATE);
con->bad_page_cnt_threshold = min(lower_32_bits(val),
max_length);
} else {
con->bad_page_cnt_threshold = tmp_threshold;
}
}
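For a concrete feel of the default branch (the VRAM size and eeprom cap below are assumptions for the example, not values from this patch): 64 GiB of VRAM at one bad page per 100 MiB gives 65536 / 100 = 655 pages, clamped to the eeprom record limit. A standalone sketch of the same arithmetic:
/* Illustrative re-run of the -1 (default) branch above with a
 * made-up 64 GiB board; mirrors the do_div()/min() computation. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t vram_size = 64ULL << 30;      /* assumed 64 GiB */
	const uint64_t bad_page_rate = 100ULL << 20; /* 100 MiB per page */
	const uint32_t max_length = 1000;            /* assumed eeprom cap */
	uint64_t val = vram_size / bad_page_rate;    /* 655 */
	uint32_t threshold =
		val < max_length ? (uint32_t)val : max_length;

	printf("bad_page_cnt_threshold = %u\n", threshold);
	return 0;
}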
/* called in gpu recovery/init */ /* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{ {
@ -1706,7 +1794,8 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
struct amdgpu_bo *bo = NULL; struct amdgpu_bo *bo = NULL;
int i, ret = 0; int i, ret = 0;
if (!con || !con->eh_data) /* Do not reserve bad pages when amdgpu_bad_page_threshold == 0. */
if (!con || !con->eh_data || (amdgpu_bad_page_threshold == 0))
return 0; return 0;
mutex_lock(&con->recovery_lock); mutex_lock(&con->recovery_lock);
@ -1774,6 +1863,8 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{ {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data **data; struct ras_err_handler_data **data;
uint32_t max_eeprom_records_len = 0;
bool exc_err_limit = false;
int ret; int ret;
if (con) if (con)
@ -1792,8 +1883,15 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
atomic_set(&con->in_recovery, 0); atomic_set(&con->in_recovery, 0);
con->adev = adev; con->adev = adev;
ret = amdgpu_ras_eeprom_init(&con->eeprom_control); max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
if (ret) amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
/*
* This call fails either when exc_err_limit is true or when
* ret != 0.
*/
if (exc_err_limit || ret)
goto free; goto free;
if (con->eeprom_control.num_recs) { if (con->eeprom_control.num_recs) {
@ -1817,6 +1915,15 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
out: out:
dev_warn(adev->dev, "Failed to initialize ras recovery!\n"); dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
/*
* Except for the error-threshold-exceeded case, other failures in
* this function do not fail amdgpu driver init.
*/
if (!exc_err_limit)
ret = 0;
else
ret = -EINVAL;
return ret; return ret;
} }
@ -1856,6 +1963,17 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
return 0; return 0;
} }
static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
{
if (adev->asic_type != CHIP_VEGA10 &&
adev->asic_type != CHIP_VEGA20 &&
adev->asic_type != CHIP_ARCTURUS &&
adev->asic_type != CHIP_SIENNA_CICHLID)
return 1;
else
return 0;
}
/* /*
* check hardware's ras ability which will be saved in hw_supported. * check hardware's ras ability which will be saved in hw_supported.
* if hardware does not support ras, we can skip some ras initialization and * if hardware does not support ras, we can skip some ras initialization and
@ -1872,8 +1990,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*supported = 0; *supported = 0;
if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
(adev->asic_type != CHIP_VEGA20 && amdgpu_ras_check_asic_type(adev))
adev->asic_type != CHIP_ARCTURUS))
return; return;
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
@ -1895,6 +2012,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*supported = amdgpu_ras_enable == 0 ? *supported = amdgpu_ras_enable == 0 ?
0 : *hw_supported & amdgpu_ras_mask; 0 : *hw_supported & amdgpu_ras_mask;
adev->ras_features = *supported;
} }
int amdgpu_ras_init(struct amdgpu_device *adev) int amdgpu_ras_init(struct amdgpu_device *adev)
@ -1917,9 +2035,9 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_check_supported(adev, &con->hw_supported, amdgpu_ras_check_supported(adev, &con->hw_supported,
&con->supported); &con->supported);
if (!con->hw_supported) { if (!con->hw_supported || (adev->asic_type == CHIP_VEGA10)) {
r = 0; r = 0;
goto err_out; goto release_con;
} }
con->features = 0; con->features = 0;
@ -1930,25 +2048,25 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (adev->nbio.funcs->init_ras_controller_interrupt) { if (adev->nbio.funcs->init_ras_controller_interrupt) {
r = adev->nbio.funcs->init_ras_controller_interrupt(adev); r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
if (r) if (r)
goto err_out; goto release_con;
} }
if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) { if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev); r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
if (r) if (r)
goto err_out; goto release_con;
} }
if (amdgpu_ras_fs_init(adev)) { if (amdgpu_ras_fs_init(adev)) {
r = -EINVAL; r = -EINVAL;
goto err_out; goto release_con;
} }
dev_info(adev->dev, "RAS INFO: ras initialized successfully, " dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n", "hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported); con->hw_supported, con->supported);
return 0; return 0;
err_out: release_con:
amdgpu_ras_set_context(adev, NULL); amdgpu_ras_set_context(adev, NULL);
kfree(con); kfree(con);
@ -1976,7 +2094,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
amdgpu_ras_request_reset_on_boot(adev, amdgpu_ras_request_reset_on_boot(adev,
ras_block->block); ras_block->block);
return 0; return 0;
} else if (adev->in_suspend || adev->in_gpu_reset) { } else if (adev->in_suspend || amdgpu_in_reset(adev)) {
/* in resume phase, if fail to enable ras, /* in resume phase, if fail to enable ras,
* clean up all ras fs nodes, and disable ras */ * clean up all ras fs nodes, and disable ras */
goto cleanup; goto cleanup;
@ -1985,7 +2103,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
} }
/* in resume phase, no need to create ras fs node */ /* in resume phase, no need to create ras fs node */
if (adev->in_suspend || adev->in_gpu_reset) if (adev->in_suspend || amdgpu_in_reset(adev))
return 0; return 0;
if (ih_info->cb) { if (ih_info->cb) {
@ -2143,3 +2261,19 @@ bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
return false; return false;
} }
bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
bool exc_err_limit = false;
if (con && (amdgpu_bad_page_threshold != 0))
amdgpu_ras_eeprom_check_err_threshold(&con->eeprom_control,
&exc_err_limit);
/*
* We are only interested in the exc_err_limit variable, which
* says whether the GPU is in a bad state or not.
*/
return exc_err_limit;
}
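A hedged sketch of a plausible call site for this helper during early init (the gating policy and function name below are illustrative; only amdgpu_ras_check_err_threshold() itself comes from this patch):
/* Illustrative only: refuse to bring the device up when the eeprom
 * header already carries the BAD GPU tag. */
static int example_ras_boot_gate(struct amdgpu_device *adev)
{
	if (amdgpu_ras_check_err_threshold(adev)) {
		dev_err(adev->dev,
			"bad page threshold exceeded, aborting init\n");
		return -EINVAL;
	}
	return 0;
}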

View File

@ -31,6 +31,10 @@
#include "ta_ras_if.h" #include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h" #include "amdgpu_ras_eeprom.h"
#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0)
#define AMDGPU_RAS_FLAG_INIT_NEED_RESET (0x1 << 1)
#define AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV (0x1 << 2)
enum amdgpu_ras_block { enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0, AMDGPU_RAS_BLOCK__UMC = 0,
AMDGPU_RAS_BLOCK__SDMA, AMDGPU_RAS_BLOCK__SDMA,
@ -336,6 +340,12 @@ struct amdgpu_ras {
struct amdgpu_ras_eeprom_control eeprom_control; struct amdgpu_ras_eeprom_control eeprom_control;
bool error_query_ready; bool error_query_ready;
/* bad page count threshold */
uint32_t bad_page_cnt_threshold;
/* disable ras error count harvest in recovery */
bool disable_ras_err_cnt_harvest;
}; };
struct ras_fs_data { struct ras_fs_data {
@ -490,6 +500,8 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev);
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
bool is_ce); bool is_ce);
bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev);
/* error handling functions */ /* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages); struct eeprom_table_record *bps, int pages);
@ -500,10 +512,14 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{ {
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
/* save bad page to eeprom before gpu reset, /*
* i2c may be unstable in gpu reset * Save bad page to eeprom before gpu reset, i2c may be unstable
* in gpu reset.
*
* Also, exclude the case when ras recovery issuer is
* eeprom page write itself.
*/ */
if (in_task()) if (!(ras->flags & AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV) && in_task())
amdgpu_ras_reserve_bad_pages(adev); amdgpu_ras_reserve_bad_pages(adev);
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)

View File

@ -46,6 +46,9 @@
#define EEPROM_TABLE_HDR_VAL 0x414d4452 #define EEPROM_TABLE_HDR_VAL 0x414d4452
#define EEPROM_TABLE_VER 0x00010000 #define EEPROM_TABLE_VER 0x00010000
/* Bad GPU tag BADG */
#define EEPROM_TABLE_HDR_BAD 0x42414447
/* Assume 2 Mbit size */ /* Assume 2 Mbit size */
#define EEPROM_SIZE_BYTES 256000 #define EEPROM_SIZE_BYTES 256000
#define EEPROM_PAGE__SIZE_BYTES 256 #define EEPROM_PAGE__SIZE_BYTES 256
@ -56,6 +59,15 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev #define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_VEGA20) ||
(adev->asic_type == CHIP_ARCTURUS))
return true;
return false;
}
static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev, static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
uint16_t *i2c_addr) uint16_t *i2c_addr)
{ {
@ -213,6 +225,24 @@ static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
return true; return true;
} }
static int amdgpu_ras_eeprom_correct_header_tag(
struct amdgpu_ras_eeprom_control *control,
uint32_t header)
{
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE];
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
int ret = 0;
memset(buff, 0, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE);
mutex_lock(&control->tbl_mutex);
hdr->header = header;
ret = __update_table_header(control, buff);
mutex_unlock(&control->tbl_mutex);
return ret;
}
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
{ {
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 }; unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
@ -238,12 +268,14 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
} }
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
bool *exceed_err_limit)
{ {
int ret = 0; int ret = 0;
struct amdgpu_device *adev = to_amdgpu_device(control); struct amdgpu_device *adev = to_amdgpu_device(control);
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 }; unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
struct i2c_msg msg = { struct i2c_msg msg = {
.addr = 0, .addr = 0,
.flags = I2C_M_RD, .flags = I2C_M_RD,
@ -251,6 +283,11 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
.buf = buff, .buf = buff,
}; };
*exceed_err_limit = false;
if (!__is_ras_eeprom_supported(adev))
return 0;
/* Verify i2c adapter is initialized */ /* Verify i2c adapter is initialized */
if (!adev->pm.smu_i2c.algo) if (!adev->pm.smu_i2c.algo)
return -ENOENT; return -ENOENT;
@ -279,6 +316,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
control->num_recs); control->num_recs);
} else if ((hdr->header == EEPROM_TABLE_HDR_BAD) &&
(amdgpu_bad_page_threshold != 0)) {
if (ras->bad_page_cnt_threshold > control->num_recs) {
dev_info(adev->dev, "Using one valid bigger bad page "
"threshold and correcting eeprom header tag.\n");
ret = amdgpu_ras_eeprom_correct_header_tag(control,
EEPROM_TABLE_HDR_VAL);
} else {
*exceed_err_limit = true;
dev_err(adev->dev, "Exceeding the bad_page_threshold parameter, "
"disabling the GPU.\n");
}
} else { } else {
DRM_INFO("Creating new EEPROM table"); DRM_INFO("Creating new EEPROM table");
@ -375,6 +424,49 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
return curr_address; return curr_address;
} }
int amdgpu_ras_eeprom_check_err_threshold(
struct amdgpu_ras_eeprom_control *control,
bool *exceed_err_limit)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
unsigned char buff[EEPROM_ADDRESS_SIZE +
EEPROM_TABLE_HEADER_SIZE] = { 0 };
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct i2c_msg msg = {
.addr = control->i2c_address,
.flags = I2C_M_RD,
.len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
.buf = buff,
};
int ret;
*exceed_err_limit = false;
if (!__is_ras_eeprom_supported(adev))
return 0;
/* read EEPROM table header */
mutex_lock(&control->tbl_mutex);
ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
if (ret < 1) {
dev_err(adev->dev, "Failed to read EEPROM table header.\n");
goto err;
}
__decode_table_header_from_buff(hdr, &buff[2]);
if (hdr->header == EEPROM_TABLE_HDR_BAD) {
dev_warn(adev->dev, "This GPU is in BAD status.");
dev_warn(adev->dev, "Please retire it or setting one bigger "
"threshold value when reloading driver.\n");
*exceed_err_limit = true;
}
err:
mutex_unlock(&control->tbl_mutex);
return 0;
}
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records, struct eeprom_table_record *records,
bool write, bool write,
@ -383,10 +475,12 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
int i, ret = 0; int i, ret = 0;
struct i2c_msg *msgs, *msg; struct i2c_msg *msgs, *msg;
unsigned char *buffs, *buff; unsigned char *buffs, *buff;
bool sched_ras_recovery = false;
struct eeprom_table_record *record; struct eeprom_table_record *record;
struct amdgpu_device *adev = to_amdgpu_device(control); struct amdgpu_device *adev = to_amdgpu_device(control);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS) if (!__is_ras_eeprom_supported(adev))
return 0; return 0;
buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE, buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
@ -402,11 +496,30 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
goto free_buff; goto free_buff;
} }
/*
* If the number of saved bad pages exceeds the bad page threshold
* for the whole VRAM, update the table header with the BAD GPU tag
* and schedule a RAS recovery after the eeprom write completes, so
* the latest records are not lost.
*
* The new header is picked up and checked by RAS recovery at the
* next boot, which may interrupt the boot process to notify the
* user that this GPU is in a bad state and should be retired for
* further inspection.
*/
if (write && (amdgpu_bad_page_threshold != 0) &&
((control->num_recs + num) >= ras->bad_page_cnt_threshold)) {
dev_warn(adev->dev,
"Saved bad pages(%d) reaches threshold value(%d).\n",
control->num_recs + num, ras->bad_page_cnt_threshold);
control->tbl_hdr.header = EEPROM_TABLE_HDR_BAD;
sched_ras_recovery = true;
}
/* In case of overflow just start from beginning to not lose newest records */ /* In case of overflow just start from beginning to not lose newest records */
if (write && (control->next_addr + EEPROM_TABLE_RECORD_SIZE * num > EEPROM_SIZE_BYTES)) if (write && (control->next_addr + EEPROM_TABLE_RECORD_SIZE * num > EEPROM_SIZE_BYTES))
control->next_addr = EEPROM_RECORD_START; control->next_addr = EEPROM_RECORD_START;
/* /*
* TODO Currently this makes an EEPROM write for each record, which creates * TODO Currently this makes an EEPROM write for each record, which creates
* internal fragmentation. Optimize the code to do a full page write of * internal fragmentation. Optimize the code to do a full page write of
@ -482,6 +595,20 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
__update_tbl_checksum(control, records, num, old_hdr_byte_sum); __update_tbl_checksum(control, records, num, old_hdr_byte_sum);
__update_table_header(control, buffs); __update_table_header(control, buffs);
if (sched_ras_recovery) {
/*
* Before scheduling RAS recovery, set the related flag first,
* which bypasses the common bad page reservation in
* amdgpu_ras_reset_gpu.
*/
amdgpu_ras_get_context(adev)->flags |=
AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV;
dev_warn(adev->dev, "Conduct ras recovery due to bad "
"page threshold reached.\n");
amdgpu_ras_reset_gpu(adev);
}
} else if (!__validate_tbl_checksum(control, records, num)) { } else if (!__validate_tbl_checksum(control, records, num)) {
DRM_WARN("EEPROM Table checksum mismatch!"); DRM_WARN("EEPROM Table checksum mismatch!");
/* TODO Uncomment when EEPROM read/write is reliable */ /* TODO Uncomment when EEPROM read/write is reliable */
@ -499,6 +626,11 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
return ret == num ? 0 : -EIO; return ret == num ? 0 : -EIO;
} }
inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void)
{
return EEPROM_MAX_RECORD_NUM;
}
/* Used for testing if bugs encountered */ /* Used for testing if bugs encountered */
#if 0 #if 0
void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control) void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control)

View File

@ -76,14 +76,21 @@ struct eeprom_table_record {
unsigned char mcumc_id; unsigned char mcumc_id;
}__attribute__((__packed__)); }__attribute__((__packed__));
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
bool *exceed_err_limit);
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_check_err_threshold(
struct amdgpu_ras_eeprom_control *control,
bool *exceed_err_limit);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records, struct eeprom_table_record *records,
bool write, bool write,
int num); int num);
inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void);
void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control); void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control);
#endif // _AMDGPU_RAS_EEPROM_H #endif // _AMDGPU_RAS_EEPROM_H

View File

@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
&ring->sched; &ring->sched;
} }
for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
atomic_set(&ring->num_jobs[i], 0); atomic_set(&ring->num_jobs[i], 0);
return 0; return 0;
@ -420,7 +420,7 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring) struct amdgpu_ring *ring)
{ {
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev->ddev->primary; struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *ent, *root = minor->debugfs_root; struct dentry *ent, *root = minor->debugfs_root;
char name[32]; char name[32];

View File

@ -243,7 +243,7 @@ struct amdgpu_ring {
bool has_compute_vm_bug; bool has_compute_vm_bug;
bool no_scheduler; bool no_scheduler;
atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT];
struct mutex priority_mutex; struct mutex priority_mutex;
/* protected by priority_mutex */ /* protected by priority_mutex */
int priority; int priority;

View File

@ -32,24 +32,32 @@
#include "amdgpu_vm.h" #include "amdgpu_vm.h"
enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority) int amdgpu_to_sched_priority(int amdgpu_priority,
enum drm_sched_priority *prio)
{ {
switch (amdgpu_priority) { switch (amdgpu_priority) {
case AMDGPU_CTX_PRIORITY_VERY_HIGH: case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return DRM_SCHED_PRIORITY_HIGH_HW; *prio = DRM_SCHED_PRIORITY_HIGH;
break;
case AMDGPU_CTX_PRIORITY_HIGH: case AMDGPU_CTX_PRIORITY_HIGH:
return DRM_SCHED_PRIORITY_HIGH_SW; *prio = DRM_SCHED_PRIORITY_HIGH;
break;
case AMDGPU_CTX_PRIORITY_NORMAL: case AMDGPU_CTX_PRIORITY_NORMAL:
return DRM_SCHED_PRIORITY_NORMAL; *prio = DRM_SCHED_PRIORITY_NORMAL;
break;
case AMDGPU_CTX_PRIORITY_LOW: case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_VERY_LOW: case AMDGPU_CTX_PRIORITY_VERY_LOW:
return DRM_SCHED_PRIORITY_LOW; *prio = DRM_SCHED_PRIORITY_MIN;
break;
case AMDGPU_CTX_PRIORITY_UNSET: case AMDGPU_CTX_PRIORITY_UNSET:
return DRM_SCHED_PRIORITY_UNSET; *prio = DRM_SCHED_PRIORITY_UNSET;
break;
default: default:
WARN(1, "Invalid context priority %d\n", amdgpu_priority); WARN(1, "Invalid context priority %d\n", amdgpu_priority);
return DRM_SCHED_PRIORITY_INVALID; return -EINVAL;
} }
return 0;
} }
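Note that both AMDGPU_CTX_PRIORITY_VERY_HIGH and AMDGPU_CTX_PRIORITY_HIGH now collapse onto DRM_SCHED_PRIORITY_HIGH, since the scheduler no longer distinguishes HW from SW high priority. A hedged caller sketch for the new int-returning form (the wrapper is illustrative, not part of this patch):
/* Illustrative only: translate a UAPI priority and propagate
 * -EINVAL for out-of-range input instead of using a sentinel. */
static int example_translate_priority(int uapi_priority)
{
	enum drm_sched_priority prio;
	int r = amdgpu_to_sched_priority(uapi_priority, &prio);

	if (r)
		return r; /* invalid value straight from userspace */
	/* ... hand prio to drm_sched_entity_init() or similar ... */
	return 0;
}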
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
@ -115,13 +123,24 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp) struct drm_file *filp)
{ {
union drm_amdgpu_sched *args = data; union drm_amdgpu_sched *args = data;
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = drm_to_adev(dev);
enum drm_sched_priority priority; enum drm_sched_priority priority;
int r; int r;
priority = amdgpu_to_sched_priority(args->in.priority); /* First check the op, then the op's argument.
if (priority == DRM_SCHED_PRIORITY_INVALID) */
switch (args->in.op) {
case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
break;
default:
DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
return -EINVAL; return -EINVAL;
}
r = amdgpu_to_sched_priority(args->in.priority, &priority);
if (r)
return r;
switch (args->in.op) { switch (args->in.op) {
case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE: case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
@ -136,7 +155,8 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
priority); priority);
break; break;
default: default:
DRM_ERROR("Invalid sched op specified: %d\n", args->in.op); /* Impossible.
*/
r = -EINVAL; r = -EINVAL;
break; break;
} }

View File

@ -30,7 +30,8 @@ enum drm_sched_priority;
struct drm_device; struct drm_device;
struct drm_file; struct drm_file;
enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority); int amdgpu_to_sched_priority(int amdgpu_priority,
enum drm_sched_priority *prio);
int amdgpu_sched_ioctl(struct drm_device *dev, void *data, int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp); struct drm_file *filp);

View File

@ -1716,8 +1716,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
*/ */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{ {
amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
NULL, &adev->fw_vram_usage.va); NULL, &adev->mman.fw_vram_usage_va);
} }
/** /**
@ -1731,19 +1731,19 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{ {
uint64_t vram_size = adev->gmc.visible_vram_size; uint64_t vram_size = adev->gmc.visible_vram_size;
adev->fw_vram_usage.va = NULL; adev->mman.fw_vram_usage_va = NULL;
adev->fw_vram_usage.reserved_bo = NULL; adev->mman.fw_vram_usage_reserved_bo = NULL;
if (adev->fw_vram_usage.size == 0 || if (adev->mman.fw_vram_usage_size == 0 ||
adev->fw_vram_usage.size > vram_size) adev->mman.fw_vram_usage_size > vram_size)
return 0; return 0;
return amdgpu_bo_create_kernel_at(adev, return amdgpu_bo_create_kernel_at(adev,
adev->fw_vram_usage.start_offset, adev->mman.fw_vram_usage_start_offset,
adev->fw_vram_usage.size, adev->mman.fw_vram_usage_size,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
&adev->fw_vram_usage.reserved_bo, &adev->mman.fw_vram_usage_reserved_bo,
&adev->fw_vram_usage.va); &adev->mman.fw_vram_usage_va);
} }
/* /*
@ -1775,7 +1775,7 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
memset(ctx, 0, sizeof(*ctx)); memset(ctx, 0, sizeof(*ctx));
ctx->c2p_train_data_offset = ctx->c2p_train_data_offset =
ALIGN((adev->gmc.mc_vram_size - adev->discovery_tmr_size - SZ_1M), SZ_1M); ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
ctx->p2c_train_data_offset = ctx->p2c_train_data_offset =
(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size = ctx->train_data_size =
@ -1814,10 +1814,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
* Otherwise, fallback to legacy approach to check and reserve tmr block for ip * Otherwise, fallback to legacy approach to check and reserve tmr block for ip
* discovery data and G6 memory training data respectively * discovery data and G6 memory training data respectively
*/ */
adev->discovery_tmr_size = adev->mman.discovery_tmr_size =
amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
if (!adev->discovery_tmr_size) if (!adev->mman.discovery_tmr_size)
adev->discovery_tmr_size = DISCOVERY_TMR_OFFSET; adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
if (mem_train_support) { if (mem_train_support) {
/* reserve vram for mem train according to TMR location */ /* reserve vram for mem train according to TMR location */
@ -1837,14 +1837,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
} }
ret = amdgpu_bo_create_kernel_at(adev, ret = amdgpu_bo_create_kernel_at(adev,
adev->gmc.real_vram_size - adev->discovery_tmr_size, adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
adev->discovery_tmr_size, adev->mman.discovery_tmr_size,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
&adev->discovery_memory, &adev->mman.discovery_memory,
NULL); NULL);
if (ret) { if (ret) {
DRM_ERROR("alloc tmr failed(%d)!\n", ret); DRM_ERROR("alloc tmr failed(%d)!\n", ret);
amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
return ret; return ret;
} }
@ -1865,15 +1865,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
uint64_t gtt_size; uint64_t gtt_size;
int r; int r;
u64 vis_vram_limit; u64 vis_vram_limit;
void *stolen_vga_buf;
mutex_init(&adev->mman.gtt_window_lock); mutex_init(&adev->mman.gtt_window_lock);
/* No other users of the address space, so set it to 0 */ /* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev, r = ttm_bo_device_init(&adev->mman.bdev,
&amdgpu_bo_driver, &amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping, adev_to_drm(adev)->anon_inode->i_mapping,
adev->ddev->vma_offset_manager, adev_to_drm(adev)->vma_offset_manager,
dma_addressing_limited(adev->dev)); dma_addressing_limited(adev->dev));
if (r) { if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r); DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@ -1918,7 +1917,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
* If IP discovery is enabled, a block of memory should be * If IP discovery is enabled, a block of memory should be
* reserved for IP discovery. * reserved for IP discovery.
*/ */
if (adev->discovery_bin) { if (adev->mman.discovery_bin) {
r = amdgpu_ttm_reserve_tmr(adev); r = amdgpu_ttm_reserve_tmr(adev);
if (r) if (r)
return r; return r;
@ -1928,10 +1927,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
* This is used for VGA emulation and pre-OS scanout buffers to * This is used for VGA emulation and pre-OS scanout buffers to
* avoid display artifacts while transitioning between pre-OS * avoid display artifacts while transitioning between pre-OS
* and driver. */ * and driver. */
r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
&adev->stolen_vga_memory, &adev->mman.stolen_vga_memory,
NULL, &stolen_vga_buf); NULL);
if (r)
return r;
r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
adev->mman.stolen_extended_size,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.stolen_extended_memory,
NULL);
if (r) if (r)
return r; return r;
@ -1987,9 +1993,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*/ */
void amdgpu_ttm_late_init(struct amdgpu_device *adev) void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{ {
void *stolen_vga_buf;
/* return the VGA stolen memory (if any) back to VRAM */ /* return the VGA stolen memory (if any) back to VRAM */
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); if (!adev->mman.keep_stolen_vga_memory)
amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
} }
/** /**
@ -2001,8 +2008,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return; return;
amdgpu_ttm_training_reserve_vram_fini(adev); amdgpu_ttm_training_reserve_vram_fini(adev);
/* return the stolen vga memory back to VRAM */
if (adev->mman.keep_stolen_vga_memory)
amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
/* return the IP Discovery TMR memory back to VRAM */ /* return the IP Discovery TMR memory back to VRAM */
amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev); amdgpu_ttm_fw_reserve_vram_fini(adev);
if (adev->mman.aper_base_kaddr) if (adev->mman.aper_base_kaddr)
@ -2034,7 +2044,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
uint64_t size; uint64_t size;
int r; int r;
if (!adev->mman.initialized || adev->in_gpu_reset || if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
adev->mman.buffer_funcs_enabled == enable) adev->mman.buffer_funcs_enabled == enable)
return; return;
@ -2045,7 +2055,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
ring = adev->mman.buffer_funcs_ring; ring = adev->mman.buffer_funcs_ring;
sched = &ring->sched; sched = &ring->sched;
r = drm_sched_entity_init(&adev->mman.entity, r = drm_sched_entity_init(&adev->mman.entity,
DRM_SCHED_PRIORITY_KERNEL, &sched, DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL); 1, NULL);
if (r) { if (r) {
DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
@ -2070,7 +2080,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{ {
struct drm_file *file_priv = filp->private_data; struct drm_file *file_priv = filp->private_data;
struct amdgpu_device *adev = file_priv->minor->dev->dev_private; struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
if (adev == NULL) if (adev == NULL)
return -EINVAL; return -EINVAL;
@ -2251,7 +2261,7 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_info_node *node = (struct drm_info_node *)m->private;
unsigned ttm_pl = (uintptr_t)node->info_ent->data; unsigned ttm_pl = (uintptr_t)node->info_ent->data;
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = drm_to_adev(dev);
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl); struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
struct drm_printer p = drm_seq_file_printer(m); struct drm_printer p = drm_seq_file_printer(m);
@ -2542,7 +2552,7 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
unsigned count; unsigned count;
struct drm_minor *minor = adev->ddev->primary; struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *ent, *root = minor->debugfs_root; struct dentry *ent, *root = minor->debugfs_root;
for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) { for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {

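Note: the pre-OS reservation rework above pins the stolen scanout ranges at fixed VRAM offsets instead of letting TTM place a single buffer anywhere. A minimal sketch of the resulting pattern (the helper name reserve_stolen_ranges is hypothetical; the fields and amdgpu_bo_create_kernel_at() calls mirror the hunks above, error handling abbreviated):

/* Illustrative sketch, not part of the diff. */
static int reserve_stolen_ranges(struct amdgpu_device *adev)
{
    int r;

    /* VGA range is pinned at VRAM offset 0 */
    r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
                                   AMDGPU_GEM_DOMAIN_VRAM,
                                   &adev->mman.stolen_vga_memory, NULL);
    if (r)
        return r;

    /* extended pre-OS range follows the VGA range contiguously */
    return amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
                                      adev->mman.stolen_extended_size,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &adev->mman.stolen_extended_memory,
                                      NULL);
}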
View File

@@ -77,6 +77,23 @@ struct amdgpu_mman {
     struct amdgpu_vram_mgr vram_mgr;
     struct amdgpu_gtt_mgr gtt_mgr;
+
+    uint64_t stolen_vga_size;
+    struct amdgpu_bo *stolen_vga_memory;
+    uint64_t stolen_extended_size;
+    struct amdgpu_bo *stolen_extended_memory;
+    bool keep_stolen_vga_memory;
+
+    /* discovery */
+    uint8_t *discovery_bin;
+    uint32_t discovery_tmr_size;
+    struct amdgpu_bo *discovery_memory;
+
+    /* firmware VRAM reservation */
+    u64 fw_vram_usage_start_offset;
+    u64 fw_vram_usage_size;
+    struct amdgpu_bo *fw_vram_usage_reserved_bo;
+    void *fw_vram_usage_va;
 };
 struct amdgpu_copy_mem {

View File

@@ -408,7 +408,7 @@ static ssize_t show_##name(struct device *dev,        \
            char *buf)                        \
 {                                \
     struct drm_device *ddev = dev_get_drvdata(dev);        \
-    struct amdgpu_device *adev = ddev->dev_private;        \
+    struct amdgpu_device *adev = drm_to_adev(ddev);        \
                                 \
     return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field);    \
 }                                \
@@ -628,7 +628,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
     struct amdgpu_firmware_info *ucode = NULL;
     /* for baremetal, the ucode is allocated in gtt, so don't need to fill the bo when reset/suspend */
-    if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend))
+    if (!amdgpu_sriov_vf(adev) && (amdgpu_in_reset(adev) || adev->in_suspend))
         return 0;
     /*
      * if SMU loaded firmware, it needn't add SMC, UVD, and VCE

View File

@@ -131,6 +131,7 @@ enum ta_fw_type {
     TA_FW_TYPE_PSP_RAS,
     TA_FW_TYPE_PSP_HDCP,
     TA_FW_TYPE_PSP_DTM,
+    TA_FW_TYPE_PSP_RAP,
 };
 struct ta_fw_bin_desc {

View File

@@ -125,8 +125,9 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
                 "detected in UMC block\n",
                 err_data->ue_count);
-        if (err_data->err_addr_cnt &&
-            amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+        if ((amdgpu_bad_page_threshold != 0) &&
+            err_data->err_addr_cnt &&
+            amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
                         err_data->err_addr_cnt))
             dev_warn(adev->dev, "Failed to add ras bad page!\n");

View File

@@ -21,6 +21,20 @@
 #ifndef __AMDGPU_UMC_H__
 #define __AMDGPU_UMC_H__
+/*
+ * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
+ * is the index of 8KB block
+ */
+#define ADDR_OF_8KB_BLOCK(addr)            (((addr) & ~0xffULL) << 5)
+/* channel index is the index of 256B block */
+#define ADDR_OF_256B_BLOCK(channel_index)    ((channel_index) << 8)
+/* offset in 256B block */
+#define OFFSET_IN_256B_BLOCK(addr)        ((addr) & 0xffULL)
+
+#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
+#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
+#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
+
 struct amdgpu_umc_funcs {
     void (*err_cnt_init)(struct amdgpu_device *adev);
     int (*ras_late_init)(struct amdgpu_device *adev);

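Note: to make the address arithmetic in the new macros concrete, here is a worked example with hypothetical values (ErrorAddr 0x1234, channel index 3). The final OR-combination is shown only as an illustration of how the pieces compose:

/*
 * ADDR_OF_8KB_BLOCK(0x1234)    = (0x1234 & ~0xff) << 5 = 0x1200 << 5 = 0x24000
 *                                (identical to (0x1234 / 256) * 8192)
 * ADDR_OF_256B_BLOCK(3)        = 3 << 8 = 0x300
 * OFFSET_IN_256B_BLOCK(0x1234) = 0x1234 & 0xff = 0x34
 *
 * Combined address: 0x24000 | 0x300 | 0x34 = 0x24334
 */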
View File

@@ -45,7 +45,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
     if (adev->mode_info.num_crtc == 0)
         adev->mode_info.num_crtc = 1;
     adev->enable_virtual_display = true;
-    adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
+    adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC;
     adev->cg_flags = 0;
     adev->pg_flags = 0;
 }
@@ -93,7 +93,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
     amdgpu_ring_undo(ring);
     spin_unlock_irqrestore(&kiq->ring_lock, flags);
 failed_kiq:
-    pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+    dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
 }
 /**
@@ -401,7 +401,7 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
     if (bp_block_size) {
         bp_cnt = bp_block_size / sizeof(uint64_t);
         for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
-            retired_page = *(uint64_t *)(adev->fw_vram_usage.va +
+            retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
                     bp_block_offset + bp_idx * sizeof(uint64_t));
             bp.retired_page = retired_page;
@@ -428,10 +428,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
     adev->virt.fw_reserve.p_pf2vf = NULL;
     adev->virt.fw_reserve.p_vf2pf = NULL;
-    if (adev->fw_vram_usage.va != NULL) {
+    if (adev->mman.fw_vram_usage_va != NULL) {
         adev->virt.fw_reserve.p_pf2vf =
             (struct amd_sriov_msg_pf2vf_info_header *)(
-            adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
+            adev->mman.fw_vram_usage_va + AMDGIM_DATAEXCHANGE_OFFSET);
         AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
         AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
         AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

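Note: as a reading aid for the two hunks above, the firmware-reserved VRAM region mapped at fw_vram_usage_va is laid out roughly as follows (a sketch inferred from this diff only; offsets and sizes are not authoritative):

/*
 * fw_vram_usage_va
 *   + AMDGIM_DATAEXCHANGE_OFFSET -> struct amd_sriov_msg_pf2vf_info_header
 *   + bp_block_offset            -> retired-page array,
 *                                   bp_block_size / sizeof(uint64_t) entries
 */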
View File

@@ -325,9 +325,9 @@ static inline bool is_virtual_machine(void)
 #define amdgpu_sriov_is_pp_one_vf(adev) \
     ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
 #define amdgpu_sriov_is_debug(adev) \
-    ((!adev->in_gpu_reset) && adev->virt.tdr_debug)
+    ((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
 #define amdgpu_sriov_is_normal(adev) \
-    ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug))
+    ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);

View File

@@ -28,6 +28,7 @@
 #include <linux/dma-fence-array.h>
 #include <linux/interval_tree_generic.h>
 #include <linux/idr.h>
+#include <linux/dma-buf.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -35,6 +36,7 @@
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_gmc.h"
 #include "amdgpu_xgmi.h"
+#include "amdgpu_dma_buf.h"
 /**
  * DOC: GPUVM
@@ -1691,13 +1693,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
         uint64_t max_entries;
         uint64_t addr, last;
+        max_entries = mapping->last - start + 1;
         if (nodes) {
             addr = nodes->start << PAGE_SHIFT;
-            max_entries = (nodes->size - pfn) *
-                AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+            max_entries = min((nodes->size - pfn) *
+                AMDGPU_GPU_PAGES_IN_CPU_PAGE, max_entries);
         } else {
             addr = 0;
-            max_entries = S64_MAX;
         }
         if (pages_addr) {
@@ -1727,7 +1729,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
             addr += pfn << PAGE_SHIFT;
         }
-        last = min((uint64_t)mapping->last, start + max_entries - 1);
+        last = start + max_entries - 1;
         r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
                         start, last, flags, addr,
                         dma_addr, fence);
@@ -1778,15 +1780,24 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
         nodes = NULL;
         resv = vm->root.base.bo->tbo.base.resv;
     } else {
+        struct drm_gem_object *obj = &bo->tbo.base;
         struct ttm_dma_tt *ttm;
+        resv = bo->tbo.base.resv;
+        if (obj->import_attach && bo_va->is_xgmi) {
+            struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+            struct drm_gem_object *gobj = dma_buf->priv;
+            struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+            if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+                bo = gem_to_amdgpu_bo(gobj);
+        }
         mem = &bo->tbo.mem;
         nodes = mem->mm_node;
         if (mem->mem_type == TTM_PL_TT) {
             ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
             pages_addr = ttm->dma_address;
         }
-        resv = bo->tbo.base.resv;
     }
     if (bo) {
@@ -2132,8 +2143,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
     INIT_LIST_HEAD(&bo_va->valids);
     INIT_LIST_HEAD(&bo_va->invalids);
-    if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
-        (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
+    if (!bo)
+        return bo_va;
+
+    if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
         bo_va->is_xgmi = true;
         /* Power up XGMI if it can be potentially used */
         amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
@@ -3209,7 +3222,7 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
     union drm_amdgpu_vm *args = data;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_fpriv *fpriv = filp->driver_priv;
     long timeout = msecs_to_jiffies(2000);
     int r;

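Note: worked numbers for the max_entries rework above (hypothetical values): a mapping spanning GPU pages [start, mapping->last] = [0, 9] yields max_entries = 10; if the current VRAM node only has 6 pages left past pfn, min() clamps max_entries to 6, so the update covers last = 0 + 6 - 1 = 5 and the walk resumes from page 6 on the next node, rather than computing an oversized range and clamping last separately at the end.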
View File

@@ -50,7 +50,7 @@ static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
         struct device_attribute *attr, char *buf)
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
-    struct amdgpu_device *adev = ddev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(ddev);
     return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
 }
@@ -67,7 +67,7 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
         struct device_attribute *attr, char *buf)
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
-    struct amdgpu_device *adev = ddev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(ddev);
     return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
 }
@@ -84,8 +84,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
         struct device_attribute *attr, char *buf)
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
-    struct amdgpu_device *adev = ddev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(ddev);
     struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+
     return snprintf(buf, PAGE_SIZE, "%llu\n",
             amdgpu_vram_mgr_usage(man));
 }
@@ -102,8 +103,9 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
         struct device_attribute *attr, char *buf)
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
-    struct amdgpu_device *adev = ddev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(ddev);
     struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+
     return snprintf(buf, PAGE_SIZE, "%llu\n",
             amdgpu_vram_mgr_vis_usage(man));
 }
@@ -113,7 +115,7 @@ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
         char *buf)
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
-    struct amdgpu_device *adev = ddev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(ddev);
     switch (adev->gmc.vram_vendor) {
     case SAMSUNG:

View File

@@ -35,11 +35,9 @@
 static DEFINE_MUTEX(xgmi_mutex);
-#define AMDGPU_MAX_XGMI_HIVE            8
 #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE        4
-static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
-static unsigned hive_count = 0;
+static LIST_HEAD(xgmi_hive_list);
 static const int xgmi_pcs_err_status_reg_vg20[] = {
     smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
@@ -171,65 +169,53 @@ static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
  *
  */
-static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
-        struct device_attribute *attr, char *buf)
-{
-    struct amdgpu_hive_info *hive =
-            container_of(attr, struct amdgpu_hive_info, dev_attr);
-
-    return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
-}
-
-static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
-                    struct amdgpu_hive_info *hive)
-{
-    int ret = 0;
-
-    if (WARN_ON(hive->kobj))
-        return -EINVAL;
-
-    hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
-    if (!hive->kobj) {
-        dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
-        return -EINVAL;
-    }
-
-    hive->dev_attr = (struct device_attribute) {
-        .attr = {
-            .name = "xgmi_hive_id",
-            .mode = S_IRUGO,
-        },
-        .show = amdgpu_xgmi_show_hive_id,
-    };
-
-    ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
-    if (ret) {
-        dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
-        kobject_del(hive->kobj);
-        kobject_put(hive->kobj);
-        hive->kobj = NULL;
-    }
-
-    return ret;
-}
-
-static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
-                      struct amdgpu_hive_info *hive)
-{
-    sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
-    kobject_del(hive->kobj);
-    kobject_put(hive->kobj);
-    hive->kobj = NULL;
-}
+static struct attribute amdgpu_xgmi_hive_id = {
+    .name = "xgmi_hive_id",
+    .mode = S_IRUGO
+};
+
+static struct attribute *amdgpu_xgmi_hive_attrs[] = {
+    &amdgpu_xgmi_hive_id,
+    NULL
+};
+
+static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
+    struct attribute *attr, char *buf)
+{
+    struct amdgpu_hive_info *hive = container_of(
+        kobj, struct amdgpu_hive_info, kobj);
+
+    if (attr == &amdgpu_xgmi_hive_id)
+        return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
+
+    return 0;
+}
+
+static void amdgpu_xgmi_hive_release(struct kobject *kobj)
+{
+    struct amdgpu_hive_info *hive = container_of(
+        kobj, struct amdgpu_hive_info, kobj);
+
+    mutex_destroy(&hive->hive_lock);
+    kfree(hive);
+}
+
+static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
+    .show = amdgpu_xgmi_show_attrs,
+};
+
+struct kobj_type amdgpu_xgmi_hive_type = {
+    .release = amdgpu_xgmi_hive_release,
+    .sysfs_ops = &amdgpu_xgmi_hive_ops,
+    .default_attrs = amdgpu_xgmi_hive_attrs,
+};
 static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
                      struct device_attribute *attr,
                      char *buf)
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
-    struct amdgpu_device *adev = ddev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(ddev);
     return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
@@ -241,7 +227,7 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
                       char *buf)
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
-    struct amdgpu_device *adev = ddev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(ddev);
     uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
     uint64_t fica_out;
     unsigned int error_count = 0;
@@ -287,8 +273,8 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
     /* Create sysfs link to hive info folder on the first device */
-    if (adev != hive->adev) {
-        ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
+    if (hive->kobj.parent != (&adev->dev->kobj)) {
+        ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
                     "xgmi_hive_info");
         if (ret) {
             dev_err(adev->dev, "XGMI: Failed to create link to hive info");
@@ -296,9 +282,9 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
         }
     }
-    sprintf(node, "node%d", hive->number_devices);
+    sprintf(node, "node%d", atomic_read(&hive->number_devices));
     /* Create sysfs link form the hive folder to yourself */
-    ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
+    ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
     if (ret) {
         dev_err(adev->dev, "XGMI: Failed to create link from hive info");
         goto remove_link;
@@ -308,7 +294,7 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
 remove_link:
-    sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+    sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);
 remove_file:
     device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
@@ -326,78 +312,96 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
     device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
     device_remove_file(adev->dev, &dev_attr_xgmi_error);
-    if (adev != hive->adev)
+    if (hive->kobj.parent != (&adev->dev->kobj))
         sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info");
-    sprintf(node, "node%d", hive->number_devices);
-    sysfs_remove_link(hive->kobj, node);
+    sprintf(node, "node%d", atomic_read(&hive->number_devices));
+    sysfs_remove_link(&hive->kobj, node);
 }
-struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 {
-    int i;
-    struct amdgpu_hive_info *tmp;
+    struct amdgpu_hive_info *hive = NULL, *tmp = NULL;
+    int ret;
     if (!adev->gmc.xgmi.hive_id)
         return NULL;
+    if (adev->hive) {
+        kobject_get(&adev->hive->kobj);
+        return adev->hive;
+    }
     mutex_lock(&xgmi_mutex);
-    for (i = 0 ; i < hive_count; ++i) {
-        tmp = &xgmi_hives[i];
-        if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
-            if (lock)
-                mutex_lock(&tmp->hive_lock);
-            mutex_unlock(&xgmi_mutex);
-            return tmp;
+    if (!list_empty(&xgmi_hive_list)) {
+        list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node) {
+            if (hive->hive_id == adev->gmc.xgmi.hive_id)
+                goto pro_end;
         }
     }
-    if (i >= AMDGPU_MAX_XGMI_HIVE) {
-        mutex_unlock(&xgmi_mutex);
-        return NULL;
+    hive = kzalloc(sizeof(*hive), GFP_KERNEL);
+    if (!hive) {
+        dev_err(adev->dev, "XGMI: allocation failed\n");
+        hive = NULL;
+        goto pro_end;
     }
     /* initialize new hive if not exist */
-    tmp = &xgmi_hives[hive_count++];
-
-    if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
-        mutex_unlock(&xgmi_mutex);
-        return NULL;
+    ret = kobject_init_and_add(&hive->kobj,
+                   &amdgpu_xgmi_hive_type,
+                   &adev->dev->kobj,
+                   "%s", "xgmi_hive_info");
+    if (ret) {
+        dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
+        kfree(hive);
+        hive = NULL;
+        goto pro_end;
     }
-    tmp->adev = adev;
-    tmp->hive_id = adev->gmc.xgmi.hive_id;
-    INIT_LIST_HEAD(&tmp->device_list);
-    mutex_init(&tmp->hive_lock);
-    mutex_init(&tmp->reset_lock);
-    task_barrier_init(&tmp->tb);
-
-    if (lock)
-        mutex_lock(&tmp->hive_lock);
-    tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
-    tmp->hi_req_gpu = NULL;
+    hive->hive_id = adev->gmc.xgmi.hive_id;
+    INIT_LIST_HEAD(&hive->device_list);
+    INIT_LIST_HEAD(&hive->node);
+    mutex_init(&hive->hive_lock);
+    atomic_set(&hive->in_reset, 0);
+    atomic_set(&hive->number_devices, 0);
+    task_barrier_init(&hive->tb);
+    hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
+    hive->hi_req_gpu = NULL;
     /*
      * hive pstate on boot is high in vega20 so we have to go to low
      * pstate on after boot.
      */
-    tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
-    mutex_unlock(&xgmi_mutex);
-
-    return tmp;
+    hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
+    list_add_tail(&hive->node, &xgmi_hive_list);
+
+pro_end:
+    if (hive)
+        kobject_get(&hive->kobj);
+    mutex_unlock(&xgmi_mutex);
+    return hive;
+}
+
+void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
+{
+    if (hive)
+        kobject_put(&hive->kobj);
 }
 int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
 {
     int ret = 0;
-    struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+    struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
     struct amdgpu_device *request_adev = hive->hi_req_gpu ?
                         hive->hi_req_gpu : adev;
     bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
     bool init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
+    amdgpu_put_xgmi_hive(hive);
     /* fw bug so temporarily disable pstate switching */
     return 0;
@@ -449,7 +453,7 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
     /* Each psp need to set the latest topology */
     ret = psp_xgmi_set_topology_info(&adev->psp,
-                     hive->number_devices,
+                     atomic_read(&hive->number_devices),
                      &adev->psp.xgmi_context.top_info);
     if (ret)
         dev_err(adev->dev,
@@ -511,7 +515,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
             adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
     }
-    hive = amdgpu_get_xgmi_hive(adev, 1);
+    hive = amdgpu_get_xgmi_hive(adev);
     if (!hive) {
         ret = -EINVAL;
         dev_err(adev->dev,
@@ -519,6 +523,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
              adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
         goto exit;
     }
+    mutex_lock(&hive->hive_lock);
     top_info = &adev->psp.xgmi_context.top_info;
@@ -526,7 +531,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
     list_for_each_entry(entry, &hive->device_list, head)
         top_info->nodes[count++].node_id = entry->node_id;
     top_info->num_nodes = count;
-    hive->number_devices = count;
+    atomic_set(&hive->number_devices, count);
     task_barrier_add_task(&hive->tb);
@@ -541,7 +546,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
         }
         ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
         if (ret)
-            goto exit;
+            goto exit_unlock;
     }
     /* get latest topology info for each device from psp */
@@ -554,7 +559,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
                     tmp_adev->gmc.xgmi.node_id,
                     tmp_adev->gmc.xgmi.hive_id, ret);
                 /* To do : continue with some node failed or disable the whole hive */
-                goto exit;
+                goto exit_unlock;
             }
         }
     }
@@ -562,39 +567,51 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
     if (!ret)
         ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
+exit_unlock:
     mutex_unlock(&hive->hive_lock);
 exit:
-    if (!ret)
+    if (!ret) {
+        adev->hive = hive;
         dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
              adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
-    else
+    } else {
+        amdgpu_put_xgmi_hive(hive);
         dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
             adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
             ret);
+    }
     return ret;
 }
 int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
 {
-    struct amdgpu_hive_info *hive;
+    struct amdgpu_hive_info *hive = adev->hive;
     if (!adev->gmc.xgmi.supported)
         return -EINVAL;
-    hive = amdgpu_get_xgmi_hive(adev, 1);
     if (!hive)
         return -EINVAL;
+    mutex_lock(&hive->hive_lock);
     task_barrier_rem_task(&hive->tb);
     amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
+    if (hive->hi_req_gpu == adev)
+        hive->hi_req_gpu = NULL;
+    list_del(&adev->gmc.xgmi.head);
     mutex_unlock(&hive->hive_lock);
-    if(!(--hive->number_devices)){
-        amdgpu_xgmi_sysfs_destroy(adev, hive);
-        mutex_destroy(&hive->hive_lock);
-        mutex_destroy(&hive->reset_lock);
+    amdgpu_put_xgmi_hive(hive);
+    adev->hive = NULL;
+
+    if (atomic_dec_return(&hive->number_devices) == 0) {
+        /* Remove the hive from global hive list */
+        mutex_lock(&xgmi_mutex);
+        list_del(&hive->node);
+        mutex_unlock(&xgmi_mutex);
+
+        amdgpu_put_xgmi_hive(hive);
     }
     return psp_xgmi_terminate(&adev->psp);

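Note: a reading aid for the hive rework above — hive lifetime is now tied to the refcount of the kobject embedded in struct amdgpu_hive_info rather than a static array slot. A minimal sketch of the contract, assuming standard kobject semantics:

/*
 * Illustrative only, not part of the diff.
 *
 * hive = amdgpu_get_xgmi_hive(adev);  // ref +1 (kobject_get, or
 *                                     // kobject_init_and_add on first use)
 * ...use hive under hive->hive_lock...
 * amdgpu_put_xgmi_hive(hive);         // ref -1 (kobject_put)
 *
 * When the last reference drops, amdgpu_xgmi_hive_release() runs:
 * it destroys hive_lock and kfree()s the hive.
 */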
View File

@@ -27,13 +27,13 @@
 struct amdgpu_hive_info {
-    uint64_t hive_id;
-    struct list_head device_list;
-    int number_devices;
-    struct mutex hive_lock, reset_lock;
-    struct kobject *kobj;
-    struct device_attribute dev_attr;
-    struct amdgpu_device *adev;
+    struct kobject kobj;
+    uint64_t hive_id;
+    struct list_head device_list;
+    struct list_head node;
+    atomic_t number_devices;
+    struct mutex hive_lock;
+    atomic_t in_reset;
     int hi_req_count;
     struct amdgpu_device *hi_req_gpu;
     struct task_barrier tb;
@@ -50,7 +50,8 @@ struct amdgpu_pcs_ras_field {
     uint32_t pcs_err_shift;
 };
-struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
+void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
 int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);

View File

@@ -73,6 +73,7 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
     case CHIP_VEGA12:
     case CHIP_VEGA20:
     case CHIP_RAVEN:
+    case CHIP_RENOIR:
         athub_update_medium_grain_clock_gating(adev,
                 state == AMD_CG_STATE_GATE);
         athub_update_medium_grain_light_sleep(adev,

View File

@@ -41,7 +41,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
                      struct drm_display_mode *adjusted_mode)
 {
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     SET_CRTC_OVERSCAN_PS_ALLOCATION args;
     int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
@@ -84,7 +84,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
 void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
 {
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     ENABLE_SCALER_PS_ALLOCATION args;
     int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
@@ -114,7 +114,7 @@ void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     int index =
         GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
     ENABLE_CRTC_PS_ALLOCATION args;
@@ -131,7 +131,7 @@ void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
     ENABLE_CRTC_PS_ALLOCATION args;
@@ -147,7 +147,7 @@ void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
     BLANK_CRTC_PS_ALLOCATION args;
@@ -163,7 +163,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
     ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
@@ -192,7 +192,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
     int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
     u16 misc = 0;
@@ -307,7 +307,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct drm_encoder *encoder = amdgpu_crtc->encoder;
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -588,7 +588,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
                       struct amdgpu_atom_ss *ss)
 {
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     u8 frev, crev;
     int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
     union set_pixel_clock args;
@@ -749,7 +749,7 @@ int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder =
         to_amdgpu_encoder(amdgpu_crtc->encoder);
     int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
@@ -818,7 +818,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct drm_device *dev = crtc->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder =
         to_amdgpu_encoder(amdgpu_crtc->encoder);
     u32 pll_clock = mode->clock;

View File

@@ -60,7 +60,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
                          u8 delay, u8 *ack)
 {
     struct drm_device *dev = chan->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     union aux_channel_transaction args;
     int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
     unsigned char *base;
@@ -305,7 +305,7 @@ static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
 u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector)
 {
     struct drm_device *dev = amdgpu_connector->base.dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
             amdgpu_connector->ddc_bus->rec.i2c_id, 0);
@@ -718,7 +718,7 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
                    struct drm_connector *connector)
 {
     struct drm_device *dev = encoder->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     struct amdgpu_connector *amdgpu_connector;
     struct amdgpu_connector_atom_dig *dig_connector;

View File

@@ -70,7 +70,7 @@ u8
 amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
 {
     struct drm_device *dev = amdgpu_encoder->base.dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
         return 0;
@@ -84,7 +84,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
 {
     struct drm_encoder *encoder = &amdgpu_encoder->base;
     struct drm_device *dev = amdgpu_encoder->base.dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder_atom_dig *dig;
     if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
@@ -152,7 +152,7 @@ amdgpu_atombios_encoder_get_backlight_brightness(struct backlight_device *bd)
     struct amdgpu_backlight_privdata *pdata = bl_get_data(bd);
     struct amdgpu_encoder *amdgpu_encoder = pdata->encoder;
     struct drm_device *dev = amdgpu_encoder->base.dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
 }
@@ -166,7 +166,7 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
                        struct drm_connector *drm_connector)
 {
     struct drm_device *dev = amdgpu_encoder->base.dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct backlight_device *bd;
     struct backlight_properties props;
     struct amdgpu_backlight_privdata *pdata;
@@ -229,7 +229,7 @@ void
 amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder)
 {
     struct drm_device *dev = amdgpu_encoder->base.dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct backlight_device *bd = NULL;
     struct amdgpu_encoder_atom_dig *dig;
@@ -319,7 +319,7 @@ static void
 amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
 {
     struct drm_device *dev = encoder->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     DAC_ENCODER_CONTROL_PS_ALLOCATION args;
     int index = 0;
@@ -382,7 +382,7 @@ static void
 amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
 {
     struct drm_device *dev = encoder->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     union dvo_encoder_control args;
     int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
@@ -573,7 +573,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
                       int action, int panel_mode)
 {
     struct drm_device *dev = encoder->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
     struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -762,7 +762,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
                           uint8_t lane_num, uint8_t lane_set)
 {
     struct drm_device *dev = encoder->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
     struct drm_connector *connector;
@@ -1178,7 +1178,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
 {
     struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
     struct drm_device *dev = amdgpu_connector->base.dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     union dig_transmitter_control args;
     int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
     uint8_t frev, crev;
@@ -1225,7 +1225,7 @@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
                            int action)
 {
     struct drm_device *dev = encoder->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     struct amdgpu_encoder *ext_amdgpu_encoder = to_amdgpu_encoder(ext_encoder);
     union external_encoder_control args;
@@ -1466,7 +1466,7 @@ void
 amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
 {
     struct drm_device *dev = encoder->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
     union crtc_source_param args;
@@ -1673,7 +1673,7 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
 void
 amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev)
 {
-    struct drm_device *dev = adev->ddev;
+    struct drm_device *dev = adev_to_drm(adev);
     struct drm_encoder *encoder;
     list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -1701,7 +1701,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
                     struct drm_connector *connector)
 {
     struct drm_device *dev = encoder->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
@@ -1751,7 +1751,7 @@ amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder,
                    struct drm_connector *connector)
 {
     struct drm_device *dev = encoder->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
     uint32_t bios_0_scratch;
@@ -1790,7 +1790,7 @@ amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder,
                    struct drm_connector *connector)
 {
     struct drm_device *dev = encoder->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
     struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
     struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
@@ -1848,7 +1848,7 @@ amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector,
                           bool connected)
 {
     struct drm_device *dev = connector->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_connector *amdgpu_connector =
         to_amdgpu_connector(connector);
     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -1999,7 +1999,7 @@ struct amdgpu_encoder_atom_dig *
 amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
 {
     struct drm_device *dev = encoder->base.dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     struct amdgpu_mode_info *mode_info = &adev->mode_info;
     int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
     uint16_t data_offset, misc;

View File

@@ -40,7 +40,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
                           u8 *buf, u8 num)
 {
     struct drm_device *dev = chan->dev;
-    struct amdgpu_device *adev = dev->dev_private;
+    struct amdgpu_device *adev = drm_to_adev(dev);
     PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
     int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
     unsigned char *base;

View File

@@ -1366,8 +1366,10 @@ static int cik_asic_reset(struct amdgpu_device *adev)
     int r;
     if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+        dev_info(adev->dev, "BACO reset\n");
         r = amdgpu_dpm_baco_reset(adev);
     } else {
+        dev_info(adev->dev, "PCI CONFIG reset\n");
         r = cik_asic_pci_config_reset(adev);
     }
@@ -1919,6 +1921,10 @@ static uint64_t cik_get_pcie_replay_count(struct amdgpu_device *adev)
     return (nak_r + nak_g);
 }
+static void cik_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
 static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
     .read_disabled_bios = &cik_read_disabled_bios,
@@ -1939,6 +1945,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
     .need_reset_on_init = &cik_need_reset_on_init,
     .get_pcie_replay_count = &cik_get_pcie_replay_count,
     .supports_baco = &cik_asic_supports_baco,
+    .pre_asic_init = &cik_pre_asic_init,
 };
 static int cik_common_early_init(void *handle)

View File

@@ -328,7 +328,7 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
  */
 static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 {
-        struct drm_device *dev = adev->ddev;
+        struct drm_device *dev = adev_to_drm(adev);
         struct drm_connector *connector;
         struct drm_connector_list_iter iter;
         u32 tmp;
@@ -383,7 +383,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
  */
 static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
 {
-        struct drm_device *dev = adev->ddev;
+        struct drm_device *dev = adev_to_drm(adev);
         struct drm_connector *connector;
         struct drm_connector_list_iter iter;
         u32 tmp;
@@ -504,7 +504,7 @@ void dce_v10_0_disable_dce(struct amdgpu_device *adev)
 static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1209,7 +1209,7 @@ static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *ad
 static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
 {
-        struct amdgpu_device *adev = encoder->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(encoder->dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         u32 tmp;
@@ -1226,7 +1226,7 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
                                                  struct drm_display_mode *mode)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector;
@@ -1272,7 +1272,7 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
 static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector;
@@ -1328,7 +1328,7 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
 static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector;
@@ -1483,7 +1483,7 @@ static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
 static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1519,7 +1519,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
                                                 void *buffer, size_t size)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         uint8_t *frame = buffer + 3;
@@ -1538,7 +1538,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
 static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1569,7 +1569,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
                                    struct drm_display_mode *mode)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1749,7 +1749,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
 static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1822,7 +1822,7 @@ static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         u32 vga_control;
 
         vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1836,7 +1836,7 @@ static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
 
         if (enable)
                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1850,7 +1850,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct drm_framebuffer *target_fb;
         struct drm_gem_object *obj;
         struct amdgpu_bo *abo;
@@ -2095,7 +2095,7 @@ static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
                                      struct drm_display_mode *mode)
 {
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         u32 tmp;
@@ -2111,7 +2111,7 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         u16 *r, *g, *b;
         int i;
         u32 tmp;
@@ -2250,7 +2250,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         u32 pll_in_use;
         int pll;
@@ -2285,7 +2285,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
 static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 {
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         uint32_t cur_lock;
@@ -2300,7 +2300,7 @@ static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
         u32 tmp;
 
         tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
@@ -2311,7 +2311,7 @@ static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
 static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
         u32 tmp;
 
         WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2329,7 +2329,7 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
                                         int x, int y)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
         int xorigin = 0, yorigin = 0;
 
         amdgpu_crtc->cursor_x = x;
@@ -2503,7 +2503,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         unsigned type;
@@ -2557,7 +2557,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_atom_ss ss;
         int i;
@@ -2701,7 +2701,7 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
         if (amdgpu_crtc == NULL)
                 return -ENOMEM;
 
-        drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
+        drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
 
         drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
         amdgpu_crtc->crtc_id = index;
@@ -2709,8 +2709,8 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
         amdgpu_crtc->max_cursor_width = 128;
         amdgpu_crtc->max_cursor_height = 128;
-        adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-        adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+        adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+        adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
 
         switch (amdgpu_crtc->crtc_id) {
         case 0:
@@ -2792,24 +2792,24 @@ static int dce_v10_0_sw_init(void *handle)
         if (r)
                 return r;
 
-        adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+        adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
 
-        adev->ddev->mode_config.async_page_flip = true;
+        adev_to_drm(adev)->mode_config.async_page_flip = true;
 
-        adev->ddev->mode_config.max_width = 16384;
-        adev->ddev->mode_config.max_height = 16384;
+        adev_to_drm(adev)->mode_config.max_width = 16384;
+        adev_to_drm(adev)->mode_config.max_height = 16384;
 
-        adev->ddev->mode_config.preferred_depth = 24;
-        adev->ddev->mode_config.prefer_shadow = 1;
+        adev_to_drm(adev)->mode_config.preferred_depth = 24;
+        adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
-        adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+        adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
         r = amdgpu_display_modeset_create_props(adev);
         if (r)
                 return r;
 
-        adev->ddev->mode_config.max_width = 16384;
-        adev->ddev->mode_config.max_height = 16384;
+        adev_to_drm(adev)->mode_config.max_width = 16384;
+        adev_to_drm(adev)->mode_config.max_height = 16384;
 
         /* allocate crtcs */
         for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2819,7 +2819,7 @@ static int dce_v10_0_sw_init(void *handle)
         }
 
         if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-                amdgpu_display_print_display_setup(adev->ddev);
+                amdgpu_display_print_display_setup(adev_to_drm(adev));
         else
                 return -EINVAL;
@@ -2832,7 +2832,7 @@ static int dce_v10_0_sw_init(void *handle)
         if (r)
                 return r;
 
-        drm_kms_helper_poll_init(adev->ddev);
+        drm_kms_helper_poll_init(adev_to_drm(adev));
 
         adev->mode_info.mode_config_initialized = true;
         return 0;
@@ -2844,13 +2844,13 @@ static int dce_v10_0_sw_fini(void *handle)
         kfree(adev->mode_info.bios_hardcoded_edid);
 
-        drm_kms_helper_poll_fini(adev->ddev);
+        drm_kms_helper_poll_fini(adev_to_drm(adev));
 
         dce_v10_0_audio_fini(adev);
         dce_v10_0_afmt_fini(adev);
 
-        drm_mode_config_cleanup(adev->ddev);
+        drm_mode_config_cleanup(adev_to_drm(adev));
         adev->mode_info.mode_config_initialized = false;
 
         return 0;
@@ -3157,14 +3157,14 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
         if (amdgpu_crtc == NULL)
                 return 0;
 
-        spin_lock_irqsave(&adev->ddev->event_lock, flags);
+        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
         works = amdgpu_crtc->pflip_works;
         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
                                  "AMDGPU_FLIP_SUBMITTED(%d)\n",
                                  amdgpu_crtc->pflip_status,
                                  AMDGPU_FLIP_SUBMITTED);
-                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                 return 0;
         }
@@ -3176,7 +3176,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
         if (works->event)
                 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
-        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
         drm_crtc_vblank_put(&amdgpu_crtc->base);
         schedule_work(&works->unpin_work);
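The pageflip handler touched above follows the usual flip-completion shape: check the flip state under the DRM event lock, deliver the vblank event, then defer the heavyweight buffer unpin to a workqueue so the interrupt path stays short. A userspace miniature of that flow, with a pthread mutex standing in for the spinlock and a direct call standing in for schedule_work():

#include <pthread.h>
#include <stdio.h>

/* Stand-in state machine for amdgpu's flip tracking. */
enum flip_status { FLIP_NONE, FLIP_SUBMITTED };

struct toy_crtc {
        pthread_mutex_t event_lock;     /* stands in for drm_device::event_lock */
        enum flip_status pflip_status;
        int has_event;
};

static void toy_unpin_work(void)
{
        /* The real driver runs this from a workqueue via schedule_work(). */
        puts("unpinning old framebuffer");
}

static int toy_pageflip_irq(struct toy_crtc *crtc)
{
        pthread_mutex_lock(&crtc->event_lock);
        if (crtc->pflip_status != FLIP_SUBMITTED) {
                /* Spurious interrupt: no flip was submitted. */
                pthread_mutex_unlock(&crtc->event_lock);
                return 0;
        }
        crtc->pflip_status = FLIP_NONE;
        if (crtc->has_event)
                puts("delivering vblank event");
        pthread_mutex_unlock(&crtc->event_lock);
        toy_unpin_work();       /* deferred in the real driver */
        return 0;
}

int main(void)
{
        struct toy_crtc crtc = { PTHREAD_MUTEX_INITIALIZER, FLIP_SUBMITTED, 1 };

        return toy_pageflip_irq(&crtc);
}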
@@ -3245,7 +3245,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
                 if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                        drm_handle_vblank(adev->ddev, crtc);
+                        drm_handle_vblank(adev_to_drm(adev), crtc);
                 }
                 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
@@ -3345,7 +3345,7 @@ dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
 static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
 {
-        struct amdgpu_device *adev = encoder->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(encoder->dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3385,7 +3385,7 @@ static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
 static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
 
         /* need to call this here as we need the crtc set up */
         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3485,7 +3485,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
                             uint32_t supported_device,
                             u16 caps)
 {
-        struct drm_device *dev = adev->ddev;
+        struct drm_device *dev = adev_to_drm(adev);
         struct drm_encoder *encoder;
         struct amdgpu_encoder *amdgpu_encoder;

--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c

@@ -346,7 +346,7 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
  */
 static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 {
-        struct drm_device *dev = adev->ddev;
+        struct drm_device *dev = adev_to_drm(adev);
         struct drm_connector *connector;
         struct drm_connector_list_iter iter;
         u32 tmp;
@@ -400,7 +400,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
  */
 static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
 {
-        struct drm_device *dev = adev->ddev;
+        struct drm_device *dev = adev_to_drm(adev);
         struct drm_connector *connector;
         struct drm_connector_list_iter iter;
         u32 tmp;
@@ -530,7 +530,7 @@ void dce_v11_0_disable_dce(struct amdgpu_device *adev)
 static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1235,7 +1235,7 @@ static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *ad
 static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
 {
-        struct amdgpu_device *adev = encoder->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(encoder->dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         u32 tmp;
@@ -1252,7 +1252,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
                                                  struct drm_display_mode *mode)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector;
@@ -1298,7 +1298,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
 static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector;
@@ -1354,7 +1354,7 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
 static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector;
@@ -1525,7 +1525,7 @@ static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
 static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1561,7 +1561,7 @@ static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
                                                 void *buffer, size_t size)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         uint8_t *frame = buffer + 3;
@@ -1580,7 +1580,7 @@ static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
 static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1611,7 +1611,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
                                    struct drm_display_mode *mode)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1791,7 +1791,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
 static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1864,7 +1864,7 @@ static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         u32 vga_control;
 
         vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1878,7 +1878,7 @@ static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
 
         if (enable)
                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1892,7 +1892,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct drm_framebuffer *target_fb;
         struct drm_gem_object *obj;
         struct amdgpu_bo *abo;
@@ -2137,7 +2137,7 @@ static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
                                      struct drm_display_mode *mode)
 {
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         u32 tmp;
@@ -2153,7 +2153,7 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         u16 *r, *g, *b;
         int i;
         u32 tmp;
@@ -2283,7 +2283,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         u32 pll_in_use;
         int pll;
@@ -2364,7 +2364,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
 static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 {
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         uint32_t cur_lock;
@@ -2379,7 +2379,7 @@ static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
         u32 tmp;
 
         tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
@@ -2390,7 +2390,7 @@ static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
 static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
         u32 tmp;
 
         WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2408,7 +2408,7 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
                                         int x, int y)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
         int xorigin = 0, yorigin = 0;
 
         amdgpu_crtc->cursor_x = x;
@@ -2582,7 +2582,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
 static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         unsigned type;
@@ -2636,7 +2636,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_atom_ss ss;
         int i;
@@ -2706,7 +2706,7 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
 
         if (!amdgpu_crtc->adjusted_clock)
                 return -EINVAL;
@@ -2809,7 +2809,7 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
         if (amdgpu_crtc == NULL)
                 return -ENOMEM;
 
-        drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
+        drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
 
         drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
         amdgpu_crtc->crtc_id = index;
@@ -2817,8 +2817,8 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
         amdgpu_crtc->max_cursor_width = 128;
         amdgpu_crtc->max_cursor_height = 128;
-        adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-        adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+        adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+        adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
 
         switch (amdgpu_crtc->crtc_id) {
         case 0:
@@ -2913,24 +2913,24 @@ static int dce_v11_0_sw_init(void *handle)
         if (r)
                 return r;
 
-        adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+        adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
 
-        adev->ddev->mode_config.async_page_flip = true;
+        adev_to_drm(adev)->mode_config.async_page_flip = true;
 
-        adev->ddev->mode_config.max_width = 16384;
-        adev->ddev->mode_config.max_height = 16384;
+        adev_to_drm(adev)->mode_config.max_width = 16384;
+        adev_to_drm(adev)->mode_config.max_height = 16384;
 
-        adev->ddev->mode_config.preferred_depth = 24;
-        adev->ddev->mode_config.prefer_shadow = 1;
+        adev_to_drm(adev)->mode_config.preferred_depth = 24;
+        adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
-        adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+        adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
         r = amdgpu_display_modeset_create_props(adev);
         if (r)
                 return r;
 
-        adev->ddev->mode_config.max_width = 16384;
-        adev->ddev->mode_config.max_height = 16384;
+        adev_to_drm(adev)->mode_config.max_width = 16384;
+        adev_to_drm(adev)->mode_config.max_height = 16384;
 
         /* allocate crtcs */
@@ -2941,7 +2941,7 @@ static int dce_v11_0_sw_init(void *handle)
         }
 
         if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-                amdgpu_display_print_display_setup(adev->ddev);
+                amdgpu_display_print_display_setup(adev_to_drm(adev));
         else
                 return -EINVAL;
@@ -2954,7 +2954,7 @@ static int dce_v11_0_sw_init(void *handle)
         if (r)
                 return r;
 
-        drm_kms_helper_poll_init(adev->ddev);
+        drm_kms_helper_poll_init(adev_to_drm(adev));
 
         adev->mode_info.mode_config_initialized = true;
         return 0;
@@ -2966,13 +2966,13 @@ static int dce_v11_0_sw_fini(void *handle)
         kfree(adev->mode_info.bios_hardcoded_edid);
 
-        drm_kms_helper_poll_fini(adev->ddev);
+        drm_kms_helper_poll_fini(adev_to_drm(adev));
 
         dce_v11_0_audio_fini(adev);
         dce_v11_0_afmt_fini(adev);
 
-        drm_mode_config_cleanup(adev->ddev);
+        drm_mode_config_cleanup(adev_to_drm(adev));
         adev->mode_info.mode_config_initialized = false;
 
         return 0;
@@ -3283,14 +3283,14 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
         if(amdgpu_crtc == NULL)
                 return 0;
 
-        spin_lock_irqsave(&adev->ddev->event_lock, flags);
+        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
         works = amdgpu_crtc->pflip_works;
         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
                                  "AMDGPU_FLIP_SUBMITTED(%d)\n",
                                  amdgpu_crtc->pflip_status,
                                  AMDGPU_FLIP_SUBMITTED);
-                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                 return 0;
         }
@@ -3302,7 +3302,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
         if(works->event)
                 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
-        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
         drm_crtc_vblank_put(&amdgpu_crtc->base);
         schedule_work(&works->unpin_work);
@@ -3372,7 +3372,7 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
                 if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                        drm_handle_vblank(adev->ddev, crtc);
+                        drm_handle_vblank(adev_to_drm(adev), crtc);
                 }
                 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
@@ -3471,7 +3471,7 @@ dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
 static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
 {
-        struct amdgpu_device *adev = encoder->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(encoder->dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3511,7 +3511,7 @@ static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
 static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
 
         /* need to call this here as we need the crtc set up */
         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3611,7 +3611,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
                             uint32_t supported_device,
                             u16 caps)
 {
-        struct drm_device *dev = adev->ddev;
+        struct drm_device *dev = adev_to_drm(adev);
         struct drm_encoder *encoder;
         struct amdgpu_encoder *amdgpu_encoder;

--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c

@@ -279,7 +279,7 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
  */
 static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 {
-        struct drm_device *dev = adev->ddev;
+        struct drm_device *dev = adev_to_drm(adev);
         struct drm_connector *connector;
         struct drm_connector_list_iter iter;
         u32 tmp;
@@ -324,7 +324,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
  */
 static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 {
-        struct drm_device *dev = adev->ddev;
+        struct drm_device *dev = adev_to_drm(adev);
         struct drm_connector *connector;
         struct drm_connector_list_iter iter;
         u32 tmp;
@@ -401,7 +401,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1114,7 +1114,7 @@ static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *ade
 static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
 {
-        struct amdgpu_device *adev = encoder->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(encoder->dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1130,7 +1130,7 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
                                                 struct drm_display_mode *mode)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector;
@@ -1174,7 +1174,7 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
 static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector;
@@ -1235,7 +1235,7 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector;
@@ -1392,7 +1392,7 @@ static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
 static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         u32 tmp;
@@ -1408,7 +1408,7 @@ static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
                                    uint32_t clock, int bpc)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1446,7 +1446,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
                                              struct drm_display_mode *mode)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1488,7 +1488,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
         int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
         u32 tmp;
@@ -1522,7 +1522,7 @@ static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         u32 tmp;
@@ -1566,7 +1566,7 @@ static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
 static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         u32 tmp;
@@ -1579,7 +1579,7 @@ static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         u32 tmp;
@@ -1616,7 +1616,7 @@ static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         u32 tmp;
@@ -1645,7 +1645,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
                                   struct drm_display_mode *mode)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
         struct drm_connector *connector;
@@ -1714,7 +1714,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
 {
         struct drm_device *dev = encoder->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1788,7 +1788,7 @@ static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         u32 vga_control;
 
         vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1799,7 +1799,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
 
         WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
 }
@@ -1810,7 +1810,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct drm_framebuffer *target_fb;
         struct drm_gem_object *obj;
         struct amdgpu_bo *abo;
@@ -2033,7 +2033,7 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
                                     struct drm_display_mode *mode)
 {
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2048,7 +2048,7 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         u16 *r, *g, *b;
         int i;
@@ -2148,7 +2148,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         u32 pll_in_use;
         int pll;
@@ -2177,7 +2177,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 {
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         uint32_t cur_lock;
@@ -2192,7 +2192,7 @@ static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 
         WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
                (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
@@ -2204,7 +2204,7 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 
         WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
                upper_32_bits(amdgpu_crtc->cursor_addr));
@@ -2222,7 +2222,7 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
                                        int x, int y)
 {
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-        struct amdgpu_device *adev = crtc->dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
         int xorigin = 0, yorigin = 0;
         int w = amdgpu_crtc->cursor_width;
@@ -2397,7 +2397,7 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         unsigned type;
@@ -2447,7 +2447,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
         struct drm_device *dev = crtc->dev;
-        struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_device *adev = drm_to_adev(dev);
         struct amdgpu_atom_ss ss;
         int i;
@@ -2591,7 +2591,7 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
         if (amdgpu_crtc == NULL)
                 return -ENOMEM;
 
-        drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+        drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
 
         drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
         amdgpu_crtc->crtc_id = index;
@@ -2599,8 +2599,8 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
         amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
         amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
-        adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-        adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+        adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+        adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
 
         amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
@@ -2669,20 +2669,20 @@ static int dce_v6_0_sw_init(void *handle)
         adev->mode_info.mode_config_initialized = true;
 
-        adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
-        adev->ddev->mode_config.async_page_flip = true;
-        adev->ddev->mode_config.max_width = 16384;
-        adev->ddev->mode_config.max_height = 16384;
-        adev->ddev->mode_config.preferred_depth = 24;
-        adev->ddev->mode_config.prefer_shadow = 1;
-        adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+        adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
+        adev_to_drm(adev)->mode_config.async_page_flip = true;
+        adev_to_drm(adev)->mode_config.max_width = 16384;
+        adev_to_drm(adev)->mode_config.max_height = 16384;
+        adev_to_drm(adev)->mode_config.preferred_depth = 24;
+        adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+        adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
         r = amdgpu_display_modeset_create_props(adev);
         if (r)
                 return r;
 
-        adev->ddev->mode_config.max_width = 16384;
-        adev->ddev->mode_config.max_height = 16384;
+        adev_to_drm(adev)->mode_config.max_width = 16384;
+        adev_to_drm(adev)->mode_config.max_height = 16384;
 
         /* allocate crtcs */
         for (i = 0; i < adev->mode_info.num_crtc; i++) {
@ -2693,7 +2693,7 @@ static int dce_v6_0_sw_init(void *handle)
ret = amdgpu_atombios_get_connector_info_from_object_table(adev); ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
if (ret) if (ret)
amdgpu_display_print_display_setup(adev->ddev); amdgpu_display_print_display_setup(adev_to_drm(adev));
else else
return -EINVAL; return -EINVAL;
@ -2706,7 +2706,7 @@ static int dce_v6_0_sw_init(void *handle)
if (r) if (r)
return r; return r;
drm_kms_helper_poll_init(adev->ddev); drm_kms_helper_poll_init(adev_to_drm(adev));
return r; return r;
} }
@ -2717,12 +2717,12 @@ static int dce_v6_0_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid); kfree(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev->ddev); drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v6_0_audio_fini(adev); dce_v6_0_audio_fini(adev);
dce_v6_0_afmt_fini(adev); dce_v6_0_afmt_fini(adev);
drm_mode_config_cleanup(adev->ddev); drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false; adev->mode_info.mode_config_initialized = false;
return 0; return 0;
@ -2967,7 +2967,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) { if (amdgpu_irq_enabled(adev, source, irq_type)) {
drm_handle_vblank(adev->ddev, crtc); drm_handle_vblank(adev_to_drm(adev), crtc);
} }
DRM_DEBUG("IH: D%d vblank\n", crtc + 1); DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
break; break;
@ -3036,14 +3036,14 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL) if (amdgpu_crtc == NULL)
return 0; return 0;
spin_lock_irqsave(&adev->ddev->event_lock, flags); spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works; works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n", "AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status, amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED); AMDGPU_FLIP_SUBMITTED);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags); spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0; return 0;
} }
@ -3055,7 +3055,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
if (works->event) if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags); spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base); drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work); schedule_work(&works->unpin_work);
@ -3146,7 +3146,7 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder) static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{ {
struct amdgpu_device *adev = encoder->dev->dev_private; struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@ -3187,7 +3187,7 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{ {
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */ /* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@ -3297,7 +3297,7 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device, uint32_t supported_device,
u16 caps) u16 caps)
{ {
struct drm_device *dev = adev->ddev; struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder; struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder; struct amdgpu_encoder *amdgpu_encoder;
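Every hunk in this file applies the same two substitutions: dev->dev_private becomes drm_to_adev(dev) and adev->ddev becomes adev_to_drm(adev). A minimal sketch of the accessor pair, assuming they are thin inline wrappers over the legacy fields during the transition (the real definitions live in amdgpu.h and are not part of these hunks):

	static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
	{
		return ddev->dev_private;	/* assumed: legacy field still backs it */
	}

	static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
	{
		return adev->ddev;		/* assumed: legacy field still backs it */
	}

Funneling every conversion through one typed pair means a later change of representation, for example embedding the drm_device inside amdgpu_device and returning container_of(), would touch two helpers instead of every call site converted here.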

View File

@@ -273,7 +273,7 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
  */
 static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	u32 tmp;
@@ -318,7 +318,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
  */
 static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	u32 tmp;
@@ -444,7 +444,7 @@ void dce_v8_0_disable_dce(struct amdgpu_device *adev)
 static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1146,7 +1146,7 @@ static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *ade
 static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
 {
-	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 offset;
@@ -1164,7 +1164,7 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
 						struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1225,7 +1225,7 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
 static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1278,7 +1278,7 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 offset;
@@ -1446,7 +1446,7 @@ static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
 static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1469,7 +1469,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
 					       void *buffer, size_t size)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	uint32_t offset = dig->afmt->offset;
@@ -1489,7 +1489,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
 static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1516,7 +1516,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
 				  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1678,7 +1678,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
 static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1751,7 +1751,7 @@ static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u32 vga_control;

 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1765,7 +1765,7 @@ static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);

 	if (enable)
 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1779,7 +1779,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *abo;
@@ -2004,7 +2004,7 @@ static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
 				    struct drm_display_mode *mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2018,7 +2018,7 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u16 *r, *g, *b;
 	int i;
@@ -2140,7 +2140,7 @@ static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u32 pll_in_use;
 	int pll;
@@ -2188,7 +2188,7 @@ static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
 static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 {
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	uint32_t cur_lock;
@@ -2203,7 +2203,7 @@ static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);

 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
@@ -2213,7 +2213,7 @@ static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
 static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);

 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(amdgpu_crtc->cursor_addr));
@@ -2230,7 +2230,7 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
 				       int x, int y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	int xorigin = 0, yorigin = 0;

 	amdgpu_crtc->cursor_x = x;
@@ -2404,7 +2404,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	unsigned type;
@@ -2458,7 +2458,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_atom_ss ss;
 	int i;
@@ -2609,7 +2609,7 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
 	if (amdgpu_crtc == NULL)
 		return -ENOMEM;

-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);

 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
 	amdgpu_crtc->crtc_id = index;
@@ -2617,8 +2617,8 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
 	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
 	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
-	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
@@ -2689,24 +2689,24 @@ static int dce_v8_0_sw_init(void *handle)
 	if (r)
 		return r;

-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
-	adev->ddev->mode_config.async_page_flip = true;
-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.async_page_flip = true;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

 	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

 	/* allocate crtcs */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2716,7 +2716,7 @@ static int dce_v8_0_sw_init(void *handle)
 	}

 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-		amdgpu_display_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev_to_drm(adev));
 	else
 		return -EINVAL;
@@ -2729,7 +2729,7 @@ static int dce_v8_0_sw_init(void *handle)
 	if (r)
 		return r;

-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));

 	adev->mode_info.mode_config_initialized = true;
 	return 0;
@@ -2741,13 +2741,13 @@ static int dce_v8_0_sw_fini(void *handle)
 	kfree(adev->mode_info.bios_hardcoded_edid);

-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));

 	dce_v8_0_audio_fini(adev);
 	dce_v8_0_afmt_fini(adev);

-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
 	adev->mode_info.mode_config_initialized = false;

 	return 0;
@@ -3057,7 +3057,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
-			drm_handle_vblank(adev->ddev, crtc);
+			drm_handle_vblank(adev_to_drm(adev), crtc);
 		}
 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
 		break;
@@ -3126,14 +3126,14 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
 	if (amdgpu_crtc == NULL)
 		return 0;

-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 	works = amdgpu_crtc->pflip_works;
 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
 				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
 				 amdgpu_crtc->pflip_status,
 				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		return 0;
 	}
@@ -3145,7 +3145,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
 	if (works->event)
 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

 	drm_crtc_vblank_put(&amdgpu_crtc->base);
 	schedule_work(&works->unpin_work);
@@ -3233,7 +3233,7 @@ dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
 static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
 {
-	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3273,7 +3273,7 @@ static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
 static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);

 	/* need to call this here as we need the crtc set up */
 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3373,7 +3373,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
 			 uint32_t supported_device,
 			 u16 caps)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_encoder *encoder;
 	struct amdgpu_encoder *amdgpu_encoder;

View File

@@ -132,7 +132,7 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
 static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	unsigned type;
@@ -235,7 +235,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
 	if (amdgpu_crtc == NULL)
 		return -ENOMEM;

-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_virtual_crtc_funcs);

 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
 	amdgpu_crtc->crtc_id = index;
@@ -374,24 +374,24 @@ static int dce_virtual_sw_init(void *handle)
 	if (r)
 		return r;

-	adev->ddev->max_vblank_count = 0;
+	adev_to_drm(adev)->max_vblank_count = 0;

-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

 	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

 	/* allocate crtcs, encoders, connectors */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -403,7 +403,7 @@ static int dce_virtual_sw_init(void *handle)
 			return r;
 	}

-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));

 	adev->mode_info.mode_config_initialized = true;
 	return 0;
@@ -415,9 +415,9 @@ static int dce_virtual_sw_fini(void *handle)
 	kfree(adev->mode_info.bios_hardcoded_edid);

-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));

-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
 	/* clear crtcs pointer to avoid dce irq finish routine access freed data */
 	memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS);
 	adev->mode_info.mode_config_initialized = false;
@@ -602,7 +602,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
 	if (!encoder)
 		return -ENOMEM;

 	encoder->possible_crtcs = 1 << index;
-	drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+	drm_encoder_init(adev_to_drm(adev), encoder, &dce_virtual_encoder_funcs,
 			 DRM_MODE_ENCODER_VIRTUAL, NULL);
 	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
@@ -613,7 +613,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
 	}

 	/* add a new connector */
-	drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+	drm_connector_init(adev_to_drm(adev), connector, &dce_virtual_connector_funcs,
 			   DRM_MODE_CONNECTOR_VIRTUAL);
 	drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -663,14 +663,14 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
 	if (amdgpu_crtc == NULL)
 		return 0;

-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 	works = amdgpu_crtc->pflip_works;
 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
 				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
 				 amdgpu_crtc->pflip_status,
 				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		return 0;
 	}
@@ -682,7 +682,7 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
 	if (works->event)
 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

 	drm_crtc_vblank_put(&amdgpu_crtc->base);
 	amdgpu_bo_unref(&works->old_abo);
@@ -697,7 +697,7 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla
 	struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
 						       struct amdgpu_crtc, vblank_timer);
 	struct drm_device *ddev = amdgpu_crtc->base.dev;
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);

 	drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
 	dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);

View File

@@ -251,7 +251,7 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
 	int i, count;

 	ddev = dev_get_drvdata(dev);
-	adev = ddev->dev_private;
+	adev = drm_to_adev(ddev);
 	count = 0;

 	for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
@@ -646,7 +646,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 				  uint64_t config,
 				  uint64_t *count)
 {
-	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
+	uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0;
 	*count = 0;

 	switch (adev->asic_type) {
View File

@@ -3307,6 +3307,29 @@ static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
 	adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
 }

+static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_NAVI10:
+		soc15_program_register_sequence(adev,
+						golden_settings_gc_rlc_spm_10_0_nv10,
+						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
+		break;
+	case CHIP_NAVI14:
+		soc15_program_register_sequence(adev,
+						golden_settings_gc_rlc_spm_10_1_nv14,
+						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
+		break;
+	case CHIP_NAVI12:
+		soc15_program_register_sequence(adev,
+						golden_settings_gc_rlc_spm_10_1_2_nv12,
+						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
+		break;
+	default:
+		break;
+	}
+}
+
 static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
@@ -3317,9 +3340,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_10_0_nv10,
 						(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
-		soc15_program_register_sequence(adev,
-						golden_settings_gc_rlc_spm_10_0_nv10,
-						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
 		break;
 	case CHIP_NAVI14:
 		soc15_program_register_sequence(adev,
@@ -3328,9 +3348,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_10_1_nv14,
 						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
-		soc15_program_register_sequence(adev,
-						golden_settings_gc_rlc_spm_10_1_nv14,
-						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
 		break;
 	case CHIP_NAVI12:
 		soc15_program_register_sequence(adev,
@@ -3339,9 +3356,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_10_1_2_nv12,
 						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
-		soc15_program_register_sequence(adev,
-						golden_settings_gc_rlc_spm_10_1_2_nv12,
-						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
 		break;
 	case CHIP_SIENNA_CICHLID:
 		soc15_program_register_sequence(adev,
@@ -3360,6 +3374,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 	default:
 		break;
 	}
+	gfx_v10_0_init_spm_golden_registers(adev);
 }

 static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
@@ -4022,22 +4037,24 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
 	amdgpu_gfx_compute_queue_acquire(adev);
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;

-	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
-				      &adev->gfx.mec.hpd_eop_obj,
-				      &adev->gfx.mec.hpd_eop_gpu_addr,
-				      (void **)&hpd);
-	if (r) {
-		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
-		gfx_v10_0_mec_fini(adev);
-		return r;
+	if (mec_hpd_size) {
+		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_GTT,
+					      &adev->gfx.mec.hpd_eop_obj,
+					      &adev->gfx.mec.hpd_eop_gpu_addr,
+					      (void **)&hpd);
+		if (r) {
+			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+			gfx_v10_0_mec_fini(adev);
+			return r;
+		}
+
+		memset(hpd, 0, mec_hpd_size);
+
+		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
 	}

-	memset(hpd, 0, mec_hpd_size);
-
-	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
-	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 		mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
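The new if (mec_hpd_size) guard matters because the size can now legitimately be 0: the compute-ring count becomes user-controllable via amdgpu_num_kcq later in this file, and a zero-byte amdgpu_bo_create_reserved() request is not a meaningful allocation. Note that the function must still fall through to the MEC firmware setup, which is why the guard wraps the allocation rather than returning early. The skeleton of the change:

	mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		/* create, map, zero, then unmap/unreserve the EOP buffer */
	}
	/* MEC firmware setup continues regardless of mec_hpd_size */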
@@ -4147,6 +4164,7 @@ static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
 	.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
 	.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
 	.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
+	.init_spm_golden = &gfx_v10_0_init_spm_golden_registers,
 };

 static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
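Splitting the RLC SPM golden settings out of gfx_v10_0_init_golden_registers() and exposing them through adev->gfx.funcs lets IP-independent code re-apply just the SPM programming when those registers need to be restored, without re-running the full golden-register sequence. A plausible generic forwarder (the wrapper name is an assumption; only the .init_spm_golden hook itself is in this diff):

	/* Sketch: forward to the per-IP hook when the ASIC provides one. */
	static inline void amdgpu_gfx_init_spm_golden(struct amdgpu_device *adev)
	{
		if (adev->gfx.funcs && adev->gfx.funcs->init_spm_golden)
			adev->gfx.funcs->init_spm_golden(adev);
	}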
@@ -6180,7 +6198,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
 	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.gfx_ring[0];

-	if (!adev->in_gpu_reset && !adev->in_suspend) {
+	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
 		memset((void *)mqd, 0, sizeof(*mqd));
 		mutex_lock(&adev->srbm_mutex);
 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6192,7 +6210,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
 		mutex_unlock(&adev->srbm_mutex);
 		if (adev->gfx.me.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-	} else if (adev->in_gpu_reset) {
+	} else if (amdgpu_in_reset(adev)) {
 		/* reset mqd with the backup copy */
 		if (adev->gfx.me.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
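Every open-coded in_gpu_reset test in these queue-init paths now goes through amdgpu_in_reset(). A minimal sketch of the predicate, assuming the flag became an atomic on struct amdgpu_device so that concurrent reset and init paths observe it coherently:

	/* Sketch only: one central reset predicate instead of raw field reads. */
	static inline bool amdgpu_in_reset(struct amdgpu_device *adev)
	{
		return atomic_read(&adev->in_gpu_reset) != 0;
	}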
@@ -6433,6 +6451,10 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
 	int j;

+	/* inactivate the queue */
+	if (amdgpu_sriov_vf(adev))
+		WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
+
 	/* disable wptr polling */
 	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
@@ -6541,7 +6563,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
 	gfx_v10_0_kiq_setting(ring);

-	if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -6577,7 +6599,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];

-	if (!adev->in_gpu_reset && !adev->in_suspend) {
+	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
 		memset((void *)mqd, 0, sizeof(*mqd));
 		mutex_lock(&adev->srbm_mutex);
 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6587,7 +6609,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -7033,8 +7055,7 @@ static int gfx_v10_0_soft_reset(void *handle)
 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
 		   GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
-		   GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK
-		   | GRBM_STATUS__BCI_BUSY_MASK)) {
+		   GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) {
 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
 						GRBM_SOFT_RESET, SOFT_RESET_CP,
 						1);
@@ -7159,7 +7180,7 @@ static int gfx_v10_0_early_init(void *handle)
 		break;
 	}

-	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+	adev->gfx.num_compute_rings = amdgpu_num_kcq;

 	gfx_v10_0_set_kiq_pm4_funcs(adev);
 	gfx_v10_0_set_ring_funcs(adev);
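With this change AMDGPU_MAX_COMPUTE_RINGS is only an upper bound; the actual count comes from the amdgpu_num_kcq module parameter, so the value has to be sanitized once during device init before the per-IP early_init functions consume it. A hedged sketch of that validation (the parameter plumbing is outside this hunk):

	/* Sketch: clamp a user-supplied KCQ count to what the MEC can back. */
	if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		amdgpu_num_kcq = 8;
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
	}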
@@ -7427,7 +7448,6 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 	    (AMD_CG_SUPPORT_GFX_MGCG |
 	     AMD_CG_SUPPORT_GFX_CGLS |
 	     AMD_CG_SUPPORT_GFX_CGCG |
-	     AMD_CG_SUPPORT_GFX_CGLS |
 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
 		gfx_v10_0_enable_gui_idle_interrupt(adev, enable);

View File

@@ -1343,22 +1343,23 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 	amdgpu_gfx_compute_queue_acquire(adev);

 	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
+	if (mec_hpd_size) {
+		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.mec.hpd_eop_obj,
+					      &adev->gfx.mec.hpd_eop_gpu_addr,
+					      (void **)&hpd);
+		if (r) {
+			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+			return r;
+		}

-	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      &adev->gfx.mec.hpd_eop_obj,
-				      &adev->gfx.mec.hpd_eop_gpu_addr,
-				      (void **)&hpd);
-	if (r) {
-		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
-		return r;
-	}
+		memset(hpd, 0, mec_hpd_size);

-	memset(hpd, 0, mec_hpd_size);
-
-	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
-	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+	}

 	return 0;
 }
@@ -4632,7 +4633,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 	gfx_v8_0_kiq_setting(ring);

-	if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4669,7 +4670,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct vi_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];

-	if (!adev->in_gpu_reset && !adev->in_suspend) {
+	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
 		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
 		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4681,7 +4682,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
-	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -5294,7 +5295,7 @@ static int gfx_v8_0_early_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

 	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
-	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+	adev->gfx.num_compute_rings = amdgpu_num_kcq;
 	adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
 	gfx_v8_0_set_ring_funcs(adev);
 	gfx_v8_0_set_irq_funcs(adev);
@@ -5342,10 +5343,9 @@ static int gfx_v8_0_late_init(void *handle)
 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
 						       bool enable)
 {
-	if (((adev->asic_type == CHIP_POLARIS11) ||
+	if ((adev->asic_type == CHIP_POLARIS11) ||
 	    (adev->asic_type == CHIP_POLARIS12) ||
-	    (adev->asic_type == CHIP_VEGAM)) &&
-	    adev->powerplay.pp_funcs->set_powergating_by_smu)
+	    (adev->asic_type == CHIP_VEGAM))
 		/* Send msg to SMU via Powerplay */
 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
@@ -5879,8 +5879,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
 					  PP_BLOCK_GFX_CG,
 					  pp_support_state,
 					  pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}

 	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5901,8 +5900,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
 					  PP_BLOCK_GFX_MG,
 					  pp_support_state,
 					  pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}

 	return 0;
@@ -5931,8 +5929,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
 					  PP_BLOCK_GFX_CG,
 					  pp_support_state,
 					  pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}

 	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
@@ -5951,8 +5948,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
 					  PP_BLOCK_GFX_3D,
 					  pp_support_state,
 					  pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}

 	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5973,8 +5969,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
 					  PP_BLOCK_GFX_MG,
 					  pp_support_state,
 					  pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}

 	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
@@ -5989,8 +5984,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
 					  PP_BLOCK_GFX_RLC,
 					  pp_support_state,
 					  pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}

 	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
@@ -6004,8 +5998,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
 					  PP_BLOCK_GFX_CP,
 					  pp_support_state,
 					  pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}

 	return 0;
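Dropping the repeated NULL checks on pp_funcs->set_clockgating_by_smu only works if the dpm wrapper validates its callback internally, presumably as part of the power-management reorganization in this series. A hedged sketch of such a centralized wrapper (signature inferred from the call sites above; the error convention is an assumption):

	static int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
						     uint32_t msg_id)
	{
		void *pp_handle = adev->powerplay.pp_handle;
		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

		if (!pp_funcs || !pp_funcs->set_clockgating_by_smu)
			return -EOPNOTSUPP;	/* assumed error convention */

		return pp_funcs->set_clockgating_by_smu(pp_handle, msg_id);
	}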

View File

@@ -1939,23 +1939,24 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 	/* take ownership of the relevant compute queues */
 	amdgpu_gfx_compute_queue_acquire(adev);
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
+	if (mec_hpd_size) {
+		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.mec.hpd_eop_obj,
+					      &adev->gfx.mec.hpd_eop_gpu_addr,
+					      (void **)&hpd);
+		if (r) {
+			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+			gfx_v9_0_mec_fini(adev);
+			return r;
+		}

-	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      &adev->gfx.mec.hpd_eop_obj,
-				      &adev->gfx.mec.hpd_eop_gpu_addr,
-				      (void **)&hpd);
-	if (r) {
-		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
-		gfx_v9_0_mec_fini(adev);
-		return r;
-	}
+		memset(hpd, 0, mec_hpd_size);

-	memset(hpd, 0, mec_hpd_size);
-
-	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
-	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+	}

 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

 	fw_data = (const __le32 *)
@ -3685,7 +3686,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v9_0_kiq_setting(ring); gfx_v9_0_kiq_setting(ring);
if (adev->in_gpu_reset) { /* for GPU_RESET case */ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */ /* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx]) if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@ -3723,7 +3724,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v9_mqd *mqd = ring->mqd_ptr; struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0]; int mqd_idx = ring - &adev->gfx.compute_ring[0];
if (!adev->in_gpu_reset && !adev->in_suspend) { if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@ -3735,7 +3736,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx]) if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
} else if (adev->in_gpu_reset) { /* for GPU_RESET case */ } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */ /* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx]) if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@ -3929,7 +3930,7 @@ static int gfx_v9_0_hw_fini(void *handle)
/* Use deinitialize sequence from CAIL when unbinding device from driver, /* Use deinitialize sequence from CAIL when unbinding device from driver,
* otherwise KIQ is hanging when binding back * otherwise KIQ is hanging when binding back
*/ */
if (!adev->in_gpu_reset && !adev->in_suspend) { if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
mutex_lock(&adev->srbm_mutex); mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, adev->gfx.kiq.ring.me, soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
adev->gfx.kiq.ring.pipe, adev->gfx.kiq.ring.pipe,
@ -4087,7 +4088,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
* *
* also don't wait anymore for IRQ context * also don't wait anymore for IRQ context
* */ * */
if (r < 1 && (adev->in_gpu_reset || in_interrupt())) if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
goto failed_kiq_read; goto failed_kiq_read;
might_sleep(); might_sleep();
@ -4626,7 +4627,7 @@ static int gfx_v9_0_early_init(void *handle)
adev->gfx.num_gfx_rings = 0; adev->gfx.num_gfx_rings = 0;
else else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; adev->gfx.num_compute_rings = amdgpu_num_kcq;
gfx_v9_0_set_kiq_pm4_funcs(adev); gfx_v9_0_set_kiq_pm4_funcs(adev);
gfx_v9_0_set_ring_funcs(adev); gfx_v9_0_set_ring_funcs(adev);
gfx_v9_0_set_irq_funcs(adev); gfx_v9_0_set_irq_funcs(adev);
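The recurring in_gpu_reset → amdgpu_in_reset() substitution suggests the flag became an atomic behind a helper. A plausible sketch, assuming the field was converted to an atomic counter (illustrative, not the authoritative definition in amdgpu.h):

```c
/* Sketch only: an atomic "reset in flight" test replacing an ad-hoc bool. */
static inline bool amdgpu_in_reset_sketch(struct amdgpu_device *adev)
{
	/* Callers must not cache the result; it can flip at any time. */
	return atomic_read(&adev->in_gpu_reset) != 0;
}
```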

drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c (executable file → normal file)

@@ -57,10 +57,10 @@ static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
 	/* SPI */
 	{ SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
 	/* SQ */
-	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 },
-	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 },
-	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 },
-	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 8, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 8, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 8, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 8, 16 },
 	/* SQC */
 	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
 	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },

drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c

@@ -31,6 +31,77 @@
 #include "soc15_common.h"
static const char *gfxhub_client_ids[] = {
"CB/DB",
"Reserved",
"GE1",
"GE2",
"CPF",
"CPC",
"CPG",
"RLC",
"TCP",
"SQC (inst)",
"SQC (data)",
"SQG",
"Reserved",
"SDMA0",
"SDMA1",
"GCR",
"SDMA2",
"SDMA3",
};
static uint32_t gfxhub_v2_0_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{
u32 req = 0;
/* invalidate using legacy mode on vmid*/
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
PER_VMID_INVALIDATE_REQ, 1 << vmid);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
return req;
}
static void
gfxhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
uint32_t status)
{
u32 cid = REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, CID);
dev_err(adev->dev,
"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
dev_err(adev->dev, "\t RW: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, RW));
}
 u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
 {
 	u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
@@ -360,6 +431,11 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
 	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = {
.print_l2_protection_fault_status = gfxhub_v2_0_print_l2_protection_fault_status,
.get_invalidate_req = gfxhub_v2_0_get_invalidate_req,
};
 void gfxhub_v2_0_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -390,4 +466,14 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
 			mmGCVM_INVALIDATE_ENG0_REQ;
 	hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
 			mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs;
 }
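The new per-hub vmhub_funcs table lets common GMC code dispatch without naming a gfxhub generation. A minimal usage sketch (the function below is illustrative; the real callers appear in the gmc_v10_0.c hunks later in this series):

```c
/* Sketch: generation-agnostic fault reporting through the per-hub vtable. */
static void report_fault_sketch(struct amdgpu_device *adev, int vmhub, u32 status)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];

	/* Dispatch instead of calling gfxhub_v2_0_* directly. */
	hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
}
```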

drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c

@@ -31,6 +31,77 @@
 #include "soc15_common.h"
static const char *gfxhub_client_ids[] = {
"CB/DB",
"Reserved",
"GE1",
"GE2",
"CPF",
"CPC",
"CPG",
"RLC",
"TCP",
"SQC (inst)",
"SQC (data)",
"SQG",
"Reserved",
"SDMA0",
"SDMA1",
"GCR",
"SDMA2",
"SDMA3",
};
static uint32_t gfxhub_v2_1_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{
u32 req = 0;
/* invalidate using legacy mode on vmid*/
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
PER_VMID_INVALIDATE_REQ, 1 << vmid);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
return req;
}
static void
gfxhub_v2_1_print_l2_protection_fault_status(struct amdgpu_device *adev,
uint32_t status)
{
u32 cid = REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, CID);
dev_err(adev->dev,
"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
dev_err(adev->dev, "\t RW: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, RW));
}
 u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
 {
 	u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
@@ -378,6 +449,11 @@ void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
 	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = {
.print_l2_protection_fault_status = gfxhub_v2_1_print_l2_protection_fault_status,
.get_invalidate_req = gfxhub_v2_1_get_invalidate_req,
};
 void gfxhub_v2_1_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -408,6 +484,16 @@ void gfxhub_v2_1_init(struct amdgpu_device *adev)
 			mmGCVM_INVALIDATE_ENG0_REQ;
 	hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
 			mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
hub->vmhub_funcs = &gfxhub_v2_1_vmhub_funcs;
 }

 int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)

drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c

@@ -25,11 +25,10 @@
 #include "amdgpu.h"
 #include "amdgpu_atomfirmware.h"
 #include "gmc_v10_0.h"
+#include "umc_v8_7.h"

 #include "hdp/hdp_5_0_0_offset.h"
 #include "hdp/hdp_5_0_0_sh_mask.h"
-#include "gc/gc_10_1_0_sh_mask.h"
-#include "mmhub/mmhub_2_0_0_sh_mask.h"
 #include "athub/athub_2_0_0_sh_mask.h"
 #include "athub/athub_2_0_0_offset.h"
 #include "dcn/dcn_2_0_0_offset.h"
@@ -57,68 +56,31 @@ static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
 };
 #endif

+static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
+					 struct amdgpu_irq_src *src,
+					 unsigned type,
+					 enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
 static int
 gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 				   struct amdgpu_irq_src *src, unsigned type,
 				   enum amdgpu_interrupt_state state)
 {
-	struct amdgpu_vmhub *hub;
-	u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;
-
-	bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
-
-	bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
-
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
 		/* MM HUB */
-		hub = &adev->vmhub[AMDGPU_MMHUB_0];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + hub->ctx_distance * i;
-			tmp = RREG32(reg);
-			tmp &= ~bits[AMDGPU_MMHUB_0];
-			WREG32(reg, tmp);
-		}
-
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
 		/* GFX HUB */
-		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + hub->ctx_distance * i;
-			tmp = RREG32(reg);
-			tmp &= ~bits[AMDGPU_GFXHUB_0];
-			WREG32(reg, tmp);
-		}
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
 		/* MM HUB */
-		hub = &adev->vmhub[AMDGPU_MMHUB_0];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + hub->ctx_distance * i;
-			tmp = RREG32(reg);
-			tmp |= bits[AMDGPU_MMHUB_0];
-			WREG32(reg, tmp);
-		}
-
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
 		/* GFX HUB */
-		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + hub->ctx_distance * i;
-			tmp = RREG32(reg);
-			tmp |= bits[AMDGPU_GFXHUB_0];
-			WREG32(reg, tmp);
-		}
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
 		break;
 	default:
 		break;
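The removed per-hub register loops move behind a shared helper. A rough sketch of what such a helper plausibly does, assuming it reuses the vm_cntx_cntl_vm_fault mask that the gfxhub init hunks above now store per hub (the real body lives in amdgpu_gmc.c):

```c
/* Illustrative helper shape; mirrors the loops deleted above. */
static void set_vm_fault_masks_sketch(struct amdgpu_device *adev, int hub_type,
				      bool enable)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[hub_type];
	u32 i, reg, tmp;

	for (i = 0; i < 16; i++) {
		reg = hub->vm_context0_cntl + hub->ctx_distance * i;
		tmp = RREG32(reg);
		if (enable)
			tmp |= hub->vm_cntx_cntl_vm_fault;	/* mask filled in at *_init() */
		else
			tmp &= ~hub->vm_cntx_cntl_vm_fault;
		WREG32(reg, tmp);
	}
}
```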
@@ -166,29 +128,8 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
 			 task_info.task_name, task_info.pid);
 		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
 			addr, entry->client_id);
-	if (!amdgpu_sriov_vf(adev)) {
-		dev_err(adev->dev,
-			"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
-			status);
-		dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
-			REG_GET_FIELD(status,
-			GCVM_L2_PROTECTION_FAULT_STATUS, CID));
-		dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
-			REG_GET_FIELD(status,
-			GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
-		dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
-			REG_GET_FIELD(status,
-			GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
-		dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
-			REG_GET_FIELD(status,
-			GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
-		dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
-			REG_GET_FIELD(status,
-			GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
-		dev_err(adev->dev, "\t RW: 0x%lx\n",
-			REG_GET_FIELD(status,
-			GCVM_L2_PROTECTION_FAULT_STATUS, RW));
-	}
+	if (!amdgpu_sriov_vf(adev))
+		hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
 	}

 	return 0;
@@ -199,30 +140,20 @@ static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
 	.process = gmc_v10_0_process_interrupt,
 };

+static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
+	.set = gmc_v10_0_ecc_interrupt_state,
+	.process = amdgpu_umc_process_ecc_irq,
+};
+
 static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
 {
 	adev->gmc.vm_fault.num_types = 1;
 	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
-}

-static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
-					     uint32_t flush_type)
-{
-	u32 req = 0;
-
-	/* invalidate using legacy mode on vmid*/
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
-			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
-			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
-
-	return req;
+	if (!amdgpu_sriov_vf(adev)) {
+		adev->gmc.ecc_irq.num_types = 1;
+		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
+	}
 }

 /**
@@ -265,7 +196,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 {
 	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
-	u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
 	u32 tmp;
 	/* Use register 17 for GART */
 	const unsigned eng = 17;
@@ -356,16 +287,17 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 */
 	if (adev->gfx.kiq.ring.sched.ready &&
 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-	    !adev->in_gpu_reset) {
+	    down_read_trylock(&adev->reset_sem)) {
 		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
 		const unsigned eng = 17;
-		u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
 		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
 		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 				1 << vmid);
+		up_read(&adev->reset_sem);

 		return;
 	}
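The bool test becomes a rwsem trylock: a reset holds reset_sem for writing, so readers that fail the trylock simply take the slow path instead of racing the reset. A minimal standalone sketch of the pattern (the two flush helpers here are hypothetical placeholders):

```c
/* Sketch of the trylock pattern: skip the KIQ fast path while a reset runs. */
static void flush_tlb_sketch(struct amdgpu_device *adev)
{
	if (down_read_trylock(&adev->reset_sem)) {
		/* A reset cannot begin while we hold the read side. */
		do_kiq_flush(adev);		/* hypothetical fast path */
		up_read(&adev->reset_sem);
		return;
	}

	do_mmio_flush(adev);			/* hypothetical fallback path */
}
```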
@@ -381,7 +313,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	if (!adev->mman.buffer_funcs_enabled ||
 	    !adev->ib_pool_ready ||
-	    adev->in_gpu_reset ||
+	    amdgpu_in_reset(adev) ||
 	    ring->sched.ready == false) {
 		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
 		mutex_unlock(&adev->mman.gtt_window_lock);
@@ -459,7 +391,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		spin_unlock(&adev->gfx.kiq.ring_lock);
 		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
 		if (r < 1) {
-			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
 			return -ETIME;
 		}
@@ -491,7 +423,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 {
 	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
+	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
 	unsigned eng = ring->vm_inv_eng;

 	/*
@@ -641,6 +573,28 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
 	}
 }
static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
unsigned size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport;
u32 pitch;
viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
size = (REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
4);
}
return size;
}
 static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
 	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
@@ -648,7 +602,8 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
 	.map_mtype = gmc_v10_0_map_mtype,
 	.get_vm_pde = gmc_v10_0_get_vm_pde,
-	.get_vm_pte = gmc_v10_0_get_vm_pte
+	.get_vm_pte = gmc_v10_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
 };

 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -657,12 +612,36 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
 		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
 }
static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
adev->umc.funcs = &umc_v8_7_funcs;
break;
default:
break;
}
}
static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
adev->mmhub.funcs = &mmhub_v2_0_funcs;
}
 static int gmc_v10_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+	gmc_v10_0_set_mmhub_funcs(adev);
 	gmc_v10_0_set_gmc_funcs(adev);
 	gmc_v10_0_set_irq_funcs(adev);
+	gmc_v10_0_set_umc_funcs(adev);

 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
 	adev->gmc.shared_aperture_end =
@@ -685,6 +664,10 @@ static int gmc_v10_0_late_init(void *handle)
 	if (r)
 		return r;

+	r = amdgpu_gmc_ras_late_init(adev);
+	if (r)
+		return r;
+
 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 }
@@ -789,36 +772,6 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
 	return amdgpu_gart_table_vram_alloc(adev);
 }

-static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
-{
-	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
-	unsigned size;
-
-	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
-		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
-	} else {
-		u32 viewport;
-		u32 pitch;
-
-		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
-		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
-		size = (REG_GET_FIELD(viewport,
-					HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
-				REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
-				4);
-	}
-
-	/* return 0 if the pre-OS buffer uses up most of vram */
-	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
-		DRM_ERROR("Warning: pre-OS buffer uses most of vram, \
-				be aware of gart table overwrite\n");
-		return 0;
-	}
-
-	return size;
-}
-
 static int gmc_v10_0_sw_init(void *handle)
 {
 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
@@ -830,7 +783,7 @@ static int gmc_v10_0_sw_init(void *handle)
 	else
 		gfxhub_v2_0_init(adev);

-	mmhub_v2_0_init(adev);
+	adev->mmhub.funcs->init(adev);

 	spin_lock_init(&adev->gmc.invalidate_lock);
@@ -878,6 +831,14 @@ static int gmc_v10_0_sw_init(void *handle)
 	if (r)
 		return r;
if (!amdgpu_sriov_vf(adev)) {
/* interrupt sent to DF. */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
&adev->gmc.ecc_irq);
if (r)
return r;
}
 	/*
 	 * Set the internal MC address mask This is the max address of the GPU's
 	 * internal address space.
@@ -900,7 +861,7 @@ static int gmc_v10_0_sw_init(void *handle)
 	if (r)
 		return r;

-	adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
+	amdgpu_gmc_get_vbios_allocations(adev);

 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
@@ -991,7 +952,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;

-	r = mmhub_v2_0_gart_enable(adev);
+	r = adev->mmhub.funcs->gart_enable(adev);
 	if (r)
 		return r;

@@ -1013,7 +974,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 		gfxhub_v2_1_set_fault_enable_default(adev, value);
 	else
 		gfxhub_v2_0_set_fault_enable_default(adev, value);
-	mmhub_v2_0_set_fault_enable_default(adev, value);
+	adev->mmhub.funcs->set_fault_enable_default(adev, value);
 	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
 	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

@@ -1038,6 +999,9 @@ static int gmc_v10_0_hw_init(void *handle)
 	if (r)
 		return r;

+	if (adev->umc.funcs && adev->umc.funcs->init_registers)
+		adev->umc.funcs->init_registers(adev);
+
 	return 0;
 }

@@ -1055,7 +1019,7 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
 		gfxhub_v2_1_gart_disable(adev);
 	else
 		gfxhub_v2_0_gart_disable(adev);
-	mmhub_v2_0_gart_disable(adev);
+	adev->mmhub.funcs->gart_disable(adev);
 	amdgpu_gart_table_vram_unpin(adev);
 }

@@ -1069,6 +1033,7 @@ static int gmc_v10_0_hw_fini(void *handle)
 		return 0;
 	}

+	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 	gmc_v10_0_gart_disable(adev);

@@ -1121,7 +1086,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	r = mmhub_v2_0_set_clockgating(adev, state);
+	r = adev->mmhub.funcs->set_clockgating(adev, state);
 	if (r)
 		return r;

@@ -1136,7 +1101,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	mmhub_v2_0_get_clockgating(adev, flags);
+	adev->mmhub.funcs->get_clockgating(adev, flags);

 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
 	    adev->asic_type == CHIP_NAVY_FLOUNDER)

drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c

@@ -805,16 +805,13 @@ static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
 	unsigned size;

 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
-		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 	} else {
 		u32 viewport = RREG32(mmVIEWPORT_SIZE);
 		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 			4);
 	}
-	/* return 0 if the pre-OS buffer uses up most of vram */
-	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
-		return 0;
+
 	return size;
 }

@@ -862,7 +859,7 @@ static int gmc_v6_0_sw_init(void *handle)
 	if (r)
 		return r;

-	adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);
+	amdgpu_gmc_get_vbios_allocations(adev);

 	r = amdgpu_bo_init(adev);
 	if (r)
@@ -1136,6 +1133,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
 	.set_prt = gmc_v6_0_set_prt,
 	.get_vm_pde = gmc_v6_0_get_vm_pde,
 	.get_vm_pte = gmc_v6_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
 };

 static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
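The magic 9 MB becomes a named constant. Judging by the old inline comment (8 MB for the VGA emulator plus 1 MB for the FB) it plausibly expands as below; this is an assumption, since the header defining it is not part of the hunks shown here:

```c
/* Assumed definition, matching the deleted "8MB vga emulator + 1 MB FB" comment. */
#define AMDGPU_VBIOS_VGA_ALLOCATION	(9 * 1024 * 1024)
```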

drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -434,7 +434,7 @@ static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	int vmid;
 	unsigned int tmp;

-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EIO;

 	for (vmid = 1; vmid < 16; vmid++) {
@@ -970,16 +970,14 @@ static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
 	unsigned size;

 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
-		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 	} else {
 		u32 viewport = RREG32(mmVIEWPORT_SIZE);
 		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 			4);
 	}
-	/* return 0 if the pre-OS buffer uses up most of vram */
-	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
-		return 0;
+
 	return size;
 }

@@ -1035,7 +1033,7 @@ static int gmc_v7_0_sw_init(void *handle)
 	if (r)
 		return r;

-	adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev);
+	amdgpu_gmc_get_vbios_allocations(adev);

 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
@@ -1372,7 +1370,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
 	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
 	.set_prt = gmc_v7_0_set_prt,
 	.get_vm_pde = gmc_v7_0_get_vm_pde,
-	.get_vm_pte = gmc_v7_0_get_vm_pte
+	.get_vm_pte = gmc_v7_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size,
 };

 static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {

drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -635,7 +635,7 @@ static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	int vmid;
 	unsigned int tmp;

-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EIO;

 	for (vmid = 1; vmid < 16; vmid++) {
@@ -1087,16 +1087,14 @@ static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
 	unsigned size;

 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
-		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 	} else {
 		u32 viewport = RREG32(mmVIEWPORT_SIZE);
 		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 			4);
 	}
-	/* return 0 if the pre-OS buffer uses up most of vram */
-	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
-		return 0;
+
 	return size;
 }

@@ -1160,7 +1158,7 @@ static int gmc_v8_0_sw_init(void *handle)
 	if (r)
 		return r;

-	adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);
+	amdgpu_gmc_get_vbios_allocations(adev);

 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
@@ -1739,7 +1737,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
 	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
 	.set_prt = gmc_v8_0_set_prt,
 	.get_vm_pde = gmc_v8_0_get_vm_pde,
-	.get_vm_pte = gmc_v8_0_get_vm_pte
+	.get_vm_pte = gmc_v8_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
 };

 static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {

drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@@ -67,6 +67,222 @@
 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
static const char *gfxhub_client_ids[] = {
"CB",
"DB",
"IA",
"WD",
"CPF",
"CPC",
"CPG",
"RLC",
"TCP",
"SQC (inst)",
"SQC (data)",
"SQG",
"PA",
};
static const char *mmhub_client_ids_raven[][2] = {
[0][0] = "MP1",
[1][0] = "MP0",
[2][0] = "VCN",
[3][0] = "VCNU",
[4][0] = "HDP",
[5][0] = "DCE",
[13][0] = "UTCL2",
[19][0] = "TLS",
[26][0] = "OSS",
[27][0] = "SDMA0",
[0][1] = "MP1",
[1][1] = "MP0",
[2][1] = "VCN",
[3][1] = "VCNU",
[4][1] = "HDP",
[5][1] = "XDP",
[6][1] = "DBGU0",
[7][1] = "DCE",
[8][1] = "DCEDWB0",
[9][1] = "DCEDWB1",
[26][1] = "OSS",
[27][1] = "SDMA0",
};
static const char *mmhub_client_ids_renoir[][2] = {
[0][0] = "MP1",
[1][0] = "MP0",
[2][0] = "HDP",
[4][0] = "DCEDMC",
[5][0] = "DCEVGA",
[13][0] = "UTCL2",
[19][0] = "TLS",
[26][0] = "OSS",
[27][0] = "SDMA0",
[28][0] = "VCN",
[29][0] = "VCNU",
[30][0] = "JPEG",
[0][1] = "MP1",
[1][1] = "MP0",
[2][1] = "HDP",
[3][1] = "XDP",
[6][1] = "DBGU0",
[7][1] = "DCEDMC",
[8][1] = "DCEVGA",
[9][1] = "DCEDWB",
[26][1] = "OSS",
[27][1] = "SDMA0",
[28][1] = "VCN",
[29][1] = "VCNU",
[30][1] = "JPEG",
};
static const char *mmhub_client_ids_vega10[][2] = {
[0][0] = "MP0",
[1][0] = "UVD",
[2][0] = "UVDU",
[3][0] = "HDP",
[13][0] = "UTCL2",
[14][0] = "OSS",
[15][0] = "SDMA1",
[32+0][0] = "VCE0",
[32+1][0] = "VCE0U",
[32+2][0] = "XDMA",
[32+3][0] = "DCE",
[32+4][0] = "MP1",
[32+14][0] = "SDMA0",
[0][1] = "MP0",
[1][1] = "UVD",
[2][1] = "UVDU",
[3][1] = "DBGU0",
[4][1] = "HDP",
[5][1] = "XDP",
[14][1] = "OSS",
[15][1] = "SDMA0",
[32+0][1] = "VCE0",
[32+1][1] = "VCE0U",
[32+2][1] = "XDMA",
[32+3][1] = "DCE",
[32+4][1] = "DCEDWB",
[32+5][1] = "MP1",
[32+6][1] = "DBGU1",
[32+14][1] = "SDMA1",
};
static const char *mmhub_client_ids_vega12[][2] = {
[0][0] = "MP0",
[1][0] = "VCE0",
[2][0] = "VCE0U",
[3][0] = "HDP",
[13][0] = "UTCL2",
[14][0] = "OSS",
[15][0] = "SDMA1",
[32+0][0] = "DCE",
[32+1][0] = "XDMA",
[32+2][0] = "UVD",
[32+3][0] = "UVDU",
[32+4][0] = "MP1",
[32+15][0] = "SDMA0",
[0][1] = "MP0",
[1][1] = "VCE0",
[2][1] = "VCE0U",
[3][1] = "DBGU0",
[4][1] = "HDP",
[5][1] = "XDP",
[14][1] = "OSS",
[15][1] = "SDMA0",
[32+0][1] = "DCE",
[32+1][1] = "DCEDWB",
[32+2][1] = "XDMA",
[32+3][1] = "UVD",
[32+4][1] = "UVDU",
[32+5][1] = "MP1",
[32+6][1] = "DBGU1",
[32+15][1] = "SDMA1",
};
static const char *mmhub_client_ids_vega20[][2] = {
[0][0] = "XDMA",
[1][0] = "DCE",
[2][0] = "VCE0",
[3][0] = "VCE0U",
[4][0] = "UVD",
[5][0] = "UVD1U",
[13][0] = "OSS",
[14][0] = "HDP",
[15][0] = "SDMA0",
[32+0][0] = "UVD",
[32+1][0] = "UVDU",
[32+2][0] = "MP1",
[32+3][0] = "MP0",
[32+12][0] = "UTCL2",
[32+14][0] = "SDMA1",
[0][1] = "XDMA",
[1][1] = "DCE",
[2][1] = "DCEDWB",
[3][1] = "VCE0",
[4][1] = "VCE0U",
[5][1] = "UVD1",
[6][1] = "UVD1U",
[7][1] = "DBGU0",
[8][1] = "XDP",
[13][1] = "OSS",
[14][1] = "HDP",
[15][1] = "SDMA0",
[32+0][1] = "UVD",
[32+1][1] = "UVDU",
[32+2][1] = "DBGU1",
[32+3][1] = "MP1",
[32+4][1] = "MP0",
[32+14][1] = "SDMA1",
};
static const char *mmhub_client_ids_arcturus[][2] = {
[2][0] = "MP1",
[3][0] = "MP0",
[10][0] = "UTCL2",
[13][0] = "OSS",
[14][0] = "HDP",
[15][0] = "SDMA0",
[32+15][0] = "SDMA1",
[64+15][0] = "SDMA2",
[96+15][0] = "SDMA3",
[128+15][0] = "SDMA4",
[160+11][0] = "JPEG",
[160+12][0] = "VCN",
[160+13][0] = "VCNU",
[160+15][0] = "SDMA5",
[192+10][0] = "UTCL2",
[192+11][0] = "JPEG1",
[192+12][0] = "VCN1",
[192+13][0] = "VCN1U",
[192+15][0] = "SDMA6",
[224+15][0] = "SDMA7",
[0][1] = "DBGU1",
[1][1] = "XDP",
[2][1] = "MP1",
[3][1] = "MP0",
[13][1] = "OSS",
[14][1] = "HDP",
[15][1] = "SDMA0",
[32+15][1] = "SDMA1",
[32+15][1] = "SDMA1",
[64+15][1] = "SDMA2",
[96+15][1] = "SDMA3",
[128+15][1] = "SDMA4",
[160+11][1] = "JPEG",
[160+12][1] = "VCN",
[160+13][1] = "VCNU",
[160+15][1] = "SDMA5",
[192+11][1] = "JPEG1",
[192+12][1] = "VCN1",
[192+13][1] = "VCN1U",
[192+15][1] = "SDMA6",
[224+15][1] = "SDMA7",
};
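These tables are indexed by the fault's client ID and the RW bit of the fault status; sparse designated initializers leave the gaps as NULL. A sketch of the lookup, mirroring the switch that appears later in gmc_v9_0_process_interrupt (the bounds check here is an extra safety not present in the switch itself):

```c
/* Sketch: translate (cid, rw) into a printable client name. */
static const char *mmhub_cid_name_sketch(const char *(*table)[2], size_t n,
					 unsigned cid, unsigned rw)
{
	if (cid >= n || !table[cid][rw])
		return "unknown";	/* unlisted entries decode as NULL */
	return table[cid][rw];
}

/* Usage: mmhub_cid_name_sketch(mmhub_client_ids_vega10,
 *                              ARRAY_SIZE(mmhub_client_ids_vega10), cid, rw); */
```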
 static const u32 golden_settings_vega10_hdp[] =
 {
@@ -300,9 +516,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 {
 	struct amdgpu_vmhub *hub;
 	bool retry_fault = !!(entry->src_data[1] & 0x80);
-	uint32_t status = 0;
+	uint32_t status = 0, cid = 0, rw = 0;
 	u64 addr;
 	char hub_name[10];
+	const char *mmhub_cid;

 	addr = (u64)entry->src_data[0] << 12;
 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
@@ -337,6 +554,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 		RREG32(hub->vm_l2_pro_fault_status);

 		status = RREG32(hub->vm_l2_pro_fault_status);
+		cid = REG_GET_FIELD(status,
+				    VM_L2_PROTECTION_FAULT_STATUS, CID);
+		rw = REG_GET_FIELD(status,
+				   VM_L2_PROTECTION_FAULT_STATUS, RW);
 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
 	}
@@ -359,9 +580,37 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 		dev_err(adev->dev,
 			"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
 			status);
-		dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
-			REG_GET_FIELD(status,
-			VM_L2_PROTECTION_FAULT_STATUS, CID));
+		if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
+			dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+				cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+				cid);
+		} else {
+			switch (adev->asic_type) {
+			case CHIP_VEGA10:
+				mmhub_cid = mmhub_client_ids_vega10[cid][rw];
+				break;
+			case CHIP_VEGA12:
+				mmhub_cid = mmhub_client_ids_vega12[cid][rw];
+				break;
+			case CHIP_VEGA20:
+				mmhub_cid = mmhub_client_ids_vega20[cid][rw];
+				break;
+			case CHIP_ARCTURUS:
+				mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
+				break;
+			case CHIP_RAVEN:
+				mmhub_cid = mmhub_client_ids_raven[cid][rw];
+				break;
+			case CHIP_RENOIR:
+				mmhub_cid = mmhub_client_ids_renoir[cid][rw];
+				break;
+			default:
+				mmhub_cid = NULL;
+				break;
+			}
+			dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+				mmhub_cid ? mmhub_cid : "unknown", cid);
+		}
 		dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
 			REG_GET_FIELD(status,
 			VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -374,10 +623,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 		dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
 			REG_GET_FIELD(status,
 			VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
-		dev_err(adev->dev, "\t RW: 0x%lx\n",
-			REG_GET_FIELD(status,
-			VM_L2_PROTECTION_FAULT_STATUS, RW));
+		dev_err(adev->dev, "\t RW: 0x%x\n", rw);
 	}
 }
@@ -500,13 +746,14 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 * as GFXOFF under bare metal
 	 */
 	if (adev->gfx.kiq.ring.sched.ready &&
 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-	    !adev->in_gpu_reset) {
+	    down_read_trylock(&adev->reset_sem)) {
 		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
 		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 						   1 << vmid);
+		up_read(&adev->reset_sem);
 		return;
 	}

@@ -596,10 +843,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EIO;

-	if (ring->sched.ready) {
+	if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
 		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
 		 * heavy-weight TLB flush (type 2), which flushes
 		 * both. Due to a race condition with concurrent
@@ -626,6 +873,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		if (r) {
 			amdgpu_ring_undo(ring);
 			spin_unlock(&adev->gfx.kiq.ring_lock);
+			up_read(&adev->reset_sem);
 			return -ETIME;
 		}

@@ -633,10 +881,11 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		spin_unlock(&adev->gfx.kiq.ring_lock);
 		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
 		if (r < 1) {
-			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
+			up_read(&adev->reset_sem);
 			return -ETIME;
 		}
+		up_read(&adev->reset_sem);

 		return 0;
 	}
@@ -826,6 +1075,41 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
 		*flags |= AMDGPU_PTE_SNOOPED;
 }
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
unsigned size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport;
switch (adev->asic_type) {
case CHIP_RAVEN:
case CHIP_RENOIR:
viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
size = (REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
4);
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
default:
viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
break;
}
}
return size;
}
 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
 	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
@@ -833,7 +1117,8 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
 	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
 	.map_mtype = gmc_v9_0_map_mtype,
 	.get_vm_pde = gmc_v9_0_get_vm_pde,
-	.get_vm_pte = gmc_v9_0_get_vm_pte
+	.get_vm_pte = gmc_v9_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
 };

 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -871,13 +1156,11 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
-	case CHIP_VEGA20:
-		adev->mmhub.funcs = &mmhub_v1_0_funcs;
-		break;
 	case CHIP_ARCTURUS:
 		adev->mmhub.funcs = &mmhub_v9_4_funcs;
 		break;
 	default:
+		adev->mmhub.funcs = &mmhub_v1_0_funcs;
 		break;
 	}
 }
@@ -901,38 +1184,12 @@ static int gmc_v9_0_early_init(void *handle)
 	return 0;
 }

-static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
-{
-	/*
-	 * TODO:
-	 * Currently there is a bug where some memory client outside
-	 * of the driver writes to first 8M of VRAM on S3 resume,
-	 * this overrides GART which by default gets placed in first 8M and
-	 * causes VM_FAULTS once GTT is accessed.
-	 * Keep the stolen memory reservation while this is not solved.
-	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
-	 */
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_RAVEN:
-	case CHIP_ARCTURUS:
-	case CHIP_RENOIR:
-		return true;
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
-	default:
-		return false;
-	}
-}
-
 static int gmc_v9_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;

-	if (!gmc_v9_0_keep_stolen_memory(adev))
-		amdgpu_bo_late_init(adev);
+	amdgpu_bo_late_init(adev);

 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
 	if (r)
@@ -969,10 +1226,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
 {
 	u64 base = 0;

-	if (adev->asic_type == CHIP_ARCTURUS)
-		base = mmhub_v9_4_get_fb_location(adev);
-	else if (!amdgpu_sriov_vf(adev))
-		base = mmhub_v1_0_get_fb_location(adev);
+	if (!amdgpu_sriov_vf(adev))
+		base = adev->mmhub.funcs->get_fb_location(adev);

 	/* add the xgmi offset of the physical node */
 	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
@@ -1066,50 +1321,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
 	return amdgpu_gart_table_vram_alloc(adev);
 }

-static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+/**
+ * gmc_v9_0_save_registers - saves regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This saves potential register values that should be
+ * restored upon resume
+ */
+static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
 {
-	u32 d1vga_control;
-	unsigned size;
-
-	/*
-	 * TODO Remove once GART corruption is resolved
-	 * Check related code in gmc_v9_0_sw_fini
-	 * */
-	if (gmc_v9_0_keep_stolen_memory(adev))
-		return 9 * 1024 * 1024;
-
-	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
-	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
-		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
-	} else {
-		u32 viewport;
-
-		switch (adev->asic_type) {
-		case CHIP_RAVEN:
-		case CHIP_RENOIR:
-			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
-			size = (REG_GET_FIELD(viewport,
-					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
-				REG_GET_FIELD(viewport,
-					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
-				4);
-			break;
-		case CHIP_VEGA10:
-		case CHIP_VEGA12:
-		case CHIP_VEGA20:
-		default:
-			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
-			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
-				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
-				4);
-			break;
-		}
-	}
-
-	/* return 0 if the pre-OS buffer uses up most of vram */
-	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
-		return 0;
-
-	return size;
+	if (adev->asic_type == CHIP_RAVEN)
+		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
 }
 static int gmc_v9_0_sw_init(void *handle)
@@ -1118,10 +1341,8 @@ static int gmc_v9_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

 	gfxhub_v1_0_init(adev);
-	if (adev->asic_type == CHIP_ARCTURUS)
-		mmhub_v9_4_init(adev);
-	else
-		mmhub_v1_0_init(adev);
+
+	adev->mmhub.funcs->init(adev);

 	spin_lock_init(&adev->gmc.invalidate_lock);

@@ -1242,7 +1463,7 @@ static int gmc_v9_0_sw_init(void *handle)
 	if (r)
 		return r;

-	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
+	amdgpu_gmc_get_vbios_allocations(adev);

 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
@@ -1268,21 +1489,18 @@ static int gmc_v9_0_sw_init(void *handle)
 	amdgpu_vm_manager_init(adev);

+	gmc_v9_0_save_registers(adev);
+
 	return 0;
 }
 static int gmc_v9_0_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	void *stolen_vga_buf;

 	amdgpu_gmc_ras_fini(adev);
 	amdgpu_gem_force_release(adev);
 	amdgpu_vm_manager_fini(adev);

-	if (gmc_v9_0_keep_stolen_memory(adev))
-		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
-
 	amdgpu_gart_table_vram_free(adev);
 	amdgpu_bo_fini(adev);
 	amdgpu_gart_fini(adev);
@@ -1326,10 +1544,10 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
  *
  * This restores register values, saved at suspend.
  */
-static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
+void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
 {
 	if (adev->asic_type == CHIP_RAVEN)
-		WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
 }
 /**
@@ -1353,10 +1571,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;

-	if (adev->asic_type == CHIP_ARCTURUS)
-		r = mmhub_v9_4_gart_enable(adev);
-	else
-		r = mmhub_v1_0_gart_enable(adev);
+	r = adev->mmhub.funcs->gart_enable(adev);
 	if (r)
 		return r;
@@ -1391,11 +1606,10 @@ static int gmc_v9_0_hw_init(void *handle)
 						golden_settings_vega10_hdp,
 						ARRAY_SIZE(golden_settings_vega10_hdp));

+	if (adev->mmhub.funcs->update_power_gating)
+		adev->mmhub.funcs->update_power_gating(adev, true);
+
 	switch (adev->asic_type) {
-	case CHIP_RAVEN:
-		/* TODO for renoir */
-		mmhub_v1_0_update_power_gating(adev, true);
-		break;
 	case CHIP_ARCTURUS:
 		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
 		break;
@@ -1421,10 +1635,7 @@ static int gmc_v9_0_hw_init(void *handle)
 	if (!amdgpu_sriov_vf(adev)) {
 		gfxhub_v1_0_set_fault_enable_default(adev, value);
-		if (adev->asic_type == CHIP_ARCTURUS)
-			mmhub_v9_4_set_fault_enable_default(adev, value);
-		else
-			mmhub_v1_0_set_fault_enable_default(adev, value);
+		adev->mmhub.funcs->set_fault_enable_default(adev, value);
 	}
 	for (i = 0; i < adev->num_vmhubs; ++i)
 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
@@ -1437,20 +1648,6 @@ static int gmc_v9_0_hw_init(void *handle)
 	return r;
 }

-/**
- * gmc_v9_0_save_registers - saves regs
- *
- * @adev: amdgpu_device pointer
- *
- * This saves potential register values that should be
- * restored upon resume
- */
-static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
-{
-	if (adev->asic_type == CHIP_RAVEN)
-		adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
-}
-
 /**
  * gmc_v9_0_gart_disable - gart disable
  *
@@ -1461,10 +1658,7 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
 {
 	gfxhub_v1_0_gart_disable(adev);
-	if (adev->asic_type == CHIP_ARCTURUS)
-		mmhub_v9_4_gart_disable(adev);
-	else
-		mmhub_v1_0_gart_disable(adev);
+	adev->mmhub.funcs->gart_disable(adev);
 	amdgpu_gart_table_vram_unpin(adev);
 }
@@ -1494,8 +1688,6 @@ static int gmc_v9_0_suspend(void *handle)
 	if (r)
 		return r;

-	gmc_v9_0_save_registers(adev);
-
 	return 0;
 }
@@ -1504,7 +1696,6 @@ static int gmc_v9_0_resume(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	gmc_v9_0_restore_registers(adev);
 	r = gmc_v9_0_hw_init(adev);
 	if (r)
 		return r;
@@ -1537,10 +1728,7 @@ static int gmc_v9_0_set_clockgating_state(void *handle,
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	if (adev->asic_type == CHIP_ARCTURUS)
-		mmhub_v9_4_set_clockgating(adev, state);
-	else
-		mmhub_v1_0_set_clockgating(adev, state);
+	adev->mmhub.funcs->set_clockgating(adev, state);

 	athub_v1_0_set_clockgating(adev, state);
@@ -1551,10 +1739,7 @@ static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	if (adev->asic_type == CHIP_ARCTURUS)
-		mmhub_v9_4_get_clockgating(adev, flags);
-	else
-		mmhub_v1_0_get_clockgating(adev, flags);
+	adev->mmhub.funcs->get_clockgating(adev, flags);

 	athub_v1_0_get_clockgating(adev, flags);
 }

drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h

@@ -26,4 +26,6 @@
 extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
 extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;

+void gmc_v9_0_restore_registers(struct amdgpu_device *adev);
+
 #endif

drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c

@@ -55,22 +55,18 @@ static int amdgpu_ih_clientid_jpeg[] = {
 static int jpeg_v2_5_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	if (adev->asic_type == CHIP_ARCTURUS) {
-		u32 harvest;
-		int i;
-
-		adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
-		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
-			harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
-			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
-				adev->jpeg.harvest_config |= 1 << i;
-		}
-
-		if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
-						  AMDGPU_JPEG_HARVEST_JPEG1))
-			return -ENOENT;
-	} else
-		adev->jpeg.num_jpeg_inst = 1;
+	u32 harvest;
+	int i;
+
+	adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+		harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
+		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+			adev->jpeg.harvest_config |= 1 << i;
+	}
+	if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
+					  AMDGPU_JPEG_HARVEST_JPEG1))
+		return -ENOENT;

 	jpeg_v2_5_set_dec_ring_funcs(adev);
 	jpeg_v2_5_set_irq_funcs(adev);
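The reworked early init reads the per-instance harvest fuse unconditionally, ORs one bit per disabled instance into jpeg.harvest_config, and fails with -ENOENT only when every instance is fused off. A standalone sketch of that accounting, with an invented fuse reader standing in for RREG32_SOC15:

    #include <errno.h>
    #include <stdio.h>

    /* Invented fuse bit; stands in for CC_UVD_HARVESTING__UVD_DISABLE_MASK. */
    #define UVD_DISABLE_MASK 0x2u
    #define NUM_JPEG_INST    2u

    /* Invented fuse values: instance 1 harvested, instance 0 usable. */
    static unsigned int read_harvest_fuse(unsigned int inst)
    {
        return inst == 1 ? UVD_DISABLE_MASK : 0;
    }

    static int jpeg_early_init(unsigned int *harvest_config)
    {
        const unsigned int all = (1u << NUM_JPEG_INST) - 1; /* JPEG0 | JPEG1 */
        unsigned int i;

        *harvest_config = 0;
        for (i = 0; i < NUM_JPEG_INST; i++)
            if (read_harvest_fuse(i) & UVD_DISABLE_MASK)
                *harvest_config |= 1u << i;

        /* Bail out, as the driver does, only if nothing is left to drive. */
        return (*harvest_config == all) ? -ENOENT : 0;
    }

    int main(void)
    {
        unsigned int cfg;
        int r = jpeg_early_init(&cfg);

        printf("r=%d harvest_config=0x%x\n", r, cfg);
        return r ? 1 : 0;
    }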

drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c

@@ -460,15 +460,10 @@ static bool jpeg_v3_0_is_idle(void *handle)
 static int jpeg_v3_0_wait_for_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int ret;

-	ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
-		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
-		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
-	if (ret)
-		return ret;
-
-	return ret;
+	return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
 }

 static int jpeg_v3_0_set_clockgating_state(void *handle,
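The wait_for_idle() cleanup drops a redundant ret dance: the polling macro already yields 0 on success and an error code on timeout, so its result can be returned directly. A small sketch of the pattern, with a stub in place of the register-polling macro:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for SOC15_WAIT_ON_RREG: poll until (reg & mask) == expect or
     * time out; yields 0 on success, a negative errno on failure. */
    static int wait_on_reg(unsigned int mask, unsigned int expect)
    {
        unsigned int val = 0x1; /* pretend the block reports RB_JOB_DONE */

        return ((val & mask) == expect) ? 0 : -ETIMEDOUT;
    }

    /* The old body stored the status in ret, tested it, then returned ret on
     * both paths; returning the expression directly is equivalent. */
    static int wait_for_idle(void)
    {
        return wait_on_reg(0x1, 0x1);
    }

    int main(void)
    {
        printf("wait_for_idle() = %d\n", wait_for_idle());
        return 0;
    }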

drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c (mode changed: executable file → normal file)

@@ -34,7 +34,7 @@
 #define mmDAGB0_CNTL_MISC2_RV 0x008f
 #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0

-u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
+static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
 {
 	u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
 	u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
@@ -51,7 +51,7 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
 	return base;
 }

-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
 				uint64_t page_table_base)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -297,20 +297,19 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 	}
 }

-void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
 				bool enable)
 {
 	if (amdgpu_sriov_vf(adev))
 		return;

 	if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
-		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu)
-			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
 	}
 }

-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
 {
 	if (amdgpu_sriov_vf(adev)) {
 		/*
@@ -338,7 +337,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
 	return 0;
 }

-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 	u32 tmp;
@@ -373,7 +372,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
  */
-void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 {
 	u32 tmp;
@@ -415,7 +414,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }

-void mmhub_v1_0_init(struct amdgpu_device *adev)
+static void mmhub_v1_0_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -525,7 +524,7 @@ static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
 	WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
 }

-int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
 			       enum amd_clockgating_state state)
 {
 	if (amdgpu_sriov_vf(adev))
@@ -549,7 +548,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
 	return 0;
 }

-void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 {
 	int data, data1;
@@ -781,4 +780,13 @@ const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
 	.ras_late_init = amdgpu_mmhub_ras_late_init,
 	.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
 	.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
+	.get_fb_location = mmhub_v1_0_get_fb_location,
+	.init = mmhub_v1_0_init,
+	.gart_enable = mmhub_v1_0_gart_enable,
+	.set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
+	.gart_disable = mmhub_v1_0_gart_disable,
+	.set_clockgating = mmhub_v1_0_set_clockgating,
+	.get_clockgating = mmhub_v1_0_get_clockgating,
+	.setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
+	.update_power_gating = mmhub_v1_0_update_power_gating,
 };
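With every implementation now static, only the ops table crosses the file boundary, which is why the header below shrinks to a single extern. A compact sketch of that linkage split, using hypothetical mini_* names and compilable as one translation unit:

    #include <stdio.h>

    /* "Header" half: only the ops table is visible to other files. */
    struct mini_mmhub_funcs {
        int (*gart_enable)(void);
        void (*gart_disable)(void);
    };
    extern const struct mini_mmhub_funcs mini_mmhub_funcs;

    /* "Implementation" half: the functions keep internal linkage. */
    static int mini_gart_enable(void)
    {
        puts("gart enabled");
        return 0;
    }

    static void mini_gart_disable(void)
    {
        puts("gart disabled");
    }

    const struct mini_mmhub_funcs mini_mmhub_funcs = {
        .gart_enable  = mini_gart_enable,
        .gart_disable = mini_gart_disable,
    };

    int main(void)
    {
        if (!mini_mmhub_funcs.gart_enable())
            mini_mmhub_funcs.gart_disable();
        return 0;
    }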

drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h

@@ -25,18 +25,4 @@
 extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;

-u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
-void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
-					 bool value);
-void mmhub_v1_0_init(struct amdgpu_device *adev);
-int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
-			       enum amd_clockgating_state state);
-void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
-				    bool enable);
-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-				 uint64_t page_table_base);
-
 #endif

drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c

@@ -36,7 +36,130 @@
 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid 0x0070
 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX 0

+static const char *mmhub_client_ids_navi1x[][2] = {
+	[3][0] = "DCEDMC",
+	[4][0] = "DCEVGA",
+	[5][0] = "MP0",
+	[6][0] = "MP1",
+	[13][0] = "VMC",
+	[14][0] = "HDP",
+	[15][0] = "OSS",
+	[16][0] = "VCNU",
+	[17][0] = "JPEG",
+	[18][0] = "VCN",
+	[3][1] = "DCEDMC",
+	[4][1] = "DCEXFC",
+	[5][1] = "DCEVGA",
+	[6][1] = "DCEDWB",
+	[7][1] = "MP0",
+	[8][1] = "MP1",
+	[9][1] = "DBGU1",
+	[10][1] = "DBGU0",
+	[11][1] = "XDP",
+	[14][1] = "HDP",
+	[15][1] = "OSS",
+	[16][1] = "VCNU",
+	[17][1] = "JPEG",
+	[18][1] = "VCN",
+};
+
+static const char *mmhub_client_ids_sienna_cichlid[][2] = {
+	[3][0] = "DCEDMC",
+	[4][0] = "DCEVGA",
+	[5][0] = "MP0",
+	[6][0] = "MP1",
+	[8][0] = "VMC",
+	[9][0] = "VCNU0",
+	[10][0] = "JPEG",
+	[12][0] = "VCNU1",
+	[13][0] = "VCN1",
+	[14][0] = "HDP",
+	[15][0] = "OSS",
+	[32+11][0] = "VCN0",
+	[0][1] = "DBGU0",
+	[1][1] = "DBGU1",
+	[2][1] = "DCEDWB",
+	[3][1] = "DCEDMC",
+	[4][1] = "DCEVGA",
+	[5][1] = "MP0",
+	[6][1] = "MP1",
+	[7][1] = "XDP",
+	[9][1] = "VCNU0",
+	[10][1] = "JPEG",
+	[11][1] = "VCN0",
+	[12][1] = "VCNU1",
+	[13][1] = "VCN1",
+	[14][1] = "HDP",
+	[15][1] = "OSS",
+};
+
+static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid,
+					      uint32_t flush_type)
+{
+	u32 req = 0;
+
+	/* invalidate using legacy mode on vmid*/
+	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
+			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
+	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
+			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+	return req;
+}
+
+static void
+mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
+					    uint32_t status)
+{
+	uint32_t cid, rw;
+	const char *mmhub_cid = NULL;
+
+	cid = REG_GET_FIELD(status,
+			    MMVM_L2_PROTECTION_FAULT_STATUS, CID);
+	rw = REG_GET_FIELD(status,
+			   MMVM_L2_PROTECTION_FAULT_STATUS, RW);
+
+	dev_err(adev->dev,
+		"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+		status);
+	switch (adev->asic_type) {
+	case CHIP_NAVI10:
+	case CHIP_NAVI12:
+	case CHIP_NAVI14:
+		mmhub_cid = mmhub_client_ids_navi1x[cid][rw];
+		break;
+	case CHIP_SIENNA_CICHLID:
+	case CHIP_NAVY_FLOUNDER:
+		mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
+		break;
+	default:
+		mmhub_cid = NULL;
+		break;
+	}
+	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+		mmhub_cid ? mmhub_cid : "unknown", cid);
+	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+		REG_GET_FIELD(status,
+		MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+		REG_GET_FIELD(status,
+		MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+		REG_GET_FIELD(status,
+		MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+		REG_GET_FIELD(status,
+		MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
+}
+
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
 				uint64_t page_table_base)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
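The fault decoder added above indexes a sparse two-column table by client ID and the read/write bit; entries left out of the designated initializers stay NULL and are reported as "unknown". A toy decoder showing the same lookup, with invented field offsets standing in for the real MMVM_L2_PROTECTION_FAULT_STATUS layout:

    #include <stdio.h>

    /* Invented field positions, for the sketch only. */
    #define CID_SHIFT 1
    #define CID_MASK  0xFFu
    #define RW_SHIFT  18
    #define RW_MASK   0x1u

    /* Sparse [client id][read/write] table, as in the driver. */
    static const char *client_ids[][2] = {
        [14][0] = "HDP", [14][1] = "HDP",
        [18][0] = "VCN", [18][1] = "VCN",
    };

    int main(void)
    {
        unsigned int status = 14u << CID_SHIFT; /* fabricated status: HDP, read */
        unsigned int cid = (status >> CID_SHIFT) & CID_MASK;
        unsigned int rw = (status >> RW_SHIFT) & RW_MASK; /* 0 = read, 1 = write */
        size_t rows = sizeof(client_ids) / sizeof(client_ids[0]);
        const char *name = (cid < rows) ? client_ids[cid][rw] : NULL;

        printf("client 0x%x (%s), %s fault\n", cid,
               name ? name : "unknown", rw ? "write" : "read");
        return 0;
    }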
@@ -280,7 +403,7 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
 	}
 }

-int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
 {
 	/* GART Enable. */
 	mmhub_v2_0_init_gart_aperture_regs(adev);
@@ -296,7 +419,7 @@ int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
 	return 0;
 }

-void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 	u32 tmp;
@@ -327,7 +450,7 @@ void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
  */
-void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 {
 	u32 tmp;
@@ -370,7 +493,12 @@ void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }

+static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = {
+	.print_l2_protection_fault_status = mmhub_v2_0_print_l2_protection_fault_status,
+	.get_invalidate_req = mmhub_v2_0_get_invalidate_req,
+};
+
-void mmhub_v2_0_init(struct amdgpu_device *adev)
+static void mmhub_v2_0_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -400,6 +528,16 @@ void mmhub_v2_0_init(struct amdgpu_device *adev)
 		mmMMVM_INVALIDATE_ENG0_REQ;
 	hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
 		mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+	hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+	hub->vmhub_funcs = &mmhub_v2_0_vmhub_funcs;
 }

 static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -490,7 +628,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
 	}
 }

-int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
 			       enum amd_clockgating_state state)
 {
 	if (amdgpu_sriov_vf(adev))
@@ -514,7 +652,7 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
 	return 0;
 }

-void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 {
 	int data, data1;
@@ -547,3 +685,14 @@ void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 	if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
 		*flags |= AMD_CG_SUPPORT_MC_LS;
 }
+
+const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
+	.ras_late_init = amdgpu_mmhub_ras_late_init,
+	.init = mmhub_v2_0_init,
+	.gart_enable = mmhub_v2_0_gart_enable,
+	.set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
+	.gart_disable = mmhub_v2_0_gart_disable,
+	.set_clockgating = mmhub_v2_0_set_clockgating,
+	.get_clockgating = mmhub_v2_0_get_clockgating,
+	.setup_vm_pt_regs = mmhub_v2_0_setup_vm_pt_regs,
+};
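mmhub_v2_0_get_invalidate_req() builds the TLB-flush request word one field at a time with REG_SET_FIELD. The same shift-and-mask composition, sketched with invented offsets (the real ones come from the MMVM_INVALIDATE_ENG0_REQ register definition):

    #include <stdio.h>

    /* Simplified stand-in for REG_SET_FIELD: clear the field, then OR the
     * masked value into place. */
    static unsigned int set_field(unsigned int reg, unsigned int shift,
                                  unsigned int mask, unsigned int val)
    {
        return (reg & ~(mask << shift)) | ((val & mask) << shift);
    }

    int main(void)
    {
        unsigned int vmid = 3, flush_type = 0;
        unsigned int req = 0;

        req = set_field(req, 0, 0xFFFF, 1u << vmid); /* PER_VMID_INVALIDATE_REQ */
        req = set_field(req, 16, 0x7, flush_type);   /* FLUSH_TYPE */
        req = set_field(req, 19, 0x1, 1);            /* INVALIDATE_L2_PTES */
        req = set_field(req, 20, 0x1, 1);            /* INVALIDATE_L2_PDE0 */

        printf("invalidate req = 0x%08x\n", req);    /* 0x00180008 for vmid 3 */
        return 0;
    }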

drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h

@@ -23,15 +23,6 @@
 #ifndef __MMHUB_V2_0_H__
 #define __MMHUB_V2_0_H__

-int mmhub_v2_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v2_0_gart_disable(struct amdgpu_device *adev);
-void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
-					 bool value);
-void mmhub_v2_0_init(struct amdgpu_device *adev);
-int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
-			       enum amd_clockgating_state state);
-void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-				 uint64_t page_table_base);
+extern const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs;

 #endif

Some files were not shown because too many files have changed in this diff.