/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};
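
/**
 * amdgpu_device_is_px - query whether the device is a hybrid graphics (PX) part
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device has the AMD_IS_PX flag set (power switchable
 * discrete GPU), false otherwise.
 */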
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
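
/**
 * amdgpu_mm_rreg - read an MMIO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags (AMDGPU_REGS_*)
 *
 * Reads the register either through the directly mapped MMIO aperture or,
 * for offsets beyond it, through the MM_INDEX/MM_DATA pair.
 * Returns the 32 bit value read from the register.
 */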
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}
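
/**
 * amdgpu_mm_wreg - write an MMIO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write
 * @acc_flags: access flags (AMDGPU_REGS_*)
 *
 * Writes the value either through the directly mapped MMIO aperture or,
 * for offsets beyond it, through the MM_INDEX/MM_DATA pair.
 */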
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
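
/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Reads the register through the PCI IO BAR, using MM_INDEX/MM_DATA for
 * offsets beyond the mapped IO region. Returns the 32 bit value read.
 */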
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}
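
/**
 * amdgpu_io_wreg - write an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write
 *
 * Writes the register through the PCI IO BAR, using MM_INDEX/MM_DATA for
 * offsets beyond the mapped IO region.
 */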
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}
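
/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a single GPU page of VRAM as a scratch buffer, kernel pinned
 * and CPU mapped. Returns 0 on success or a negative error code on failure.
 */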
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
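
/**
 * amdgpu_device_pci_config_reset - reset the GPU via the PCI config space
 *
 * @adev: amdgpu_device pointer
 *
 * Triggers an ASIC reset by writing the reset magic value to the
 * PCI config register at offset 0x7c.
 */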
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb >> 3, adev->wb.used);
}

/**
 * amdgpu_device_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_device_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->gmc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		/* VCE doesn't like it when BOs cross a 4GB segment, so align
		 * the GART base on a 4GB boundary as well.
		 */
		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if post is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
		 * reboot some old SMC firmware still needs the driver to do a vPost or the
		 * GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so we
		 * force vPost to be executed for SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}

	if (amdgpu_lockup_timeout == 0) {
		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
		amdgpu_lockup_timeout = 10000;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};
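
/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state of an IP block
 *
 * @adev: amdgpu_device pointer
 * @block_type: the type of IP block to operate on
 * @state: clockgating state (gate or ungate)
 *
 * Walks the list of IP blocks and calls set_clockgating_state() on every
 * valid block of the requested type. Returns the last error code, or 0.
 */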
int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
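
/**
 * amdgpu_device_ip_set_powergating_state - set the PG state of an IP block
 *
 * @adev: amdgpu_device pointer
 * @block_type: the type of IP block to operate on
 * @state: powergating state (gate or ungate)
 *
 * Walks the list of IP blocks and calls set_powergating_state() on every
 * valid block of the requested type. Returns the last error code, or 0.
 */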
int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
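
/**
 * amdgpu_device_parse_gpu_info_fw - load and parse the gpu_info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * For ASICs that ship a gpu_info firmware blob (VEGA10, RAVEN), request the
 * firmware and use it to fill in the gfx configuration and CU info fields
 * of @adev. Older ASICs return 0 without loading anything.
 * Returns 0 on success or a negative error code on failure.
 */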
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}
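
/**
 * amdgpu_device_ip_early_init - run early init for all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Determines the ASIC family, registers the IP blocks for it, loads the
 * gpu_info firmware, requests full GPU access for SR-IOV VFs and then calls
 * the early_init callback of every enabled IP block.
 * Returns 0 on success or a negative error code on failure.
 */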
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
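
/**
 * amdgpu_device_ip_init - run sw_init and hw_init for all IP blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Calls sw_init for every valid IP block, brings up the GMC hardware,
 * VRAM scratch page, writeback memory and SR-IOV CSA early so GPU memory
 * can be allocated, then runs hw_init for the remaining blocks and
 * initializes amdkfd. Returns 0 on success or a negative error code.
 */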
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return 0;
}
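
/**
 * amdgpu_device_fill_reset_magic - record the GART reset magic
 *
 * @adev: amdgpu_device pointer
 *
 * Copies the first AMDGPU_RESET_MAGIC_NUM bytes of the GART table into
 * adev->reset_magic so that amdgpu_device_check_vram_lost() can later
 * detect whether VRAM contents were lost across a reset.
 */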
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}
|
|
|
|
|
2017-12-15 03:02:39 +07:00
|
|
|
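/*
 * Deferred clockgating: gate every block except UVD and VCE, whose
 * clockgating is handled specially elsewhere.
 */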
static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

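/*
 * Runs each block's late_init hook once every block has completed hw_init,
 * schedules the delayed clockgating work (AMDGPU_RESUME_MS from now) and
 * records the reset magic used later for VRAM-loss detection.
 */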
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_device_fill_reset_magic(adev);

	return 0;
}

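/*
 * Full teardown of the IP blocks, roughly the reverse of init: ungate and
 * hw_fini the SMC first so the other blocks can be shut down safely, then
 * hw_fini the rest back to front, disable all interrupts, sw_fini every
 * block and finally run late_fini and drop SR-IOV exclusive access.
 */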
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_amdkfd_device_fini(adev);
	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_free_static_csa(adev);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	/* disable all interrupts */
	amdgpu_irq_disable_all(adev);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}

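/* Work handler for late_init_work: applies the deferred clockgating state. */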
static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_device_ip_late_set_cg_state(adev);
}

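/*
 * Suspend all IP blocks, in reverse init order.  The SMC is ungated first
 * through the generic set_clockgating_state helper, then each remaining
 * block is ungated and its ->suspend() callback is called.  On SR-IOV the
 * VF brackets the whole sequence with request/release of full GPU access.
 */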
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
						   AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (i != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}

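/*
 * SR-IOV reinit after a host-triggered reset uses a fixed IP ordering
 * instead of the regular ip_blocks order: this "early" pass brings back
 * GMC, COMMON and IH; the "late" pass below handles the remaining blocks.
 */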
static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

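/*
 * Resume is split in two phases: phase 1 restores COMMON, GMC and IH so
 * memory access and interrupts work again, phase 2 restores everything
 * else.  A minimal sketch of the expected call order (mirroring
 * amdgpu_device_ip_resume() below):
 *
 *	r = amdgpu_device_ip_resume_phase1(adev);
 *	if (!r)
 *		r = amdgpu_device_ip_resume_phase2(adev);
 */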
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

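/*
 * On an SR-IOV VF, query the vBIOS (atomfirmware or legacy atombios,
 * depending on the firmware flavour) for virtualization support and flag a
 * VF error if the vBIOS does not advertise it.
 */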
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

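/*
 * List of ASICs on which the DC display path can be used.  Pre-Vega parts
 * and Raven (DCN 1.0) are additionally gated behind their own Kconfig
 * options, and the amdgpu_dc option has the final say in all cases.
 */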
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_TONGA:
	case CHIP_FIJI:
#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
		return amdgpu_dc != 0;
#endif
	case CHIP_VEGA10:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
#endif
		return amdgpu_dc != 0;
#endif
	default:
		return false;
	}
}

/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	return amdgpu_device_asic_has_dc_support(adev->asic_type);
}

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->gmc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gmc.gmc_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is all done here so we
	 * can recall these functions without locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->gfx.pipe_reserve_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
	hash_init(adev->mn_hash);
	mutex_init(&adev->lock_reset);

	amdgpu_device_check_arguments(adev);

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->ring_lru_list);
	spin_lock_init(&adev->ring_lru_list_lock);

	INIT_DELAYED_WORK(&adev->late_init_work,
			  amdgpu_device_ip_late_init_func_handler);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	/* doorbell bar mapping */
	amdgpu_device_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* early init functions */
	r = amdgpu_device_ip_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);

	if (amdgpu_device_is_px(ddev))
		runtime = true;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_device_need_post(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	}

	if (adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
	} else {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
		/* init i2c buses */
		if (!amdgpu_device_has_dc_support(adev))
			amdgpu_atombios_i2c_init(adev);
	}

	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_device_ip_init(adev);
	if (r) {
		/* failed in exclusive mode due to timeout */
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    amdgpu_virt_mmio_blocked(adev) &&
		    !amdgpu_virt_wait_reset(adev)) {
			dev_err(adev->dev, "VF exclusive mode timeout\n");
			/* Don't send request since VF is inactive. */
			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
			adev->virt.ops = NULL;
			r = -EAGAIN;
			goto failed;
		}
		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		amdgpu_device_ip_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	amdgpu_fbdev_init(adev);

	r = amdgpu_pm_sysfs_init(adev);
	if (r)
		DRM_ERROR("registering pm debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_init(adev);
	if (r)
		DRM_ERROR("Creating debugfs files failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_device_ip_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
		goto failed;
	}

	return 0;

failed:
	amdgpu_vf_error_trans_all(adev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	if (adev->mode_info.mode_config_initialized)
		drm_crtc_force_disable_all(adev->ddev);

	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_device_ip_fini(adev);
	if (adev->firmware.gpu_info_fw) {
		release_firmware(adev->firmware.gpu_info_fw);
		adev->firmware.gpu_info_fw = NULL;
	}
	adev->accel_working = false;
	cancel_delayed_work_sync(&adev->late_init_work);
	/* free i2c buses */
	if (!amdgpu_device_has_dc_support(adev))
		amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	amdgpu_device_doorbell_fini(adev);
	amdgpu_pm_sysfs_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}


/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: suspend state
 * @fbcon: suspend the fbdev console as well
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	if (!amdgpu_device_has_dc_support(adev)) {
		/* turn off display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
		}
		drm_modeset_unlock_all(dev);
	}

	amdgpu_amdkfd_suspend(adev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_device_ip_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: resume state
 * @fbcon: resume the fbdev console as well
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			goto unlock;
	}

	/* post card */
	if (amdgpu_device_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_device_ip_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
		goto unlock;
	}
	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_device_ip_late_init(adev);
	if (r)
		goto unlock;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}
	r = amdgpu_amdkfd_resume(adev);
	if (r)
		return r;

	/* blat the mode back in */
	if (fbcon) {
		if (!amdgpu_device_has_dc_support(adev)) {
			/* pre DCE11 */
			drm_helper_resume_force_mode(dev);

			/* turn on display hw */
			drm_modeset_lock_all(dev);
			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
			}
			drm_modeset_unlock_all(dev);
		} else {
			/*
			 * There is no equivalent atomic helper to turn on
			 * display, so we defined our own function for this,
			 * once suspend resume is supported by the atomic
			 * framework this will be reworked
			 */
			amdgpu_dm_display_resume(adev);
		}
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_hpd_irq_event(dev);
	else
		drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

unlock:
	if (fbcon)
		console_unlock();

	return r;
}

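/*
 * Hang detection and reset helpers.  check_soft_reset polls every IP
 * block's check_soft_reset callback (an SR-IOV VF always reports a hang),
 * pre/soft/post_soft_reset drive the per-block soft-reset hooks, and
 * need_full_reset forces a full ASIC reset when GMC, SMC, ACP, DCE or PSP
 * is among the hung blocks.
 */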
static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	if (amdgpu_sriov_vf(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some block need full reset!\n");
				return true;
			}
		}
	}
	return false;
}

static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

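/*
 * After a reset that lost VRAM, buffer objects that keep a shadow copy in
 * GTT (notably page tables) can be restored: if the BO still resides in
 * VRAM, its shadow is validated and copied back on the given ring, and a
 * fence for the copy is returned to the caller.
 */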
static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
						  struct amdgpu_ring *ring,
						  struct amdgpu_bo *bo,
						  struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_validate(bo->shadow);
		if (r) {
			DRM_ERROR("bo validate failed!\n");
			goto err;
		}

		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}

/*
|
2017-12-15 03:02:39 +07:00
|
|
|
* amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
|
2017-01-23 13:22:08 +07:00
|
|
|
*
|
|
|
|
* @adev: amdgpu device pointer
|
2017-10-25 15:37:02 +07:00
|
|
|
* @reset_flags: output param tells caller the reset result
|
2017-01-23 13:22:08 +07:00
|
|
|
*
|
2017-10-25 15:37:02 +07:00
|
|
|
* attempt to do soft-reset or full-reset and reinitialize Asic
|
|
|
|
* return 0 means successed otherwise failed
|
|
|
|
*/
|
2017-12-15 03:02:39 +07:00
|
|
|
static int amdgpu_device_reset(struct amdgpu_device *adev,
|
|
|
|
uint64_t* reset_flags)
|
2017-01-23 13:22:08 +07:00
|
|
|
{
|
2017-10-25 15:37:02 +07:00
|
|
|
bool need_full_reset, vram_lost = 0;
|
|
|
|
int r;
|
2017-01-23 13:22:08 +07:00
|
|
|
|
2017-12-15 03:02:39 +07:00
|
|
|
need_full_reset = amdgpu_device_ip_need_full_reset(adev);
|
2017-01-23 13:22:08 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
if (!need_full_reset) {
|
2017-12-15 03:02:39 +07:00
|
|
|
amdgpu_device_ip_pre_soft_reset(adev);
|
|
|
|
r = amdgpu_device_ip_soft_reset(adev);
|
|
|
|
amdgpu_device_ip_post_soft_reset(adev);
|
|
|
|
if (r || amdgpu_device_ip_check_soft_reset(adev)) {
|
2017-10-25 15:37:02 +07:00
|
|
|
DRM_INFO("soft reset failed, will fallback to full reset!\n");
|
|
|
|
need_full_reset = true;
|
|
|
|
}
|
2017-01-23 13:22:08 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
}
|
2017-01-23 13:22:08 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
if (need_full_reset) {
|
2017-12-15 04:47:40 +07:00
|
|
|
r = amdgpu_device_ip_suspend(adev);
|
2017-01-23 13:22:08 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
retry:
|
|
|
|
r = amdgpu_asic_reset(adev);
|
|
|
|
/* post card */
|
|
|
|
amdgpu_atom_asic_init(adev->mode_info.atom_context);
|
drm/amdgpu/SRIOV:implement guilty job TDR for(V2)
1,TDR will kickout guilty job if it hang exceed the threshold
of the given one from kernel paramter "job_hang_limit", that
way a bad command stream will not infinitly cause GPU hang.
by default this threshold is 1 so a job will be kicked out
after it hang.
2,if a job timeout TDR routine will not reset all sched/ring,
instead if will only reset on the givn one which is indicated
by @job of amdgpu_sriov_gpu_reset, that way we don't need to
reset and recover each sched/ring if we already know which job
cause GPU hang.
3,unblock sriov_gpu_reset for AI family.
V2:
1:put kickout guilty job after sched parked.
2:since parking scheduler prior to kickout already occupies a
while, we can do last check on the in question job before
doing hw_reset.
TODO:
1:when a job is considered as guilty, we should mark some flag
in its fence status flag, and let UMD side aware that this
fence signaling is not due to job complete but job hang.
2:if gpu reset cause all video memory lost, we need introduce
a new policy to implement TDR, like drop all jobs not yet
signaled, and all IOCTL on this device will return ERROR
DEVICE_LOST.
this will be implemented later.
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
2017-05-11 12:36:44 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
if (!r) {
|
|
|
|
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
|
2017-12-15 03:02:39 +07:00
|
|
|
r = amdgpu_device_ip_resume_phase1(adev);
|
2017-10-25 15:37:02 +07:00
|
|
|
if (r)
|
|
|
|
goto out;
|
drm/amdgpu/SRIOV:implement guilty job TDR for(V2)
1,TDR will kickout guilty job if it hang exceed the threshold
of the given one from kernel paramter "job_hang_limit", that
way a bad command stream will not infinitly cause GPU hang.
by default this threshold is 1 so a job will be kicked out
after it hang.
2,if a job timeout TDR routine will not reset all sched/ring,
instead if will only reset on the givn one which is indicated
by @job of amdgpu_sriov_gpu_reset, that way we don't need to
reset and recover each sched/ring if we already know which job
cause GPU hang.
3,unblock sriov_gpu_reset for AI family.
V2:
1:put kickout guilty job after sched parked.
2:since parking scheduler prior to kickout already occupies a
while, we can do last check on the in question job before
doing hw_reset.
TODO:
1:when a job is considered as guilty, we should mark some flag
in its fence status flag, and let UMD side aware that this
fence signaling is not due to job complete but job hang.
2:if gpu reset cause all video memory lost, we need introduce
a new policy to implement TDR, like drop all jobs not yet
signaled, and all IOCTL on this device will return ERROR
DEVICE_LOST.
this will be implemented later.
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
2017-05-11 12:36:44 +07:00
|
|
|
|
2017-12-15 03:02:39 +07:00
|
|
|
vram_lost = amdgpu_device_check_vram_lost(adev);
|
2017-10-25 15:37:02 +07:00
|
|
|
if (vram_lost) {
|
|
|
|
DRM_ERROR("VRAM is lost!\n");
|
|
|
|
atomic_inc(&adev->vram_lost_counter);
|
|
|
|
}
|
|
|
|
|
2017-10-16 21:50:32 +07:00
|
|
|
r = amdgpu_gtt_mgr_recover(
|
|
|
|
&adev->mman.bdev.man[TTM_PL_TT]);
|
2017-10-25 15:37:02 +07:00
|
|
|
if (r)
|
|
|
|
goto out;
|
|
|
|
|
2017-12-15 03:02:39 +07:00
|
|
|
r = amdgpu_device_ip_resume_phase2(adev);
|
2017-10-25 15:37:02 +07:00
|
|
|
if (r)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (vram_lost)
|
2017-12-15 03:02:39 +07:00
|
|
|
amdgpu_device_fill_reset_magic(adev);
|
drm/amdgpu/SRIOV:implement guilty job TDR for(V2)
1,TDR will kickout guilty job if it hang exceed the threshold
of the given one from kernel paramter "job_hang_limit", that
way a bad command stream will not infinitly cause GPU hang.
by default this threshold is 1 so a job will be kicked out
after it hang.
2,if a job timeout TDR routine will not reset all sched/ring,
instead if will only reset on the givn one which is indicated
by @job of amdgpu_sriov_gpu_reset, that way we don't need to
reset and recover each sched/ring if we already know which job
cause GPU hang.
3,unblock sriov_gpu_reset for AI family.
V2:
1:put kickout guilty job after sched parked.
2:since parking scheduler prior to kickout already occupies a
while, we can do last check on the in question job before
doing hw_reset.
TODO:
1:when a job is considered as guilty, we should mark some flag
in its fence status flag, and let UMD side aware that this
fence signaling is not due to job complete but job hang.
2:if gpu reset cause all video memory lost, we need introduce
a new policy to implement TDR, like drop all jobs not yet
signaled, and all IOCTL on this device will return ERROR
DEVICE_LOST.
this will be implemented later.
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
2017-05-11 12:36:44 +07:00
|
|
|
}
|
2017-10-25 15:37:02 +07:00
|
|
|
}
|
2017-05-11 12:36:44 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
out:
|
|
|
|
if (!r) {
|
|
|
|
amdgpu_irq_gpu_reset_resume_helper(adev);
|
|
|
|
r = amdgpu_ib_ring_tests(adev);
|
|
|
|
if (r) {
|
|
|
|
dev_err(adev->dev, "ib ring test failed (%d).\n", r);
|
2017-12-15 04:47:40 +07:00
|
|
|
r = amdgpu_device_ip_suspend(adev);
|
2017-10-25 15:37:02 +07:00
|
|
|
need_full_reset = true;
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
}
|
2017-05-11 12:36:44 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
if (reset_flags) {
|
|
|
|
if (vram_lost)
|
|
|
|
(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
|
2017-01-23 13:22:08 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
if (need_full_reset)
|
|
|
|
(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
|
2017-05-11 12:36:44 +07:00
|
|
|
}
|
2017-01-23 13:22:08 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
return r;
|
|
|
|
}
|
2017-01-23 13:22:08 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
/*
|
2017-12-15 03:02:39 +07:00
|
|
|
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
|
2017-10-25 15:37:02 +07:00
|
|
|
*
|
|
|
|
* @adev: amdgpu device pointer
|
|
|
|
* @reset_flags: output param tells caller the reset result
|
|
|
|
*
|
|
|
|
* do a VF FLR and reinitialize the ASIC
|
|
|
|
* Returns 0 on success, negative error code on failure.
|
|
|
|
*/
|
2017-12-15 03:02:39 +07:00
|
|
|
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
|
|
|
|
uint64_t *reset_flags,
|
|
|
|
bool from_hypervisor)
|
2017-10-25 15:37:02 +07:00
|
|
|
{
|
|
|
|
int r;
|
|
|
|
|
|
|
|
if (from_hypervisor)
|
|
|
|
r = amdgpu_virt_request_full_gpu(adev, true);
|
|
|
|
else
|
|
|
|
r = amdgpu_virt_reset_gpu(adev);
|
|
|
|
if (r)
|
|
|
|
return r;
|
2017-01-23 13:22:08 +07:00
|
|
|
|
|
|
|
/* Resume IP prior to SMC */
|
2017-12-15 03:02:39 +07:00
|
|
|
r = amdgpu_device_ip_reinit_early_sriov(adev);
|
2017-10-25 15:37:02 +07:00
|
|
|
if (r)
|
|
|
|
goto error;
|
2017-01-23 13:22:08 +07:00
|
|
|
|
|
|
|
/* we need to recover the GART prior to running SMC/CP/SDMA resume */
|
2017-10-16 21:50:32 +07:00
|
|
|
amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
|
2017-01-23 13:22:08 +07:00
|
|
|
|
|
|
|
/* now we are okay to resume SMC/CP/SDMA */
|
2017-12-15 03:02:39 +07:00
|
|
|
r = amdgpu_device_ip_reinit_late_sriov(adev);
|
2017-10-25 15:37:02 +07:00
|
|
|
if (r)
|
|
|
|
goto error;
|
2017-01-23 13:22:08 +07:00
|
|
|
|
|
|
|
amdgpu_irq_gpu_reset_resume_helper(adev);
|
2017-10-25 15:37:02 +07:00
|
|
|
r = amdgpu_ib_ring_tests(adev);
|
|
|
|
if (r)
|
2017-01-23 13:22:08 +07:00
|
|
|
dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
|
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
error:
|
2017-01-23 13:22:08 +07:00
|
|
|
/* release full control of GPU after ib test */
|
|
|
|
amdgpu_virt_release_full_gpu(adev, true);
|
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
if (reset_flags) {
|
2017-10-30 19:11:54 +07:00
|
|
|
if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
|
|
|
|
(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
|
|
|
|
atomic_inc(&adev->vram_lost_counter);
|
|
|
|
}
|
2017-01-23 13:22:08 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
/* VF FLR or hot link reset is always a full reset */
|
|
|
|
(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
|
2017-01-23 13:22:08 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
/**
|
2017-12-16 04:40:49 +07:00
|
|
|
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
|
|
|
* @adev: amdgpu device pointer
|
2017-10-25 15:37:02 +07:00
|
|
|
* @job: which job triggered the hang
|
2017-12-13 02:09:30 +07:00
|
|
|
* @force: forces reset regardless of amdgpu_gpu_recovery
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
2017-10-25 15:37:02 +07:00
|
|
|
* Attempt to reset the GPU if it has hung (all asics).
|
2015-04-21 03:55:21 +07:00
|
|
|
* Returns 0 for success or an error on failure.
|
|
|
|
*/
|
2017-12-16 04:40:49 +07:00
|
|
|
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_job *job, bool force)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
2017-09-13 02:58:20 +07:00
|
|
|
struct drm_atomic_state *state = NULL;
|
2017-10-25 15:37:02 +07:00
|
|
|
uint64_t reset_flags = 0;
|
|
|
|
int i, r, resched;
|
2016-12-17 21:48:57 +07:00
|
|
|
|
2018-01-20 05:23:08 +07:00
|
|
|
if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
|
2016-07-15 10:19:20 +07:00
|
|
|
DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
|
|
|
|
return 0;
|
|
|
|
}
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-12-13 02:09:30 +07:00
|
|
|
if (!force && (amdgpu_gpu_recovery == 0 ||
|
|
|
|
(amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
|
|
|
|
DRM_INFO("GPU recovery disabled.\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
dev_info(adev->dev, "GPU reset begin!\n");
|
|
|
|
|
2017-10-17 14:11:12 +07:00
|
|
|
mutex_lock(&adev->lock_reset);
|
2015-05-06 02:13:49 +07:00
|
|
|
atomic_inc(&adev->gpu_reset_counter);
|
2017-10-17 14:11:12 +07:00
|
|
|
adev->in_gpu_reset = 1;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2016-06-30 15:44:41 +07:00
|
|
|
/* block TTM */
|
|
|
|
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
|
2017-09-13 02:58:20 +07:00
|
|
|
/* store modesetting */
|
|
|
|
if (amdgpu_device_has_dc_support(adev))
|
|
|
|
state = drm_atomic_helper_suspend(adev->ddev);
|
2016-06-30 15:44:41 +07:00
|
|
|
|
2016-06-12 14:41:58 +07:00
|
|
|
/* block scheduler */
|
|
|
|
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
|
|
|
|
struct amdgpu_ring *ring = adev->rings[i];
|
|
|
|
|
2017-04-24 16:09:15 +07:00
|
|
|
if (!ring || !ring->sched.thread)
|
2016-06-12 14:41:58 +07:00
|
|
|
continue;
|
2017-10-25 15:37:02 +07:00
|
|
|
|
|
|
|
/* only focus on the ring that hit the timeout if @job is not NULL */
|
|
|
|
if (job && job->ring->idx != i)
|
|
|
|
continue;
|
|
|
|
|
2016-06-12 14:41:58 +07:00
|
|
|
kthread_park(ring->sched.thread);
|
2017-12-06 23:49:39 +07:00
|
|
|
drm_sched_hw_job_reset(&ring->sched, &job->base);
|
2017-10-25 15:37:02 +07:00
|
|
|
|
2017-10-16 13:38:10 +07:00
|
|
|
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
|
|
|
|
amdgpu_fence_driver_force_completion(ring);
|
2016-06-12 14:41:58 +07:00
|
|
|
}
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
if (amdgpu_sriov_vf(adev))
|
2017-12-15 03:02:39 +07:00
|
|
|
r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
|
2017-10-25 15:37:02 +07:00
|
|
|
else
|
2017-12-15 03:02:39 +07:00
|
|
|
r = amdgpu_device_reset(adev, &reset_flags);
|
2016-07-15 14:57:13 +07:00
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
if (!r) {
|
2017-10-25 15:37:02 +07:00
|
|
|
if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
|
|
|
|
(reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
|
2016-07-21 16:20:52 +07:00
|
|
|
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
|
|
|
|
struct amdgpu_bo *bo, *tmp;
|
2016-10-25 19:00:45 +07:00
|
|
|
struct dma_fence *fence = NULL, *next = NULL;
|
2016-07-21 16:20:52 +07:00
|
|
|
|
|
|
|
DRM_INFO("recover vram bo from shadow\n");
|
|
|
|
mutex_lock(&adev->shadow_list_lock);
|
|
|
|
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
|
2017-05-01 15:15:31 +07:00
|
|
|
next = NULL;
|
2017-12-15 03:02:39 +07:00
|
|
|
amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
|
2016-07-21 16:20:52 +07:00
|
|
|
if (fence) {
|
2016-10-25 19:00:45 +07:00
|
|
|
r = dma_fence_wait(fence, false);
|
2016-07-21 16:20:52 +07:00
|
|
|
if (r) {
|
2017-01-22 17:52:56 +07:00
|
|
|
WARN(r, "recovery from shadow isn't completed\n");
|
2016-07-21 16:20:52 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2016-06-30 14:02:26 +07:00
|
|
|
|
2016-10-25 19:00:45 +07:00
|
|
|
dma_fence_put(fence);
|
2016-07-21 16:20:52 +07:00
|
|
|
fence = next;
|
|
|
|
}
|
|
|
|
mutex_unlock(&adev->shadow_list_lock);
|
|
|
|
if (fence) {
|
2016-10-25 19:00:45 +07:00
|
|
|
r = dma_fence_wait(fence, false);
|
2016-07-21 16:20:52 +07:00
|
|
|
if (r)
|
2017-01-22 17:52:56 +07:00
|
|
|
WARN(r, "recovery from shadow isn't completed\n");
|
2016-07-21 16:20:52 +07:00
|
|
|
}
|
2016-10-25 19:00:45 +07:00
|
|
|
dma_fence_put(fence);
|
2016-07-21 16:20:52 +07:00
|
|
|
}
|
2017-10-25 15:37:02 +07:00
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
|
|
|
|
struct amdgpu_ring *ring = adev->rings[i];
|
2017-04-24 16:09:15 +07:00
|
|
|
|
|
|
|
if (!ring || !ring->sched.thread)
|
2015-04-21 03:55:21 +07:00
|
|
|
continue;
|
2016-07-21 16:20:52 +07:00
|
|
|
|
2017-10-25 15:37:02 +07:00
|
|
|
/* only focus on the ring that hit the timeout if @job is not NULL */
|
|
|
|
if (job && job->ring->idx != i)
|
|
|
|
continue;
|
|
|
|
|
2017-12-06 23:49:39 +07:00
|
|
|
drm_sched_job_recovery(&ring->sched);
|
2016-06-12 14:41:58 +07:00
|
|
|
kthread_unpark(ring->sched.thread);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
|
2017-10-25 15:37:02 +07:00
|
|
|
struct amdgpu_ring *ring = adev->rings[i];
|
|
|
|
|
|
|
|
if (!ring || !ring->sched.thread)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* only focus on the ring that hit the timeout if @job is not NULL */
|
|
|
|
if (job && job->ring->idx != i)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
kthread_unpark(adev->rings[i]->sched.thread);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-13 02:58:20 +07:00
|
|
|
if (amdgpu_device_has_dc_support(adev)) {
|
2017-10-25 15:37:02 +07:00
|
|
|
r = drm_atomic_helper_resume(adev->ddev, state);
if (r)
|
|
|
|
dev_info(adev->dev, "drm resume failed:%d\n", r);
|
2017-09-13 02:58:20 +07:00
|
|
|
amdgpu_dm_display_resume(adev);
|
2017-10-25 15:37:02 +07:00
|
|
|
} else {
|
2017-09-13 02:58:20 +07:00
|
|
|
drm_helper_resume_force_mode(adev->ddev);
|
2017-10-25 15:37:02 +07:00
|
|
|
}
|
2015-04-21 03:55:21 +07:00
|
|
|
|
|
|
|
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
|
2017-10-25 15:37:02 +07:00
|
|
|
|
2017-06-24 00:55:15 +07:00
|
|
|
if (r) {
|
2015-04-21 03:55:21 +07:00
|
|
|
/* bad news, how do we tell userspace? */
|
2017-10-25 15:37:02 +07:00
|
|
|
dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
|
|
|
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
|
|
|
|
} else {
|
|
|
|
dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
|
2017-06-24 00:55:15 +07:00
|
|
|
}
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-06-24 00:55:15 +07:00
|
|
|
amdgpu_vf_error_trans_all(adev);
|
2017-10-17 14:11:12 +07:00
|
|
|
adev->in_gpu_reset = 0;
|
|
|
|
mutex_unlock(&adev->lock_reset);
|
2015-04-21 03:55:21 +07:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
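For context, a rough sketch of how a scheduler timeout callback might hand the
hanging job to amdgpu_device_gpu_recover(); the callback name and exact field
layout are illustrative and may differ from the in-tree amdgpu_job_timedout().

static void example_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	/* let the common recovery path pick per-ring vs. full reset based
	 * on @job and the amdgpu_gpu_recovery module parameter */
	amdgpu_device_gpu_recover(job->ring->adev, job, false);
}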
2017-12-16 04:49:33 +07:00
|
|
|
void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
|
2015-11-12 07:45:06 +07:00
|
|
|
{
|
|
|
|
u32 mask;
|
|
|
|
int ret;
|
|
|
|
|
2016-02-04 22:21:23 +07:00
|
|
|
if (amdgpu_pcie_gen_cap)
|
|
|
|
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
|
2015-11-12 07:45:06 +07:00
|
|
|
|
2016-02-04 22:21:23 +07:00
|
|
|
if (amdgpu_pcie_lane_cap)
|
|
|
|
adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
|
2015-11-12 07:45:06 +07:00
|
|
|
|
2016-02-04 22:21:23 +07:00
|
|
|
/* covers APUs as well */
|
|
|
|
if (pci_is_root_bus(adev->pdev->bus)) {
|
|
|
|
if (adev->pm.pcie_gen_mask == 0)
|
|
|
|
adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
|
|
|
|
if (adev->pm.pcie_mlw_mask == 0)
|
|
|
|
adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
|
2015-11-12 07:45:06 +07:00
|
|
|
return;
|
2016-02-04 22:21:23 +07:00
|
|
|
}
|
2015-11-12 07:45:06 +07:00
|
|
|
|
2016-02-04 22:21:23 +07:00
|
|
|
if (adev->pm.pcie_gen_mask == 0) {
|
|
|
|
ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
|
|
|
|
if (!ret) {
|
|
|
|
adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
|
|
|
|
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
|
|
|
|
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
|
|
|
|
|
|
|
|
if (mask & DRM_PCIE_SPEED_25)
|
|
|
|
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
|
|
|
|
if (mask & DRM_PCIE_SPEED_50)
|
|
|
|
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
|
|
|
|
if (mask & DRM_PCIE_SPEED_80)
|
|
|
|
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
|
|
|
|
} else {
|
|
|
|
adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (adev->pm.pcie_mlw_mask == 0) {
|
|
|
|
ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
|
|
|
|
if (!ret) {
|
|
|
|
switch (mask) {
|
|
|
|
case 32:
|
|
|
|
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
|
|
|
|
break;
|
|
|
|
case 16:
|
|
|
|
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
|
|
|
|
break;
|
|
|
|
case 12:
|
|
|
|
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
|
|
|
|
break;
|
|
|
|
case 8:
|
|
|
|
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
|
|
|
|
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
|
2015-11-12 07:45:06 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-04-21 03:55:21 +07:00
|
|
|
|