drm msm + fixes for 5.5-rc1

Merge tag 'drm-next-2019-12-06' of git://anongit.freedesktop.org/drm/drm

Pull more drm updates from Dave Airlie:
 "Rob pointed out I missed his pull request for msm-next, it's been in
  next for a while outside of my tree so shouldn't cause any unexpected
  issues, it has some OCMEM support in drivers/soc that is acked by
  other maintainers as it's outside my tree. Otherwise it's a usual
  fixes pull, i915, amdgpu, the main ones, with some tegra, omap,
  mgag200 and one core fix.

  Summary:

  msm-next:
   - OCMEM support for a3xx and a4xx GPUs.
   - a510 support + display support

  core:
   - mst payload deletion fix

  i915:
   - uapi alignment fix
   - fix for power usage regression due to security fixes
   - change default preemption timeout to 640ms from 100ms
   - EHL voltage level display fixes
   - TGL DGL PHY fix
   - gvt - MI_ATOMIC cmd parser fix, CFL non-priv warning
   - CI spotted deadlock fix
   - EHL port D programming fix

  amdgpu:
   - VRAM lost fixes on BACO for CI/VI
   - navi14 DC fixes
   - misc SR-IOV, gfx10 fixes
   - XGMI fixes for arcturus
   - SRIOV fixes

  amdkfd:
   - KFD on ppc64le enabled
   - page table optimisations

  radeon:
   - fix for r1xx/2xx register checker.

  tegra:
   - displayport regression fixes
   - DMA API regression fixes

  mgag200:
   - fix devices that can't scanout except at 0 addr

  omap:
   - fix dma_addr refcounting"

* tag 'drm-next-2019-12-06' of git://anongit.freedesktop.org/drm/drm: (100 commits)
  drm/dp_mst: Correct the bug in drm_dp_update_payload_part1()
  drm/omap: fix dma_addr refcounting
  drm/tegra: Run hub cleanup on ->remove()
  drm/tegra: sor: Make the +5V HDMI supply optional
  drm/tegra: Silence expected errors on IOMMU attach
  drm/tegra: vic: Export module device table
  drm/tegra: sor: Implement system suspend/resume
  drm/tegra: Use proper IOVA address for cursor image
  drm/tegra: gem: Remove premature import restrictions
  drm/tegra: gem: Properly pin imported buffers
  drm/tegra: hub: Remove bogus connection mutex check
  ia64: agp: Replace empty define with do while
  agp: Add bridge parameter documentation
  agp: remove unused variable num_segments
  agp: move AGPGART_MINOR to include/linux/miscdevice.h
  agp: remove unused variable size in agp_generic_create_gatt_table
  drm/dp_mst: Fix build on systems with STACKTRACE_SUPPORT=n
  drm/radeon: fix r1xx/r2xx register checker for POT textures
  drm/amdgpu: fix GFX10 missing CSIB set(v3)
  drm/amdgpu: should stop GFX ring in hw_fini
  ...
commit 7ada90eb9c
@@ -31,6 +31,10 @@ Required properties:
 - iommus: phandle to the adreno iommu
 - operating-points-v2: phandle to the OPP operating points
 
+Optional properties:
+- sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon
+        SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.
+
 Example:
 
 / {
@@ -63,3 +67,50 @@ Example:
         operating-points-v2 = <&gmu_opp_table>;
     };
 };
+
+a3xx example with OCMEM support:
+
+/ {
+    ...
+
+    gpu: adreno@fdb00000 {
+        compatible = "qcom,adreno-330.2",
+                     "qcom,adreno";
+        reg = <0xfdb00000 0x10000>;
+        reg-names = "kgsl_3d0_reg_memory";
+        interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "kgsl_3d0_irq";
+        clock-names = "core",
+                      "iface",
+                      "mem_iface";
+        clocks = <&mmcc OXILI_GFX3D_CLK>,
+                 <&mmcc OXILICX_AHB_CLK>,
+                 <&mmcc OXILICX_AXI_CLK>;
+        sram = <&gmu_sram>;
+        power-domains = <&mmcc OXILICX_GDSC>;
+        operating-points-v2 = <&gpu_opp_table>;
+        iommus = <&gpu_iommu 0>;
+    };
+
+    ocmem@fdd00000 {
+        compatible = "qcom,msm8974-ocmem";
+
+        reg = <0xfdd00000 0x2000>,
+              <0xfec00000 0x180000>;
+        reg-names = "ctrl",
+                    "mem";
+
+        clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
+                 <&mmcc OCMEMCX_OCMEMNOC_CLK>;
+        clock-names = "core",
+                      "iface";
+
+        #address-cells = <1>;
+        #size-cells = <1>;
+
+        gmu_sram: gmu-sram@0 {
+            reg = <0x0 0x100000>;
+            ranges = <0 0 0xfec00000 0x100000>;
+        };
+    };
+};
@@ -76,6 +76,8 @@ Required properties:
 Optional properties:
 - clock-names: the following clocks are optional:
   * "lut"
+  * "tbu"
+  * "tbu_rt"
 
 Example:
 
Documentation/devicetree/bindings/sram/qcom,ocmem.yaml (new file, 96 lines):

# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/sram/qcom,ocmem.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: On Chip Memory (OCMEM) that is present on some Qualcomm Snapdragon SoCs.

maintainers:
  - Brian Masney <masneyb@onstation.org>

description: |
  The On Chip Memory (OCMEM) is typically used by the GPU, camera/video, and
  audio components on some Snapdragon SoCs.

properties:
  compatible:
    const: qcom,msm8974-ocmem

  reg:
    items:
      - description: Control registers
      - description: OCMEM address range

  reg-names:
    items:
      - const: ctrl
      - const: mem

  clocks:
    items:
      - description: Core clock
      - description: Interface clock

  clock-names:
    items:
      - const: core
      - const: iface

  '#address-cells':
    const: 1

  '#size-cells':
    const: 1

required:
  - compatible
  - reg
  - reg-names
  - clocks
  - clock-names
  - '#address-cells'
  - '#size-cells'

patternProperties:
  "^.+-sram$":
    type: object
    description: A region of reserved memory.

    properties:
      reg:
        maxItems: 1

      ranges:
        maxItems: 1

    required:
      - reg
      - ranges

examples:
  - |
    #include <dt-bindings/clock/qcom,rpmcc.h>
    #include <dt-bindings/clock/qcom,mmcc-msm8974.h>

    ocmem: ocmem@fdd00000 {
      compatible = "qcom,msm8974-ocmem";

      reg = <0xfdd00000 0x2000>,
            <0xfec00000 0x180000>;
      reg-names = "ctrl",
                  "mem";

      clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
               <&mmcc OCMEMCX_OCMEMNOC_CLK>;
      clock-names = "core",
                    "iface";

      #address-cells = <1>;
      #size-cells = <1>;

      gmu-sram@0 {
        reg = <0x0 0x100000>;
        ranges = <0 0 0xfec00000 0x100000>;
      };
    };
@@ -862,7 +862,6 @@ S:	Maintained
 F:	drivers/i2c/busses/i2c-amd-mp2*
 
 AMD POWERPLAY
-M:	Rex Zhu <rex.zhu@amd.com>
 M:	Evan Quan <evan.quan@amd.com>
 L:	amd-gfx@lists.freedesktop.org
 S:	Supported
@@ -14,8 +14,8 @@
  * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
  * (unlike x86, where it gets mapped "write-coalescing").
  */
-#define map_page_into_agp(page)		/* nothing */
-#define unmap_page_from_agp(page)	/* nothing */
+#define map_page_into_agp(page)		do { } while (0)
+#define unmap_page_from_agp(page)	do { } while (0)
 #define flush_agp_cache()		mb()
 
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
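Why the empty defines above are worth replacing: an empty macro body leaves a bare semicolon behind at every call site, which newer compilers flag (for example with -Wempty-body) when the call is the sole body of an if statement. A minimal stand-alone sketch of the difference, with a hypothetical caller and void * standing in for struct page *:

/* Illustration only; the caller and types are placeholders, not kernel code. */
#define map_page_into_agp_empty(page)       /* nothing */
#define map_page_into_agp_dowhile(page)     do { } while (0)

static void agp_map_example(int need_map, void *page)
{
    if (need_map)
        map_page_into_agp_empty(page);      /* expands to "if (need_map) ;",
                                             * which -Wempty-body warns about */

    if (need_map)
        map_page_into_agp_dowhile(page);    /* still one statement, no warning */
}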
@@ -102,14 +102,13 @@ agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
                       int size, pgprot_t page_prot)
 {
     struct agp_segment_priv *seg;
-    int num_segments, i;
+    int i;
     off_t pg_start;
     size_t pg_count;
 
     pg_start = offset / 4096;
     pg_count = size / 4096;
     seg = *(client->segments);
-    num_segments = client->num_segments;
 
     for (i = 0; i < client->num_segments; i++) {
         if ((seg[i].pg_start == pg_start) &&
@@ -207,6 +207,7 @@ EXPORT_SYMBOL(agp_free_memory);
 /**
  *	agp_allocate_memory  -  allocate a group of pages of a certain type.
  *
+ *	@bridge: an agp_bridge_data struct allocated for the AGP host bridge.
  *	@page_count: size_t argument of the number of pages
  *	@type: u32 argument of the type of memory to be allocated.
  *
@@ -355,6 +356,7 @@ EXPORT_SYMBOL_GPL(agp_num_entries);
 /**
  *	agp_copy_info  -  copy bridge state information
  *
+ *	@bridge: an agp_bridge_data struct allocated for the AGP host bridge.
  *	@info: agp_kern_info pointer. The caller should insure that this pointer is valid.
  *
  *	This function copies information about the agp bridge device and the state of
@@ -850,7 +852,6 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 {
     char *table;
     char *table_end;
-    int size;
     int page_order;
     int num_entries;
     int i;
@@ -864,25 +865,22 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
     table = NULL;
     i = bridge->aperture_size_idx;
     temp = bridge->current_size;
-    size = page_order = num_entries = 0;
+    page_order = num_entries = 0;
 
     if (bridge->driver->size_type != FIXED_APER_SIZE) {
         do {
             switch (bridge->driver->size_type) {
             case U8_APER_SIZE:
-                size = A_SIZE_8(temp)->size;
                 page_order =
                     A_SIZE_8(temp)->page_order;
                 num_entries =
                     A_SIZE_8(temp)->num_entries;
                 break;
             case U16_APER_SIZE:
-                size = A_SIZE_16(temp)->size;
                 page_order = A_SIZE_16(temp)->page_order;
                 num_entries = A_SIZE_16(temp)->num_entries;
                 break;
             case U32_APER_SIZE:
-                size = A_SIZE_32(temp)->size;
                 page_order = A_SIZE_32(temp)->page_order;
                 num_entries = A_SIZE_32(temp)->num_entries;
                 break;
@@ -890,7 +888,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
             case FIXED_APER_SIZE:
             case LVL2_APER_SIZE:
             default:
-                size = page_order = num_entries = 0;
+                page_order = num_entries = 0;
                 break;
             }
 
@@ -920,7 +918,6 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
             }
         } while (!table && (i < bridge->driver->num_aperture_sizes));
     } else {
-        size = ((struct aper_size_info_fixed *) temp)->size;
         page_order = ((struct aper_size_info_fixed *) temp)->page_order;
         num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
         table = alloc_gatt_pages(page_order);
@@ -1282,6 +1279,7 @@ EXPORT_SYMBOL(agp_generic_destroy_page);
 /**
  *	agp_enable  -  initialise the agp point-to-point connection.
  *
+ *	@bridge: an agp_bridge_data struct allocated for the AGP host bridge.
  *	@mode: agp mode register value to configure with.
  */
 void agp_enable(struct agp_bridge_data *bridge, u32 mode)
@@ -442,6 +442,41 @@ int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
                 req, req_cnt * sizeof(*req), resp, sizeof(*resp));
 }
 
+int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size,
+                          u32 mode)
+{
+    struct ocmem_tz_lock {
+        __le32 id;
+        __le32 offset;
+        __le32 size;
+        __le32 mode;
+    } request;
+
+    request.id = cpu_to_le32(id);
+    request.offset = cpu_to_le32(offset);
+    request.size = cpu_to_le32(size);
+    request.mode = cpu_to_le32(mode);
+
+    return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD,
+                         &request, sizeof(request), NULL, 0);
+}
+
+int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size)
+{
+    struct ocmem_tz_unlock {
+        __le32 id;
+        __le32 offset;
+        __le32 size;
+    } request;
+
+    request.id = cpu_to_le32(id);
+    request.offset = cpu_to_le32(offset);
+    request.size = cpu_to_le32(size);
+
+    return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_UNLOCK_CMD,
+                         &request, sizeof(request), NULL, 0);
+}
+
 void __qcom_scm_init(void)
 {
 }
@@ -582,7 +617,22 @@ int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
 int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
                                u32 spare)
 {
-    return -ENODEV;
+    struct msm_scm_sec_cfg {
+        __le32 id;
+        __le32 ctx_bank_num;
+    } cfg;
+    int ret, scm_ret = 0;
+
+    cfg.id = cpu_to_le32(device_id);
+    cfg.ctx_bank_num = cpu_to_le32(spare);
+
+    ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
+                        &cfg, sizeof(cfg), &scm_ret, sizeof(scm_ret));
+
+    if (ret || scm_ret)
+        return ret ? ret : -EINVAL;
+
+    return 0;
 }
 
 int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
@@ -291,6 +291,18 @@ int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
     return ret;
 }
 
+int __qcom_scm_ocmem_lock(struct device *dev, uint32_t id, uint32_t offset,
+                          uint32_t size, uint32_t mode)
+{
+    return -ENOTSUPP;
+}
+
+int __qcom_scm_ocmem_unlock(struct device *dev, uint32_t id, uint32_t offset,
+                            uint32_t size)
+{
+    return -ENOTSUPP;
+}
+
 void __qcom_scm_init(void)
 {
     u64 cmd;
@@ -191,6 +191,46 @@ bool qcom_scm_pas_supported(u32 peripheral)
 }
 EXPORT_SYMBOL(qcom_scm_pas_supported);
 
+/**
+ * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
+ */
+bool qcom_scm_ocmem_lock_available(void)
+{
+    return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC,
+                                        QCOM_SCM_OCMEM_LOCK_CMD);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
+
+/**
+ * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
+ * region to the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ * @mode: access mode (WIDE/NARROW)
+ */
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+                        u32 mode)
+{
+    return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock);
+
+/**
+ * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
+ * region from the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ */
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
+{
+    return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
+
 /**
  * qcom_scm_pas_init_image() - Initialize peripheral authentication service
  *			       state machine for a given peripheral, using the
@@ -327,6 +367,19 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = {
     .deassert = qcom_scm_pas_reset_deassert,
 };
 
+/**
+ * qcom_scm_restore_sec_cfg_available() - Check if secure environment
+ * supports restore security config interface.
+ *
+ * Return true if restore-cfg interface is supported, false if not.
+ */
+bool qcom_scm_restore_sec_cfg_available(void)
+{
+    return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+                                        QCOM_SCM_RESTORE_SEC_CFG);
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
+
 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
 {
     return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
@@ -42,6 +42,15 @@ extern int __qcom_scm_hdcp_req(struct device *dev,
 
 extern void __qcom_scm_init(void);
 
+#define QCOM_SCM_OCMEM_SVC			0xf
+#define QCOM_SCM_OCMEM_LOCK_CMD		0x1
+#define QCOM_SCM_OCMEM_UNLOCK_CMD		0x2
+
+extern int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset,
+				 u32 size, u32 mode);
+extern int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset,
+				   u32 size);
+
 #define QCOM_SCM_SVC_PIL		0x2
 #define QCOM_SCM_PAS_INIT_IMAGE_CMD	0x1
 #define QCOM_SCM_PAS_MEM_SETUP_CMD	0x2
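For orientation, a rough sketch of how a consumer driver might use the OCMEM SCM helpers exported above. The client id constant, the caller name, and the error handling are illustrative assumptions; only qcom_scm_ocmem_lock_available(), qcom_scm_ocmem_lock() and qcom_scm_ocmem_unlock() come from the patch itself.

/* Hypothetical caller; QCOM_SCM_OCMEM_GRAPHICS_ID is assumed to be one of the
 * enum qcom_scm_ocmem_client values defined elsewhere in this series. */
static int ocmem_secure_region_example(u32 offset, u32 size, u32 mode)
{
    int ret;

    if (!qcom_scm_ocmem_lock_available())
        return -ENODEV;     /* firmware does not expose the interface */

    ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size, mode);
    if (ret)
        return ret;

    /* ... hand the locked OCMEM range to the GPU here ... */

    return qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size);
}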
@@ -95,6 +95,7 @@ config DRM_KMS_FB_HELPER
 
 config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
         bool "Enable refcount backtrace history in the DP MST helpers"
+        depends on STACKTRACE_SUPPORT
         select STACKDEPOT
         depends on DRM_KMS_HELPER
         depends on DEBUG_KERNEL
@@ -105,11 +105,24 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
         (kfd_mem_limit.max_ttm_mem_limit >> 20));
 }
 
+/* Estimate page table size needed to represent a given memory size
+ *
+ * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
+ * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
+ * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
+ * for 2MB pages for TLB efficiency. However, small allocations and
+ * fragmented system memory still need some 4KB pages. We choose a
+ * compromise that should work in most cases without reserving too
+ * much memory for page tables unnecessarily (factor 16K, >> 14).
+ */
+#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
+
 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
         uint64_t size, u32 domain, bool sg)
 {
+    uint64_t reserved_for_pt =
+        ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
     size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
-    uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
     int ret = 0;
 
     acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
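To make the shift factors in the new comment concrete, a small user-space sketch (the 64 GiB figure is only an example, not from the patch): the old >> 9 rule reserves one 8-byte PTE per 4 KiB page, while the >> 14 compromise reserves roughly 1/16384 of the managed memory.

#include <stdint.h>
#include <stdio.h>

/* Same shift as the patch's ESTIMATE_PT_SIZE(); the input size is made up. */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)

int main(void)
{
    uint64_t mem = 64ULL << 30;   /* hypothetical 64 GiB of managed memory */

    printf("old rule (>> 9):  %llu MiB reserved\n",
           (unsigned long long)((mem >> 9) >> 20));              /* 128 MiB */
    printf("new rule (>> 14): %llu MiB reserved\n",
           (unsigned long long)(ESTIMATE_PT_SIZE(mem) >> 20));   /* 4 MiB */
    return 0;
}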
@@ -1487,8 +1487,8 @@ static int psp_np_fw_load(struct psp_context *psp)
                 return ret;
 
         /* Start rlc autoload after psp recieved all the gfx firmware */
-        if (psp->autoload_supported && ucode->ucode_id ==
-            AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
+        if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
+            AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
             ret = psp_rlc_autoload(psp);
             if (ret) {
                 DRM_ERROR("Failed to start rlc autoload\n");
@@ -27,7 +27,8 @@
 #include <linux/bits.h>
 #include "smu_v11_0_i2c.h"
 
-#define EEPROM_I2C_TARGET_ADDR 0xA0
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS  0xA8
+#define EEPROM_I2C_TARGET_ADDR_VEGA20    0xA0
 
 /*
  * The 2 macros bellow represent the actual size in bytes that
@@ -83,7 +84,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
 {
     int ret = 0;
     struct i2c_msg msg = {
-            .addr   = EEPROM_I2C_TARGET_ADDR,
+            .addr   = 0,
             .flags  = 0,
             .len    = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
             .buf    = buff,
@@ -93,6 +94,8 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
     *(uint16_t *)buff = EEPROM_HDR_START;
     __encode_table_header_to_buff(&control->tbl_hdr, buff + EEPROM_ADDRESS_SIZE);
 
+    msg.addr = control->i2c_address;
+
     ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
     if (ret < 1)
         DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);
@@ -203,7 +206,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
     unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
     struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
     struct i2c_msg msg = {
-            .addr   = EEPROM_I2C_TARGET_ADDR,
+            .addr   = 0,
             .flags  = I2C_M_RD,
             .len    = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
             .buf    = buff,
@@ -213,10 +216,12 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
 
     switch (adev->asic_type) {
     case CHIP_VEGA20:
+        control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20;
         ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
         break;
 
     case CHIP_ARCTURUS:
+        control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
         ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
         break;
 
@@ -229,6 +234,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
         return ret;
     }
 
+    msg.addr = control->i2c_address;
+
     /* Read/Create table header from EEPROM address 0 */
     ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
     if (ret < 1) {
@@ -408,8 +415,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
          * Update bits 16,17 of EEPROM address in I2C address by setting them
          * to bits 1,2 of Device address byte
          */
-        msg->addr = EEPROM_I2C_TARGET_ADDR |
+        msg->addr = control->i2c_address |
                 ((control->next_addr & EEPROM_ADDR_MSB_MASK) >> 15);
         msg->flags = write ? 0 : I2C_M_RD;
         msg->len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE;
         msg->buf = buff;
@@ -50,6 +50,7 @@ struct amdgpu_ras_eeprom_control {
     struct mutex tbl_mutex;
     bool bus_locked;
     uint32_t tbl_byte_sum;
+    uint16_t i2c_address; // 8-bit represented address
 };
 
 /*
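The record-addressing comment a few hunks above is easier to see with numbers. A stand-alone sketch (the mask value and the sample offset are assumptions for illustration): bits 16 and 17 of the EEPROM record offset do not fit into the two address bytes of the I2C message, so they are shifted down into bits 1 and 2 of the 8-bit device address, which is exactly what the >> 15 does.

#include <stdint.h>
#include <stdio.h>

#define EEPROM_ADDR_MSB_MASK  0x30000   /* assumed: bits 16,17 of the offset */

int main(void)
{
    uint16_t i2c_address = 0xA0;        /* example 8-bit device address */
    uint32_t next_addr   = 0x25000;     /* example offset with bit 17 set */
    uint16_t msg_addr;

    /* bits 16,17 of the offset land in bits 1,2 of the device address */
    msg_addr = i2c_address | ((next_addr & EEPROM_ADDR_MSB_MASK) >> 15);

    printf("msg.addr = 0x%x\n", msg_addr);  /* prints 0xa4 for this example */
    return 0;
}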
@@ -124,13 +124,12 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
  */
 int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
 {
-    volatile u32 *dst_ptr;
     u32 dws;
     int r;
 
     /* allocate clear state block */
     adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
-    r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+    r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
                                   AMDGPU_GEM_DOMAIN_VRAM,
                                   &adev->gfx.rlc.clear_state_obj,
                                   &adev->gfx.rlc.clear_state_gpu_addr,
@@ -141,13 +140,6 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
         return r;
     }
 
-    /* set up the cs buffer */
-    dst_ptr = adev->gfx.rlc.cs_ptr;
-    adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
-    amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-    amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-    amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
     return 0;
 }
 
@@ -1346,10 +1346,13 @@ static int cik_asic_reset(struct amdgpu_device *adev)
 {
     int r;
 
-    if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+    if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+        if (!adev->in_suspend)
+            amdgpu_inc_vram_lost(adev);
         r = smu7_asic_baco_reset(adev);
-    else
+    } else {
         r = cik_asic_pci_config_reset(adev);
+    }
 
     return r;
 }
@@ -690,60 +690,62 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
     adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
     adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-    snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
-    err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
-    if (err)
-        goto out;
-    err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
-    rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
-    version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
-    version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-    if (version_major == 2 && version_minor == 1)
-        adev->gfx.rlc.is_rlc_v2_1 = true;
+    if (!amdgpu_sriov_vf(adev)) {
+        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+        if (err)
+            goto out;
+        err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+        rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+        version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+        version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+        if (version_major == 2 && version_minor == 1)
+            adev->gfx.rlc.is_rlc_v2_1 = true;
 
         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
         adev->gfx.rlc.save_and_restore_offset =
             le32_to_cpu(rlc_hdr->save_and_restore_offset);
         adev->gfx.rlc.clear_state_descriptor_offset =
             le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
         adev->gfx.rlc.avail_scratch_ram_locations =
             le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
         adev->gfx.rlc.reg_restore_list_size =
             le32_to_cpu(rlc_hdr->reg_restore_list_size);
         adev->gfx.rlc.reg_list_format_start =
             le32_to_cpu(rlc_hdr->reg_list_format_start);
         adev->gfx.rlc.reg_list_format_separate_start =
             le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
         adev->gfx.rlc.starting_offsets_start =
             le32_to_cpu(rlc_hdr->starting_offsets_start);
         adev->gfx.rlc.reg_list_format_size_bytes =
             le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
         adev->gfx.rlc.reg_list_size_bytes =
             le32_to_cpu(rlc_hdr->reg_list_size_bytes);
         adev->gfx.rlc.register_list_format =
             kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
                     adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
         if (!adev->gfx.rlc.register_list_format) {
             err = -ENOMEM;
             goto out;
+        }
+
+        tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+            le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+        for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+            adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+        adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+        tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+            le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+        for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+            adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+
+        if (adev->gfx.rlc.is_rlc_v2_1)
+            gfx_v10_0_init_rlc_ext_microcode(adev);
     }
 
-    tmp = (unsigned int *)((uintptr_t)rlc_hdr +
-        le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
-    for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
-        adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
-    adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
-    tmp = (unsigned int *)((uintptr_t)rlc_hdr +
-        le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
-    for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
-        adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
-    if (adev->gfx.rlc.is_rlc_v2_1)
-        gfx_v10_0_init_rlc_ext_microcode(adev);
-
     snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
     err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
     if (err)
@@ -993,39 +995,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
     return 0;
 }
 
-static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev)
-{
-    int r;
-
-    r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-    if (unlikely(r != 0))
-        return r;
-
-    r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
-            AMDGPU_GEM_DOMAIN_VRAM);
-    if (!r)
-        adev->gfx.rlc.clear_state_gpu_addr =
-            amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
-    amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
-    return r;
-}
-
-static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
-    int r;
-
-    if (!adev->gfx.rlc.clear_state_obj)
-        return;
-
-    r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
-    if (likely(r == 0)) {
-        amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-    }
-}
-
 static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
 {
     amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1787,25 +1756,7 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
 
 static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
 {
-    int r;
-
-    if (adev->in_gpu_reset) {
-        r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-        if (r)
-            return r;
-
-        r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj,
-                           (void **)&adev->gfx.rlc.cs_ptr);
-        if (!r) {
-            adev->gfx.rlc.funcs->get_csb_buffer(adev,
-                    adev->gfx.rlc.cs_ptr);
-            amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-        }
-
-        amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-        if (r)
-            return r;
-    }
+    adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
 
     /* csib */
     WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
@@ -1817,22 +1768,6 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
     return 0;
 }
 
-static int gfx_v10_0_init_pg(struct amdgpu_device *adev)
-{
-    int i;
-    int r;
-
-    r = gfx_v10_0_init_csb(adev);
-    if (r)
-        return r;
-
-    for (i = 0; i < adev->num_vmhubs; i++)
-        amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
-
-    /* TODO: init power gating */
-    return 0;
-}
-
 void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
 {
     u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
@@ -1925,21 +1860,16 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
 {
     int r;
 
-    if (amdgpu_sriov_vf(adev))
-        return 0;
-
     if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 
         r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
         if (r)
             return r;
 
-        r = gfx_v10_0_init_pg(adev);
-        if (r)
-            return r;
-
-        /* enable RLC SRM */
-        gfx_v10_0_rlc_enable_srm(adev);
+        gfx_v10_0_init_csb(adev);
 
+        if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
+            gfx_v10_0_rlc_enable_srm(adev);
     } else {
         adev->gfx.rlc.funcs->stop(adev);
 
@@ -1961,9 +1891,7 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
         return r;
     }
 
-    r = gfx_v10_0_init_pg(adev);
-    if (r)
-        return r;
+    gfx_v10_0_init_csb(adev);
 
     adev->gfx.rlc.funcs->start(adev);
 
@@ -2825,7 +2753,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
     /* Init gfx ring 0 for pipe 0 */
     mutex_lock(&adev->srbm_mutex);
     gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
-    mutex_unlock(&adev->srbm_mutex);
     /* Set ring buffer size */
     ring = &adev->gfx.gfx_ring[0];
     rb_bufsz = order_base_2(ring->ring_size / 8);
@@ -2863,11 +2791,11 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
     WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);
 
     gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+    mutex_unlock(&adev->srbm_mutex);
 
     /* Init gfx ring 1 for pipe 1 */
     mutex_lock(&adev->srbm_mutex);
     gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
-    mutex_unlock(&adev->srbm_mutex);
     ring = &adev->gfx.gfx_ring[1];
     rb_bufsz = order_base_2(ring->ring_size / 8);
     tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
@@ -2897,6 +2825,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
     WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
 
     gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+    mutex_unlock(&adev->srbm_mutex);
 
     /* Switch to pipe 0 */
     mutex_lock(&adev->srbm_mutex);
@@ -3775,10 +3704,6 @@ static int gfx_v10_0_hw_init(void *handle)
     int r;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    r = gfx_v10_0_csb_vram_pin(adev);
-    if (r)
-        return r;
-
     if (!amdgpu_emu_mode)
         gfx_v10_0_init_golden_registers(adev);
 
@@ -3861,12 +3786,11 @@ static int gfx_v10_0_hw_fini(void *handle)
     if (amdgpu_gfx_disable_kcq(adev))
         DRM_ERROR("KCQ disable failed\n");
     if (amdgpu_sriov_vf(adev)) {
-        pr_debug("For SRIOV client, shouldn't do anything.\n");
+        gfx_v10_0_cp_gfx_enable(adev, false);
         return 0;
     }
     gfx_v10_0_cp_enable(adev, false);
     gfx_v10_0_enable_gui_idle_interrupt(adev, false);
-    gfx_v10_0_csb_vram_unpin(adev);
 
     return 0;
 }
@@ -4554,6 +4554,8 @@ static int gfx_v7_0_hw_init(void *handle)
 
     gfx_v7_0_constants_init(adev);
 
+    /* init CSB */
+    adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
     /* init rlc */
     r = adev->gfx.rlc.funcs->resume(adev);
     if (r)
@@ -1321,39 +1321,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
     return 0;
 }
 
-static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
-{
-    int r;
-
-    r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-    if (unlikely(r != 0))
-        return r;
-
-    r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
-            AMDGPU_GEM_DOMAIN_VRAM);
-    if (!r)
-        adev->gfx.rlc.clear_state_gpu_addr =
-            amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
-    amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
-    return r;
-}
-
-static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
-    int r;
-
-    if (!adev->gfx.rlc.clear_state_obj)
-        return;
-
-    r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
-    if (likely(r == 0)) {
-        amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-    }
-}
-
 static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
 {
     amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -3917,6 +3884,7 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
 
 static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
 {
+    adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
     /* csib */
     WREG32(mmRLC_CSIB_ADDR_HI,
             adev->gfx.rlc.clear_state_gpu_addr >> 32);
@@ -4837,10 +4805,6 @@ static int gfx_v8_0_hw_init(void *handle)
     gfx_v8_0_init_golden_registers(adev);
     gfx_v8_0_constants_init(adev);
 
-    r = gfx_v8_0_csb_vram_pin(adev);
-    if (r)
-        return r;
-
     r = adev->gfx.rlc.funcs->resume(adev);
     if (r)
         return r;
@@ -4958,8 +4922,6 @@ static int gfx_v8_0_hw_fini(void *handle)
         pr_err("rlc is busy, skip halt rlc\n");
     amdgpu_gfx_rlc_exit_safe_mode(adev);
 
-    gfx_v8_0_csb_vram_unpin(adev);
-
     return 0;
 }
 
@@ -1695,39 +1695,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
     return 0;
 }
 
-static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
-{
-    int r;
-
-    r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-    if (unlikely(r != 0))
-        return r;
-
-    r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
-            AMDGPU_GEM_DOMAIN_VRAM);
-    if (!r)
-        adev->gfx.rlc.clear_state_gpu_addr =
-            amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
-    amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
-    return r;
-}
-
-static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
-    int r;
-
-    if (!adev->gfx.rlc.clear_state_obj)
-        return;
-
-    r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
-    if (likely(r == 0)) {
-        amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-    }
-}
-
 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
 {
     amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -2415,6 +2382,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
 
 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
 {
+    adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
     /* csib */
     WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
             adev->gfx.rlc.clear_state_gpu_addr >> 32);
@@ -3706,10 +3674,6 @@ static int gfx_v9_0_hw_init(void *handle)
 
     gfx_v9_0_constants_init(adev);
 
-    r = gfx_v9_0_csb_vram_pin(adev);
-    if (r)
-        return r;
-
     r = adev->gfx.rlc.funcs->resume(adev);
     if (r)
         return r;
@@ -3791,8 +3755,6 @@ static int gfx_v9_0_hw_fini(void *handle)
     gfx_v9_0_cp_enable(adev, false);
     adev->gfx.rlc.funcs->stop(adev);
 
-    gfx_v9_0_csb_vram_unpin(adev);
-
     return 0;
 }
 
@@ -33,16 +33,31 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
     u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
     u32 max_region =
         REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
+    u32 max_num_physical_nodes   = 0;
+    u32 max_physical_node_id     = 0;
+
+    switch (adev->asic_type) {
+    case CHIP_VEGA20:
+        max_num_physical_nodes   = 4;
+        max_physical_node_id     = 3;
+        break;
+    case CHIP_ARCTURUS:
+        max_num_physical_nodes   = 8;
+        max_physical_node_id     = 7;
+        break;
+    default:
+        return -EINVAL;
+    }
 
     /* PF_MAX_REGION=0 means xgmi is disabled */
     if (max_region) {
         adev->gmc.xgmi.num_physical_nodes = max_region + 1;
-        if (adev->gmc.xgmi.num_physical_nodes > 4)
+        if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
             return -EINVAL;
 
         adev->gmc.xgmi.physical_node_id =
             REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
-        if (adev->gmc.xgmi.physical_node_id > 3)
+        if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
             return -EINVAL;
         adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
             RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
@@ -326,7 +326,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
     if (!adev->mman.buffer_funcs_enabled ||
         !adev->ib_pool_ready ||
-        adev->in_gpu_reset) {
+        adev->in_gpu_reset ||
+        ring->sched.ready == false) {
         gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
         mutex_unlock(&adev->mman.gtt_window_lock);
         return;
@@ -783,10 +783,13 @@ static int vi_asic_reset(struct amdgpu_device *adev)
 {
     int r;
 
-    if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+    if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+        if (!adev->in_suspend)
+            amdgpu_inc_vram_lost(adev);
         r = smu7_asic_baco_reset(adev);
-    else
+    } else {
         r = vi_asic_pci_config_reset(adev);
+    }
 
     return r;
 }
@@ -5,7 +5,7 @@
 
 config HSA_AMD
 	bool "HSA kernel driver for AMD GPU devices"
-	depends on DRM_AMDGPU && (X86_64 || ARM64)
+	depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
 	imply AMD_IOMMU_V2 if X86_64
 	select MMU_NOTIFIER
 	help
@@ -342,7 +342,8 @@ bool dm_pp_get_clock_levels_by_type(
     if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
         if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
             dc_to_pp_clock_type(clk_type), &pp_clks)) {
             /* Error in pplib. Provide default values. */
+            get_default_clock_levels(clk_type, dc_clks);
             return true;
         }
     } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) {
@ -1037,6 +1037,25 @@ void dcn20_pipe_control_lock(
|
|||||||
if (pipe->plane_state != NULL)
|
if (pipe->plane_state != NULL)
|
||||||
flip_immediate = pipe->plane_state->flip_immediate;
|
flip_immediate = pipe->plane_state->flip_immediate;
|
||||||
|
|
||||||
|
if (flip_immediate && lock) {
|
||||||
|
const int TIMEOUT_FOR_FLIP_PENDING = 100000;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
|
||||||
|
if (!pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp))
|
||||||
|
break;
|
||||||
|
udelay(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pipe->bottom_pipe != NULL) {
|
||||||
|
for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
|
||||||
|
if (!pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp))
|
||||||
|
break;
|
||||||
|
udelay(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/* In flip immediate and pipe splitting case, we need to use GSL
|
/* In flip immediate and pipe splitting case, we need to use GSL
|
||||||
* for synchronization. Only do setup on locking and on flip type change.
|
* for synchronization. Only do setup on locking and on flip type change.
|
||||||
*/
|
*/
|
||||||
|
@@ -157,6 +157,74 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
 	.xfc_fill_constant_bytes = 0,
 };

+struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
+	.odm_capable = 1,
+	.gpuvm_enable = 0,
+	.hostvm_enable = 0,
+	.gpuvm_max_page_table_levels = 4,
+	.hostvm_max_page_table_levels = 4,
+	.hostvm_cached_page_table_levels = 0,
+	.num_dsc = 5,
+	.rob_buffer_size_kbytes = 168,
+	.det_buffer_size_kbytes = 164,
+	.dpte_buffer_size_in_pte_reqs_luma = 84,
+	.dpte_buffer_size_in_pte_reqs_chroma = 42,//todo
+	.dpp_output_buffer_pixels = 2560,
+	.opp_output_buffer_lines = 1,
+	.pixel_chunk_size_kbytes = 8,
+	.pte_enable = 1,
+	.max_page_table_levels = 4,
+	.pte_chunk_size_kbytes = 2,
+	.meta_chunk_size_kbytes = 2,
+	.writeback_chunk_size_kbytes = 2,
+	.line_buffer_size_bits = 789504,
+	.is_line_buffer_bpp_fixed = 0,
+	.line_buffer_fixed_bpp = 0,
+	.dcc_supported = true,
+	.max_line_buffer_lines = 12,
+	.writeback_luma_buffer_size_kbytes = 12,
+	.writeback_chroma_buffer_size_kbytes = 8,
+	.writeback_chroma_line_buffer_width_pixels = 4,
+	.writeback_max_hscl_ratio = 1,
+	.writeback_max_vscl_ratio = 1,
+	.writeback_min_hscl_ratio = 1,
+	.writeback_min_vscl_ratio = 1,
+	.writeback_max_hscl_taps = 12,
+	.writeback_max_vscl_taps = 12,
+	.writeback_line_buffer_luma_buffer_size = 0,
+	.writeback_line_buffer_chroma_buffer_size = 14643,
+	.cursor_buffer_size = 8,
+	.cursor_chunk_size = 2,
+	.max_num_otg = 5,
+	.max_num_dpp = 5,
+	.max_num_wb = 1,
+	.max_dchub_pscl_bw_pix_per_clk = 4,
+	.max_pscl_lb_bw_pix_per_clk = 2,
+	.max_lb_vscl_bw_pix_per_clk = 4,
+	.max_vscl_hscl_bw_pix_per_clk = 4,
+	.max_hscl_ratio = 8,
+	.max_vscl_ratio = 8,
+	.hscl_mults = 4,
+	.vscl_mults = 4,
+	.max_hscl_taps = 8,
+	.max_vscl_taps = 8,
+	.dispclk_ramp_margin_percent = 1,
+	.underscan_factor = 1.10,
+	.min_vblank_lines = 32, //
+	.dppclk_delay_subtotal = 77, //
+	.dppclk_delay_scl_lb_only = 16,
+	.dppclk_delay_scl = 50,
+	.dppclk_delay_cnvc_formatter = 8,
+	.dppclk_delay_cnvc_cursor = 6,
+	.dispclk_delay_subtotal = 87, //
+	.dcfclk_cstate_latency = 10, // SRExitTime
+	.max_inter_dcn_tile_repeaters = 8,
+	.xfc_supported = true,
+	.xfc_fill_bw_overhead_percent = 10.0,
+	.xfc_fill_constant_bytes = 0,
+	.ptoi_supported = 0
+};
+
 struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
 	/* Defaults that get patched on driver load from firmware. */
 	.clock_limits = {

@@ -854,6 +922,8 @@ static const struct resource_caps res_cap_nv14 = {
 	.num_pll = 5,
 	.num_dwb = 1,
 	.num_ddc = 5,
+	.num_vmid = 16,
+	.num_dsc = 5,
 };

 static const struct dc_debug_options debug_defaults_drv = {

@@ -3212,6 +3282,10 @@ static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
 static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
 	uint32_t hw_internal_rev)
 {
+	/* NV14 */
+	if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+		return &dcn2_0_nv14_ip;
+
 	/* NV12 and NV10 */
 	return &dcn2_0_ip;
 }
@@ -2548,3 +2548,12 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu)

 	return ret;
 }
+
+int smu_send_smc_msg(struct smu_context *smu,
+		     enum smu_message_type msg)
+{
+	int ret;
+
+	ret = smu_send_smc_msg_with_param(smu, msg, 0);
+	return ret;
+}

@@ -2130,7 +2130,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.set_tool_table_location = smu_v11_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
-	.send_smc_msg = smu_v11_0_send_msg,
 	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
 	.read_smc_arg = smu_v11_0_read_arg,
 	.init_display_count = smu_v11_0_init_display_count,

@@ -497,8 +497,8 @@ struct pptable_funcs {
 	int (*notify_memory_pool_location)(struct smu_context *smu);
 	int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
 	int (*system_features_control)(struct smu_context *smu, bool en);
-	int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
-	int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, uint32_t param);
+	int (*send_smc_msg_with_param)(struct smu_context *smu,
+				       enum smu_message_type msg, uint32_t param);
 	int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
 	int (*init_display_count)(struct smu_context *smu, uint32_t count);
 	int (*set_allowed_mask)(struct smu_context *smu);

@@ -177,10 +177,9 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
 int smu_v11_0_system_features_control(struct smu_context *smu,
 				      bool en);

-int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg);
-
 int
-smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+smu_v11_0_send_msg_with_param(struct smu_context *smu,
+			      enum smu_message_type msg,
 			      uint32_t param);

 int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);

@@ -44,10 +44,9 @@ int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);

 int smu_v12_0_wait_for_response(struct smu_context *smu);

-int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg);
-
 int
-smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+smu_v12_0_send_msg_with_param(struct smu_context *smu,
+			      enum smu_message_type msg,
 			      uint32_t param);

 int smu_v12_0_check_fw_status(struct smu_context *smu);

@@ -2055,7 +2055,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.set_tool_table_location = smu_v11_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
-	.send_smc_msg = smu_v11_0_send_msg,
 	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
 	.read_smc_arg = smu_v11_0_read_arg,
 	.init_display_count = smu_v11_0_init_display_count,

@@ -697,7 +697,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 	.check_fw_version = smu_v12_0_check_fw_version,
 	.powergate_sdma = smu_v12_0_powergate_sdma,
 	.powergate_vcn = smu_v12_0_powergate_vcn,
-	.send_smc_msg = smu_v12_0_send_msg,
 	.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
 	.read_smc_arg = smu_v12_0_read_arg,
 	.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,

@@ -75,8 +75,8 @@
 #define smu_set_default_od_settings(smu, initialize) \
 	((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)

-#define smu_send_smc_msg(smu, msg) \
-	((smu)->ppt_funcs->send_smc_msg? (smu)->ppt_funcs->send_smc_msg((smu), (msg)) : 0)
+int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
+
 #define smu_send_smc_msg_with_param(smu, msg, param) \
 	((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
 #define smu_read_smc_arg(smu, arg) \
@@ -90,36 +90,11 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
 	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
 }

-int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
-{
-	struct amdgpu_device *adev = smu->adev;
-	int ret = 0, index = 0;
-
-	index = smu_msg_get_index(smu, msg);
-	if (index < 0)
-		return index;
-
-	smu_v11_0_wait_for_response(smu);
-
-	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
-
-	ret = smu_v11_0_wait_for_response(smu);
-
-	if (ret)
-		pr_err("failed send message: %10s (%d) response %#x\n",
-		       smu_get_message_name(smu, msg), index, ret);
-
-	return ret;
-}
-
 int
-smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+smu_v11_0_send_msg_with_param(struct smu_context *smu,
+			      enum smu_message_type msg,
 			      uint32_t param)
 {
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0, index = 0;

@@ -77,33 +77,9 @@ int smu_v12_0_wait_for_response(struct smu_context *smu)
 	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
 }

-int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
-{
-	struct amdgpu_device *adev = smu->adev;
-	int ret = 0, index = 0;
-
-	index = smu_msg_get_index(smu, msg);
-	if (index < 0)
-		return index;
-
-	smu_v12_0_wait_for_response(smu);
-
-	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-	smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
-
-	ret = smu_v12_0_wait_for_response(smu);
-
-	if (ret)
-		pr_err("Failed to send message 0x%x, response 0x%x\n", index,
-		       ret);
-
-	return ret;
-}
-
 int
-smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+smu_v12_0_send_msg_with_param(struct smu_context *smu,
+			      enum smu_message_type msg,
 			      uint32_t param)
 {
 	struct amdgpu_device *adev = smu->adev;

@@ -3231,7 +3231,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.set_tool_table_location = smu_v11_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
-	.send_smc_msg = smu_v11_0_send_msg,
 	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
 	.read_smc_arg = smu_v11_0_read_arg,
 	.init_display_count = smu_v11_0_init_display_count,
|
|||||||
drm_dp_mst_topology_put_port(port);
|
drm_dp_mst_topology_put_port(port);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < mgr->max_payloads; i++) {
|
for (i = 0; i < mgr->max_payloads; /* do nothing */) {
|
||||||
if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
|
if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
|
||||||
|
i++;
|
||||||
continue;
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
DRM_DEBUG_KMS("removing payload %d\n", i);
|
DRM_DEBUG_KMS("removing payload %d\n", i);
|
||||||
for (j = i; j < mgr->max_payloads - 1; j++) {
|
for (j = i; j < mgr->max_payloads - 1; j++) {
|
||||||
|
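The fix above stops advancing the index after a payload is deleted, because the deletion shifts the tail of the array down into slot i; advancing would skip the entry that just moved there. A minimal standalone C sketch of the same compaction pattern (illustration only, not DRM code):

#include <stdio.h>

#define MAX 8

int main(void)
{
	int state[MAX] = { 1, 2, 2, 1, 2, 1 };	/* 2 == "delete this slot" */
	int n = 6, i = 0;

	while (i < n) {
		if (state[i] != 2) {	/* keep this slot, move to the next one */
			i++;
			continue;
		}
		/* remove slot i by shifting everything after it down by one */
		for (int j = i; j < n - 1; j++)
			state[j] = state[j + 1];
		n--;			/* same i is re-examined on the next pass */
	}

	for (int j = 0; j < n; j++)
		printf("%d ", state[j]);
	printf("\n");			/* prints: 1 1 1 */
	return 0;
}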
@@ -25,7 +25,7 @@ config DRM_I915_HEARTBEAT_INTERVAL

 config DRM_I915_PREEMPT_TIMEOUT
 	int "Preempt timeout (ms, jiffy granularity)"
-	default 100 # milliseconds
+	default 640 # milliseconds
 	help
 	  How long to wait (in milliseconds) for a preemption event to occur
 	  when submitting a new context via execlists. If the current context
@@ -1273,7 +1273,9 @@ static u8 icl_calc_voltage_level(int cdclk)

 static u8 ehl_calc_voltage_level(int cdclk)
 {
-	if (cdclk > 312000)
+	if (cdclk > 326400)
+		return 3;
+	else if (cdclk > 312000)
 		return 2;
 	else if (cdclk > 180000)
 		return 1;
@@ -593,7 +593,7 @@ struct tgl_dkl_phy_ddi_buf_trans {
 	u32 dkl_de_emphasis_control;
 };

-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = {
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
 	/* VS pre-emp Non-trans mV Pre-emph dB */
 	{ 0x7, 0x0, 0x00 },	/* 0 0 400mV 0 dB */
 	{ 0x5, 0x0, 0x03 },	/* 0 1 400mV 3.5 dB */

@@ -607,6 +607,20 @@ static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = {
 	{ 0x0, 0x0, 0x00 },	/* 3 0 1200mV 0 dB HDMI default */
 };

+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
+	/* HDMI Preset	VS	Pre-emph */
+	{ 0x7, 0x0, 0x0 },	/* 1	400mV	0dB */
+	{ 0x6, 0x0, 0x0 },	/* 2	500mV	0dB */
+	{ 0x4, 0x0, 0x0 },	/* 3	650mV	0dB */
+	{ 0x2, 0x0, 0x0 },	/* 4	800mV	0dB */
+	{ 0x0, 0x0, 0x0 },	/* 5	1000mV	0dB */
+	{ 0x0, 0x0, 0x5 },	/* 6	Full	-1.5 dB */
+	{ 0x0, 0x0, 0x6 },	/* 7	Full	-1.8 dB */
+	{ 0x0, 0x0, 0x7 },	/* 8	Full	-2 dB */
+	{ 0x0, 0x0, 0x8 },	/* 9	Full	-2.5 dB */
+	{ 0x0, 0x0, 0xA },	/* 10	Full	-3 dB */
+};
+
 static const struct ddi_buf_trans *
 bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {

@@ -898,7 +912,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
 		icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
 					0, &n_entries);
 	else
-		n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+		n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
 	default_entry = n_entries - 1;
 	} else if (INTEL_GEN(dev_priv) == 11) {
 		if (intel_phy_is_combo(dev_priv, phy))

@@ -2371,7 +2385,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 		icl_get_combo_buf_trans(dev_priv, encoder->type,
 					intel_dp->link_rate, &n_entries);
 	else
-		n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+		n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
 	} else if (INTEL_GEN(dev_priv) == 11) {
 		if (intel_phy_is_combo(dev_priv, phy))
 			icl_get_combo_buf_trans(dev_priv, encoder->type,

@@ -2823,8 +2837,13 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
 	const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
 	u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;

-	n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
-	ddi_translations = tgl_dkl_phy_ddi_translations;
+	if (encoder->type == INTEL_OUTPUT_HDMI) {
+		n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
+		ddi_translations = tgl_dkl_phy_hdmi_ddi_trans;
+	} else {
+		n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
+		ddi_translations = tgl_dkl_phy_dp_ddi_trans;
+	}

 	if (level >= n_entries)
 		level = n_entries - 1;
@@ -5476,15 +5476,13 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder)
 	return I915_READ(GEN8_DE_PORT_ISR) & bit;
 }

-static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
-				     struct intel_digital_port *intel_dig_port)
+static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
+				      enum phy phy)
 {
-	enum port port = intel_dig_port->base.port;
-
-	if (HAS_PCH_MCC(dev_priv) && port == PORT_C)
+	if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
 		return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);

-	return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
+	return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
 }

 static bool icl_digital_port_connected(struct intel_encoder *encoder)

@@ -5494,7 +5492,7 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

 	if (intel_phy_is_combo(dev_priv, phy))
-		return icl_combo_port_connected(dev_priv, dig_port);
+		return intel_combo_phy_connected(dev_priv, phy);
 	else if (intel_phy_is_tc(dev_priv, phy))
 		return intel_tc_port_connected(dig_port);
 	else
@@ -368,7 +368,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
 	if (!ce->timeline)
 		return NULL;

-	rcu_read_lock();
+	mutex_lock(&ce->timeline->mutex);
 	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
 		if (i915_request_completed(rq))
 			break;

@@ -378,7 +378,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
 		if (engine)
 			break;
 	}
-	rcu_read_unlock();
+	mutex_unlock(&ce->timeline->mutex);

 	return engine;
 }
@@ -310,10 +310,23 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 	GEM_BUG_ON(rq->hw_context == ce);

 	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
-		err = mutex_lock_interruptible_nested(&tl->mutex,
-						      SINGLE_DEPTH_NESTING);
-		if (err)
-			return err;
+		/*
+		 * Ideally, we just want to insert our foreign fence as
+		 * a barrier into the remove context, such that this operation
+		 * occurs after all current operations in that context, and
+		 * all future operations must occur after this.
+		 *
+		 * Currently, the timeline->last_request tracking is guarded
+		 * by its mutex and so we must obtain that to atomically
+		 * insert our barrier. However, since we already hold our
+		 * timeline->mutex, we must be careful against potential
+		 * inversion if we are the kernel_context as the remote context
+		 * will itself poke at the kernel_context when it needs to
+		 * unpin. Ergo, if already locked, we drop both locks and
+		 * try again (through the magic of userspace repeating EAGAIN).
+		 */
+		if (!mutex_trylock(&tl->mutex))
+			return -EAGAIN;

 		/* Queue this switch after current activity by this context. */
 		err = i915_active_fence_set(&tl->last_request, rq);
@@ -100,9 +100,7 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
 static inline struct i915_request *
 execlists_active(const struct intel_engine_execlists *execlists)
 {
-	GEM_BUG_ON(execlists->active - execlists->inflight >
-		   execlists_num_ports(execlists));
-	return READ_ONCE(*execlists->active);
+	return *READ_ONCE(execlists->active);
 }

 static inline void
@@ -28,13 +28,13 @@

 #include "i915_drv.h"

-#include "gt/intel_gt.h"
+#include "intel_context.h"
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
 #include "intel_engine_pool.h"
 #include "intel_engine_user.h"
-#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_gt_requests.h"
 #include "intel_lrc.h"
 #include "intel_reset.h"
 #include "intel_ring.h"
@@ -616,6 +616,7 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init_execlists(engine);
 	intel_engine_init_cmd_parser(engine);
 	intel_engine_init__pm(engine);
+	intel_engine_init_retire(engine);

 	intel_engine_pool_init(&engine->pool);

@@ -838,6 +839,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)

 	cleanup_status_page(engine);

+	intel_engine_fini_retire(engine);
 	intel_engine_pool_fini(&engine->pool);
 	intel_engine_fini_breadcrumbs(engine);
 	intel_engine_cleanup_cmd_parser(engine);
@@ -73,8 +73,42 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,

 #endif /* !IS_ENABLED(CONFIG_LOCKDEP) */

+static void
+__queue_and_release_pm(struct i915_request *rq,
+		       struct intel_timeline *tl,
+		       struct intel_engine_cs *engine)
+{
+	struct intel_gt_timelines *timelines = &engine->gt->timelines;
+
+	GEM_TRACE("%s\n", engine->name);
+
+	/*
+	 * We have to serialise all potential retirement paths with our
+	 * submission, as we don't want to underflow either the
+	 * engine->wakeref.counter or our timeline->active_count.
+	 *
+	 * Equally, we cannot allow a new submission to start until
+	 * after we finish queueing, nor could we allow that submitter
+	 * to retire us before we are ready!
+	 */
+	spin_lock(&timelines->lock);
+
+	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
+	if (!atomic_fetch_inc(&tl->active_count))
+		list_add_tail(&tl->link, &timelines->active_list);
+
+	/* Hand the request over to HW and so engine_retire() */
+	__i915_request_queue(rq, NULL);
+
+	/* Let new submissions commence (and maybe retire this timeline) */
+	__intel_wakeref_defer_park(&engine->wakeref);
+
+	spin_unlock(&timelines->lock);
+}
+
 static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 {
+	struct intel_context *ce = engine->kernel_context;
 	struct i915_request *rq;
 	unsigned long flags;
 	bool result = true;

@@ -98,16 +132,31 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 	 * This should hold true as we can only park the engine after
 	 * retiring the last request, thus all rings should be empty and
 	 * all timelines idle.
+	 *
+	 * For unlocking, there are 2 other parties and the GPU who have a
+	 * stake here.
+	 *
+	 * A new gpu user will be waiting on the engine-pm to start their
+	 * engine_unpark. New waiters are predicated on engine->wakeref.count
+	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
+	 * engine->wakeref.
+	 *
+	 * The other party is intel_gt_retire_requests(), which is walking the
+	 * list of active timelines looking for completions. Meanwhile as soon
+	 * as we call __i915_request_queue(), the GPU may complete our request.
+	 * Ergo, if we put ourselves on the timelines.active_list
+	 * (se intel_timeline_enter()) before we increment the
+	 * engine->wakeref.count, we may see the request completion and retire
+	 * it causing an undeflow of the engine->wakeref.
 	 */
-	flags = __timeline_mark_lock(engine->kernel_context);
+	flags = __timeline_mark_lock(ce);
+	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

-	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
+	rq = __i915_request_create(ce, GFP_NOWAIT);
 	if (IS_ERR(rq))
 		/* Context switch failed, hope for the best! Maybe reset? */
 		goto out_unlock;

-	intel_timeline_enter(i915_request_timeline(rq));
-
 	/* Check again on the next retirement. */
 	engine->wakeref_serial = engine->serial + 1;
 	i915_request_add_active_barriers(rq);

@@ -116,13 +165,12 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
 	__i915_request_commit(rq);

-	/* Release our exclusive hold on the engine */
-	__intel_wakeref_defer_park(&engine->wakeref);
-	__i915_request_queue(rq, NULL);
+	/* Expose ourselves to the world */
+	__queue_and_release_pm(rq, ce->timeline, engine);

 	result = false;
 out_unlock:
-	__timeline_mark_unlock(engine->kernel_context, flags);
+	__timeline_mark_unlock(ce, flags);
 	return result;
 }

@@ -177,7 +225,8 @@ static int __engine_park(struct intel_wakeref *wf)

 	engine->execlists.no_priolist = false;

-	intel_gt_pm_put(engine->gt);
+	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
+	intel_gt_pm_put_async(engine->gt);
 	return 0;
 }
@@ -31,6 +31,16 @@ static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
 	intel_wakeref_put(&engine->wakeref);
 }

+static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
+{
+	intel_wakeref_put_async(&engine->wakeref);
+}
+
+static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
+{
+	intel_wakeref_unlock_wait(&engine->wakeref);
+}
+
 void intel_engine_init__pm(struct intel_engine_cs *engine);

 #endif /* INTEL_ENGINE_PM_H */
@@ -451,6 +451,14 @@ struct intel_engine_cs {

 	struct intel_engine_execlists execlists;

+	/*
+	 * Keep track of completed timelines on this engine for early
+	 * retirement with the goal of quickly enabling powersaving as
+	 * soon as the engine is idle.
+	 */
+	struct intel_timeline *retire;
+	struct work_struct retire_work;
+
 	/* status_notifier: list of callbacks for context-switch changes */
 	struct atomic_notifier_head context_status_notifier;
@@ -105,7 +105,6 @@ static int __gt_park(struct intel_wakeref *wf)
 static const struct intel_wakeref_ops wf_ops = {
 	.get = __gt_unpark,
 	.put = __gt_park,
-	.flags = INTEL_WAKEREF_PUT_ASYNC,
 };

 void intel_gt_pm_init_early(struct intel_gt *gt)

@@ -272,7 +271,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)

 static suspend_state_t pm_suspend_target(void)
 {
-#if IS_ENABLED(CONFIG_PM_SLEEP)
+#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
 	return pm_suspend_target_state;
 #else
 	return PM_SUSPEND_TO_IDLE;
@@ -32,6 +32,11 @@ static inline void intel_gt_pm_put(struct intel_gt *gt)
 	intel_wakeref_put(&gt->wakeref);
 }

+static inline void intel_gt_pm_put_async(struct intel_gt *gt)
+{
+	intel_wakeref_put_async(&gt->wakeref);
+}
+
 static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
 {
 	return intel_wakeref_wait_for_idle(&gt->wakeref);
@@ -4,6 +4,8 @@
 * Copyright © 2019 Intel Corporation
 */

+#include <linux/workqueue.h>
+
 #include "i915_drv.h" /* for_each_engine() */
 #include "i915_request.h"
 #include "intel_gt.h"

@@ -29,6 +31,79 @@ static void flush_submission(struct intel_gt *gt)
 		intel_engine_flush_submission(engine);
 }

+static void engine_retire(struct work_struct *work)
+{
+	struct intel_engine_cs *engine =
+		container_of(work, typeof(*engine), retire_work);
+	struct intel_timeline *tl = xchg(&engine->retire, NULL);
+
+	do {
+		struct intel_timeline *next = xchg(&tl->retire, NULL);
+
+		/*
+		 * Our goal here is to retire _idle_ timelines as soon as
+		 * possible (as they are idle, we do not expect userspace
+		 * to be cleaning up anytime soon).
+		 *
+		 * If the timeline is currently locked, either it is being
+		 * retired elsewhere or about to be!
+		 */
+		if (mutex_trylock(&tl->mutex)) {
+			retire_requests(tl);
+			mutex_unlock(&tl->mutex);
+		}
+		intel_timeline_put(tl);
+
+		GEM_BUG_ON(!next);
+		tl = ptr_mask_bits(next, 1);
+	} while (tl);
+}
+
+static bool add_retire(struct intel_engine_cs *engine,
+		       struct intel_timeline *tl)
+{
+	struct intel_timeline *first;
+
+	/*
+	 * We open-code a llist here to include the additional tag [BIT(0)]
+	 * so that we know when the timeline is already on a
+	 * retirement queue: either this engine or another.
+	 *
+	 * However, we rely on that a timeline can only be active on a single
+	 * engine at any one time and that add_retire() is called before the
+	 * engine releases the timeline and transferred to another to retire.
+	 */
+
+	if (READ_ONCE(tl->retire)) /* already queued */
+		return false;
+
+	intel_timeline_get(tl);
+	first = READ_ONCE(engine->retire);
+	do
+		tl->retire = ptr_pack_bits(first, 1, 1);
+	while (!try_cmpxchg(&engine->retire, &first, tl));
+
+	return !first;
+}
+
+void intel_engine_add_retire(struct intel_engine_cs *engine,
+			     struct intel_timeline *tl)
+{
+	if (add_retire(engine, tl))
+		schedule_work(&engine->retire_work);
+}
+
+void intel_engine_init_retire(struct intel_engine_cs *engine)
+{
+	INIT_WORK(&engine->retire_work, engine_retire);
+}
+
+void intel_engine_fini_retire(struct intel_engine_cs *engine)
+{
+	flush_work(&engine->retire_work);
+	GEM_BUG_ON(engine->retire);
+}
+
 long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;

@@ -52,8 +127,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		}

 		intel_timeline_get(tl);
-		GEM_BUG_ON(!tl->active_count);
-		tl->active_count++; /* pin the list element */
+		GEM_BUG_ON(!atomic_read(&tl->active_count));
+		atomic_inc(&tl->active_count); /* pin the list element */
 		spin_unlock_irqrestore(&timelines->lock, flags);

 		if (timeout > 0) {

@@ -74,7 +149,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)

 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
-		if (!--tl->active_count)
+		if (atomic_dec_and_test(&tl->active_count))
 			list_del(&tl->link);
 		else
 			active_count += !!rcu_access_pointer(tl->last_request.fence);

@@ -83,7 +158,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)

 		/* Defer the final release to after the spinlock */
 		if (refcount_dec_and_test(&tl->kref.refcount)) {
-			GEM_BUG_ON(tl->active_count);
+			GEM_BUG_ON(atomic_read(&tl->active_count));
 			list_add(&tl->link, &free);
 		}
 	}
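The add_retire() helper above pushes a timeline onto a per-engine retirement list without a lock, using try_cmpxchg() and a tag bit. A small userspace sketch of the same lock-free push (C11 atomics standing in for the kernel helpers; names are illustrative, not i915 API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

static _Atomic(struct node *) head;

/* Returns true if the list was empty, i.e. the caller should kick a worker. */
static bool push(struct node *n)
{
	struct node *first = atomic_load(&head);

	do {
		n->next = first;	/* publish the old head in our node */
	} while (!atomic_compare_exchange_weak(&head, &first, n));

	return !first;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	printf("%d\n", push(&a));	/* 1: first element, schedule the work */
	printf("%d\n", push(&b));	/* 0: worker is already queued */
	return 0;
}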
@@ -7,7 +7,9 @@
 #ifndef INTEL_GT_REQUESTS_H
 #define INTEL_GT_REQUESTS_H

+struct intel_engine_cs;
 struct intel_gt;
+struct intel_timeline;

 long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
 static inline void intel_gt_retire_requests(struct intel_gt *gt)

@@ -15,6 +17,11 @@ static inline void intel_gt_retire_requests(struct intel_gt *gt)
 	intel_gt_retire_requests_timeout(gt, 0);
 }

+void intel_engine_init_retire(struct intel_engine_cs *engine);
+void intel_engine_add_retire(struct intel_engine_cs *engine,
+			     struct intel_timeline *tl);
+void intel_engine_fini_retire(struct intel_engine_cs *engine);
+
 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);

 void intel_gt_init_requests(struct intel_gt *gt);
@@ -142,6 +142,7 @@
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
 #include "intel_lrc_reg.h"
 #include "intel_mocs.h"
 #include "intel_reset.h"

@@ -1115,9 +1116,17 @@ __execlists_schedule_out(struct i915_request *rq,
 	 * refrain from doing non-trivial work here.
 	 */

+	/*
+	 * If we have just completed this context, the engine may now be
+	 * idle and we want to re-enter powersaving.
+	 */
+	if (list_is_last(&rq->link, &ce->timeline->requests) &&
+	    i915_request_completed(rq))
+		intel_engine_add_retire(engine, ce->timeline);
+
 	intel_engine_context_out(engine);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
-	intel_gt_pm_put(engine->gt);
+	intel_gt_pm_put_async(engine->gt);

 	/*
 	 * If this is part of a virtual engine, its next request may
|
|||||||
static void
|
static void
|
||||||
cancel_port_requests(struct intel_engine_execlists * const execlists)
|
cancel_port_requests(struct intel_engine_execlists * const execlists)
|
||||||
{
|
{
|
||||||
struct i915_request * const *port, *rq;
|
struct i915_request * const *port;
|
||||||
|
|
||||||
for (port = execlists->pending; (rq = *port); port++)
|
for (port = execlists->pending; *port; port++)
|
||||||
execlists_schedule_out(rq);
|
execlists_schedule_out(*port);
|
||||||
memset(execlists->pending, 0, sizeof(execlists->pending));
|
memset(execlists->pending, 0, sizeof(execlists->pending));
|
||||||
|
|
||||||
for (port = execlists->active; (rq = *port); port++)
|
/* Mark the end of active before we overwrite *active */
|
||||||
execlists_schedule_out(rq);
|
for (port = xchg(&execlists->active, execlists->pending); *port; port++)
|
||||||
execlists->active =
|
execlists_schedule_out(*port);
|
||||||
memset(execlists->inflight, 0, sizeof(execlists->inflight));
|
WRITE_ONCE(execlists->active,
|
||||||
|
memset(execlists->inflight, 0, sizeof(execlists->inflight)));
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
@ -2099,23 +2109,27 @@ static void process_csb(struct intel_engine_cs *engine)
|
|||||||
else
|
else
|
||||||
promote = gen8_csb_parse(execlists, buf + 2 * head);
|
promote = gen8_csb_parse(execlists, buf + 2 * head);
|
||||||
if (promote) {
|
if (promote) {
|
||||||
|
struct i915_request * const *old = execlists->active;
|
||||||
|
|
||||||
|
/* Point active to the new ELSP; prevent overwriting */
|
||||||
|
WRITE_ONCE(execlists->active, execlists->pending);
|
||||||
|
set_timeslice(engine);
|
||||||
|
|
||||||
if (!inject_preempt_hang(execlists))
|
if (!inject_preempt_hang(execlists))
|
||||||
ring_set_paused(engine, 0);
|
ring_set_paused(engine, 0);
|
||||||
|
|
||||||
/* cancel old inflight, prepare for switch */
|
/* cancel old inflight, prepare for switch */
|
||||||
trace_ports(execlists, "preempted", execlists->active);
|
trace_ports(execlists, "preempted", old);
|
||||||
while (*execlists->active)
|
while (*old)
|
||||||
execlists_schedule_out(*execlists->active++);
|
execlists_schedule_out(*old++);
|
||||||
|
|
||||||
/* switch pending to inflight */
|
/* switch pending to inflight */
|
||||||
GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
|
GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
|
||||||
execlists->active =
|
WRITE_ONCE(execlists->active,
|
||||||
memcpy(execlists->inflight,
|
memcpy(execlists->inflight,
|
||||||
execlists->pending,
|
execlists->pending,
|
||||||
execlists_num_ports(execlists) *
|
execlists_num_ports(execlists) *
|
||||||
sizeof(*execlists->pending));
|
sizeof(*execlists->pending)));
|
||||||
|
|
||||||
set_timeslice(engine);
|
|
||||||
|
|
||||||
WRITE_ONCE(execlists->pending[0], NULL);
|
WRITE_ONCE(execlists->pending[0], NULL);
|
||||||
} else {
|
} else {
|
||||||
|
@@ -1114,7 +1114,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
 out:
 	intel_engine_cancel_stop_cs(engine);
 	reset_finish_engine(engine);
-	intel_engine_pm_put(engine);
+	intel_engine_pm_put_async(engine);
 	return ret;
 }
@@ -57,9 +57,10 @@ int intel_ring_pin(struct intel_ring *ring)

 	i915_vma_make_unshrinkable(vma);

-	GEM_BUG_ON(ring->vaddr);
-	ring->vaddr = addr;
+	/* Discard any unused bytes beyond that submitted to hw. */
+	intel_ring_reset(ring, ring->emit);

+	ring->vaddr = addr;
 	return 0;

 err_ring:

@@ -85,20 +86,14 @@ void intel_ring_unpin(struct intel_ring *ring)
 	if (!atomic_dec_and_test(&ring->pin_count))
 		return;

-	/* Discard any unused bytes beyond that submitted to hw. */
-	intel_ring_reset(ring, ring->emit);
-
 	i915_vma_unset_ggtt_write(vma);
 	if (i915_vma_is_map_and_fenceable(vma))
 		i915_vma_unpin_iomap(vma);
 	else
 		i915_gem_object_unpin_map(vma->obj);

-	GEM_BUG_ON(!ring->vaddr);
-	ring->vaddr = NULL;
-
-	i915_vma_unpin(vma);
 	i915_vma_make_purgeable(vma);
+	i915_vma_unpin(vma);
 }

 static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
@@ -282,6 +282,7 @@ void intel_timeline_fini(struct intel_timeline *timeline)
 {
 	GEM_BUG_ON(atomic_read(&timeline->pin_count));
 	GEM_BUG_ON(!list_empty(&timeline->requests));
+	GEM_BUG_ON(timeline->retire);

 	if (timeline->hwsp_cacheline)
 		cacheline_free(timeline->hwsp_cacheline);
@@ -339,15 +340,33 @@ void intel_timeline_enter(struct intel_timeline *tl)
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
 	unsigned long flags;

+	/*
+	 * Pretend we are serialised by the timeline->mutex.
+	 *
+	 * While generally true, there are a few exceptions to the rule
+	 * for the engine->kernel_context being used to manage power
+	 * transitions. As the engine_park may be called from under any
+	 * timeline, it uses the power mutex as a global serialisation
+	 * lock to prevent any other request entering its timeline.
+	 *
+	 * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
+	 *
+	 * However, intel_gt_retire_request() does not know which engine
+	 * it is retiring along and so cannot partake in the engine-pm
+	 * barrier, and there we use the tl->active_count as a means to
+	 * pin the timeline in the active_list while the locks are dropped.
+	 * Ergo, as that is outside of the engine-pm barrier, we need to
+	 * use atomic to manipulate tl->active_count.
+	 */
 	lockdep_assert_held(&tl->mutex);

 	GEM_BUG_ON(!atomic_read(&tl->pin_count));
-	if (tl->active_count++)
+
+	if (atomic_add_unless(&tl->active_count, 1, 0))
 		return;
-	GEM_BUG_ON(!tl->active_count); /* overflow? */

 	spin_lock_irqsave(&timelines->lock, flags);
-	list_add(&tl->link, &timelines->active_list);
+	if (!atomic_fetch_inc(&tl->active_count))
+		list_add_tail(&tl->link, &timelines->active_list);
 	spin_unlock_irqrestore(&timelines->lock, flags);
 }

@@ -356,14 +375,16 @@ void intel_timeline_exit(struct intel_timeline *tl)
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
 	unsigned long flags;

+	/* See intel_timeline_enter() */
 	lockdep_assert_held(&tl->mutex);

-	GEM_BUG_ON(!tl->active_count);
-	if (--tl->active_count)
+	GEM_BUG_ON(!atomic_read(&tl->active_count));
+	if (atomic_add_unless(&tl->active_count, -1, 1))
 		return;

 	spin_lock_irqsave(&timelines->lock, flags);
-	list_del(&tl->link);
+	if (atomic_dec_and_test(&tl->active_count))
+		list_del(&tl->link);
 	spin_unlock_irqrestore(&timelines->lock, flags);

 	/*
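The enter/exit hunks above take a "fast path without the lock, slow path under the lock" approach: the counter is only bumped lock-free when it is already non-zero, while the 0 <-> 1 transitions, which also add or remove the list element, happen under the spinlock. A minimal userspace sketch of that pattern (C11 atomics and a pthread mutex standing in for the kernel primitives; illustration only, not the i915 code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_count;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static bool on_list;

/* roughly atomic_add_unless(): add delta unless the counter equals "unless" */
static bool add_unless(atomic_int *v, int delta, int unless)
{
	int cur = atomic_load(v);

	while (cur != unless)
		if (atomic_compare_exchange_weak(v, &cur, cur + delta))
			return true;
	return false;
}

static void timeline_enter(void)
{
	if (add_unless(&active_count, 1, 0))	/* already active: lock-free */
		return;

	pthread_mutex_lock(&list_lock);
	if (atomic_fetch_add(&active_count, 1) == 0)
		on_list = true;			/* 0 -> 1: add to active list */
	pthread_mutex_unlock(&list_lock);
}

static void timeline_exit(void)
{
	if (add_unless(&active_count, -1, 1))	/* not the last user: lock-free */
		return;

	pthread_mutex_lock(&list_lock);
	if (atomic_fetch_sub(&active_count, 1) == 1)
		on_list = false;		/* 1 -> 0: remove from the list */
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	timeline_enter();
	timeline_enter();
	timeline_exit();
	timeline_exit();
	printf("count=%d on_list=%d\n", atomic_load(&active_count), on_list);
	return 0;
}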
@@ -42,7 +42,7 @@ struct intel_timeline {
 	 * from the intel_context caller plus internal atomicity.
 	 */
 	atomic_t pin_count;
-	unsigned int active_count;
+	atomic_t active_count;

 	const u32 *hwsp_seqno;
 	struct i915_vma *hwsp_ggtt;

@@ -66,6 +66,9 @@ struct intel_timeline {
 	 */
 	struct i915_active_fence last_request;

+	/** A chain of completed timelines ready for early retirement. */
+	struct intel_timeline *retire;
+
 	/**
 	 * We track the most recent seqno that we wait on in every context so
 	 * that we only have to emit a new await and dependency on a more
@@ -51,11 +51,12 @@ static int live_engine_pm(void *arg)
 			pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
 			       engine->name, p->name);
 		else
-			intel_engine_pm_put(engine);
-		intel_engine_pm_put(engine);
+			intel_engine_pm_put_async(engine);
+		intel_engine_pm_put_async(engine);
 		p->critical_section_end();

-		/* engine wakeref is sync (instant) */
+		intel_engine_pm_flush(engine);

 		if (intel_engine_pm_is_awake(engine)) {
 			pr_err("%s is still awake after flushing pm\n",
 			       engine->name);
@@ -1599,9 +1599,9 @@ static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
 	if (!(cmd_val(s, 0) & (1 << 22)))
 		return ret;

-	/* check if QWORD */
-	if (DWORD_FIELD(0, 20, 19) == 1)
-		valid_len += 8;
+	/* check inline data */
+	if (cmd_val(s, 0) & BIT(18))
+		valid_len = CMD_LEN(9);
 	ret = gvt_check_valid_cmd_length(cmd_length(s),
 			valid_len);
 	if (ret)
@@ -460,6 +460,7 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static i915_reg_t force_nonpriv_white_list[] = {
 	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
 	GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
+	PS_INVOCATION_COUNT,//_MMIO(0x2348)
 	GEN8_CS_CHICKEN1,//_MMIO(0x2580)
 	_MMIO(0x2690),
 	_MMIO(0x2694),

@@ -508,7 +509,7 @@ static inline bool in_whitelist(unsigned int reg)
 static int force_nonpriv_write(struct intel_vgpu *vgpu,
 	unsigned int offset, void *p_data, unsigned int bytes)
 {
-	u32 reg_nonpriv = *(u32 *)p_data;
+	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	u32 ring_base;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

@@ -528,7 +529,7 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
 				bytes);
 	} else
 		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
-			vgpu->id, reg_nonpriv, offset);
+			vgpu->id, *(u32 *)p_data, offset);

 	return 0;
 }
|
@@ -672,12 +672,13 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 	 * populated by i915_request_add_active_barriers() to point to the
 	 * request that will eventually release them.
 	 */
-	spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
 	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
 		struct active_node *node = barrier_from_ll(pos);
 		struct intel_engine_cs *engine = barrier_to_engine(node);
 		struct rb_node **p, *parent;
 
+		spin_lock_irqsave_nested(&ref->tree_lock, flags,
+					 SINGLE_DEPTH_NESTING);
 		parent = NULL;
 		p = &ref->tree.rb_node;
 		while (*p) {
@@ -693,12 +694,12 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 		}
 		rb_link_node(&node->node, parent, p);
 		rb_insert_color(&node->node, &ref->tree);
+		spin_unlock_irqrestore(&ref->tree_lock, flags);
 
 		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
 		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
 		intel_engine_pm_put(engine);
 	}
-	spin_unlock_irqrestore(&ref->tree_lock, flags);
 }
 
 void i915_request_add_active_barriers(struct i915_request *rq)
@@ -190,7 +190,7 @@ static u64 get_rc6(struct intel_gt *gt)
 	val = 0;
 	if (intel_gt_pm_get_if_awake(gt)) {
 		val = __get_rc6(gt);
-		intel_gt_pm_put(gt);
+		intel_gt_pm_put_async(gt);
 	}
 
 	spin_lock_irqsave(&pmu->lock, flags);
@@ -343,7 +343,7 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
 
 skip:
 		spin_unlock_irqrestore(&engine->uncore->lock, flags);
-		intel_engine_pm_put(engine);
+		intel_engine_pm_put_async(engine);
 	}
 }
 
@@ -368,7 +368,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 		if (intel_gt_pm_get_if_awake(gt)) {
 			val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
 			val = intel_get_cagf(rps, val);
-			intel_gt_pm_put(gt);
+			intel_gt_pm_put_async(gt);
 		}
 
 		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
@@ -103,15 +103,18 @@ query_engine_info(struct drm_i915_private *i915,
 	struct drm_i915_engine_info __user *info_ptr;
 	struct drm_i915_query_engine_info query;
 	struct drm_i915_engine_info info = { };
+	unsigned int num_uabi_engines = 0;
 	struct intel_engine_cs *engine;
 	int len, ret;
 
 	if (query_item->flags)
 		return -EINVAL;
 
+	for_each_uabi_engine(engine, i915)
+		num_uabi_engines++;
+
 	len = sizeof(struct drm_i915_query_engine_info) +
-		RUNTIME_INFO(i915)->num_engines *
-		sizeof(struct drm_i915_engine_info);
+		num_uabi_engines * sizeof(struct drm_i915_engine_info);
 
 	ret = copy_query_item(&query, sizeof(query), len, query_item);
 	if (ret != 0)
@@ -54,7 +54,8 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf)
 
 static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
 {
-	if (!atomic_dec_and_test(&wf->count))
+	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+	if (unlikely(!atomic_dec_and_test(&wf->count)))
 		goto unlock;
 
 	/* ops->put() must reschedule its own release on error/deferral */
@@ -67,13 +68,12 @@ static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
 	mutex_unlock(&wf->mutex);
 }
 
-void __intel_wakeref_put_last(struct intel_wakeref *wf)
+void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
 {
 	INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
 
 	/* Assume we are not in process context and so cannot sleep. */
-	if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC ||
-	    !mutex_trylock(&wf->mutex)) {
+	if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
 		schedule_work(&wf->work);
 		return;
 	}
@@ -109,8 +109,17 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
 
 int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
 {
-	return wait_var_event_killable(&wf->wakeref,
-				       !intel_wakeref_is_active(wf));
+	int err;
+
+	might_sleep();
+
+	err = wait_var_event_killable(&wf->wakeref,
+				      !intel_wakeref_is_active(wf));
+	if (err)
+		return err;
+
+	intel_wakeref_unlock_wait(wf);
+	return 0;
 }
 
 static void wakeref_auto_timeout(struct timer_list *t)
@@ -9,6 +9,7 @@
 
 #include <linux/atomic.h>
 #include <linux/bits.h>
+#include <linux/lockdep.h>
 #include <linux/mutex.h>
 #include <linux/refcount.h>
 #include <linux/stackdepot.h>
@@ -29,9 +30,6 @@ typedef depot_stack_handle_t intel_wakeref_t;
 struct intel_wakeref_ops {
 	int (*get)(struct intel_wakeref *wf);
 	int (*put)(struct intel_wakeref *wf);
-
-	unsigned long flags;
-#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
 };
 
 struct intel_wakeref {
@@ -57,7 +55,7 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
 } while (0)
 
 int __intel_wakeref_get_first(struct intel_wakeref *wf);
-void __intel_wakeref_put_last(struct intel_wakeref *wf);
+void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
 
 /**
  * intel_wakeref_get: Acquire the wakeref
@@ -100,10 +98,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
 }
 
 /**
- * intel_wakeref_put: Release the wakeref
- * @i915: the drm_i915_private device
+ * intel_wakeref_put_flags: Release the wakeref
  * @wf: the wakeref
- * @fn: callback for releasing the wakeref, called only on final release.
+ * @flags: control flags
  *
  * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the @fn callback is called
@@ -116,11 +113,25 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
 * code otherwise.
 */
 static inline void
-intel_wakeref_put(struct intel_wakeref *wf)
+__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
+#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
 {
 	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
 	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
-		__intel_wakeref_put_last(wf);
+		__intel_wakeref_put_last(wf, flags);
+}
+
+static inline void
+intel_wakeref_put(struct intel_wakeref *wf)
+{
+	might_sleep();
+	__intel_wakeref_put(wf, 0);
+}
+
+static inline void
+intel_wakeref_put_async(struct intel_wakeref *wf)
+{
+	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
 }
 
 /**
@@ -151,6 +162,21 @@ intel_wakeref_unlock(struct intel_wakeref *wf)
 	mutex_unlock(&wf->mutex);
 }
 
+/**
+ * intel_wakeref_unlock_wait: Wait until the active callback is complete
+ * @wf: the wakeref
+ *
+ * Waits for the active callback (under the @wf->mutex or another CPU) is
+ * complete.
+ */
+static inline void
+intel_wakeref_unlock_wait(struct intel_wakeref *wf)
+{
+	mutex_lock(&wf->mutex);
+	mutex_unlock(&wf->mutex);
+	flush_work(&wf->work);
+}
+
 /**
  * intel_wakeref_is_active: Query whether the wakeref is currently held
  * @wf: the wakeref
@@ -170,6 +196,7 @@ intel_wakeref_is_active(const struct intel_wakeref *wf)
 static inline void
 __intel_wakeref_defer_park(struct intel_wakeref *wf)
 {
+	lockdep_assert_held(&wf->mutex);
 	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
 	atomic_set_release(&wf->count, 1);
 }
@@ -30,6 +30,8 @@ module_param_named(modeset, mgag200_modeset, int, 0400);
 static struct drm_driver driver;
 
 static const struct pci_device_id pciidlist[] = {
+	{ PCI_VENDOR_ID_MATROX, 0x522, PCI_VENDOR_ID_SUN, 0x4852, 0, 0,
+		G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
 	{ PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
 	{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
 	{ PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
@@ -60,6 +62,35 @@ static void mga_pci_remove(struct pci_dev *pdev)
 
 DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
 
+static bool mgag200_pin_bo_at_0(const struct mga_device *mdev)
+{
+	return mdev->flags & MGAG200_FLAG_HW_BUG_NO_STARTADD;
+}
+
+int mgag200_driver_dumb_create(struct drm_file *file,
+			       struct drm_device *dev,
+			       struct drm_mode_create_dumb *args)
+{
+	struct mga_device *mdev = dev->dev_private;
+	unsigned long pg_align;
+
+	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
+		return -EINVAL;
+
+	pg_align = 0ul;
+
+	/*
+	 * Aligning scanout buffers to the size of the video ram forces
+	 * placement at offset 0. Works around a bug where HW does not
+	 * respect 'startadd' field.
+	 */
+	if (mgag200_pin_bo_at_0(mdev))
+		pg_align = PFN_UP(mdev->mc.vram_size);
+
+	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev,
+					     pg_align, false, args);
+}
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET,
 	.load = mgag200_driver_load,
@@ -71,7 +102,10 @@ static struct drm_driver driver = {
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
-	DRM_GEM_VRAM_DRIVER
+	.debugfs_init = drm_vram_mm_debugfs_init,
+	.dumb_create = mgag200_driver_dumb_create,
+	.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
+	.gem_prime_mmap = drm_gem_prime_mmap,
 };
 
 static struct pci_driver mgag200_pci_driver = {
@@ -150,6 +150,12 @@ enum mga_type {
 	G200_EW3,
 };
 
+/* HW does not handle 'startadd' field correct. */
+#define MGAG200_FLAG_HW_BUG_NO_STARTADD	(1ul << 8)
+
+#define MGAG200_TYPE_MASK	(0x000000ff)
+#define MGAG200_FLAG_MASK	(0x00ffff00)
+
 #define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
 
 struct mga_device {
@@ -181,6 +187,18 @@ struct mga_device {
 	u32 unique_rev_id;
 };
 
+static inline enum mga_type
+mgag200_type_from_driver_data(kernel_ulong_t driver_data)
+{
+	return (enum mga_type)(driver_data & MGAG200_TYPE_MASK);
+}
+
+static inline unsigned long
+mgag200_flags_from_driver_data(kernel_ulong_t driver_data)
+{
+	return driver_data & MGAG200_FLAG_MASK;
+}
+
 				/* mgag200_mode.c */
 int mgag200_modeset_init(struct mga_device *mdev);
 void mgag200_modeset_fini(struct mga_device *mdev);
@@ -94,7 +94,8 @@ static int mgag200_device_init(struct drm_device *dev,
 	struct mga_device *mdev = dev->dev_private;
 	int ret, option;
 
-	mdev->type = flags;
+	mdev->flags = mgag200_flags_from_driver_data(flags);
+	mdev->type = mgag200_type_from_driver_data(flags);
 
 	/* Hardcode the number of CRTCs to 1 */
 	mdev->num_crtc = 1;
@@ -7,6 +7,7 @@ config DRM_MSM
 	depends on OF && COMMON_CLK
 	depends on MMU
 	depends on INTERCONNECT || !INTERCONNECT
+	depends on QCOM_OCMEM || QCOM_OCMEM=n
 	select QCOM_MDT_LOADER if ARCH_QCOM
 	select REGULATOR
 	select DRM_KMS_HELPER
@@ -6,10 +6,6 @@
  * Copyright (c) 2014 The Linux Foundation. All rights reserved.
  */
 
-#ifdef CONFIG_MSM_OCMEM
-#  include <mach/ocmem.h>
-#endif
-
 #include "a3xx_gpu.h"
 
 #define A3XX_INT0_MASK \
@@ -195,9 +191,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
 
 	/* Set the OCMEM base address for A330, etc */
-	if (a3xx_gpu->ocmem_hdl) {
+	if (a3xx_gpu->ocmem.hdl) {
 		gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
-			(unsigned int)(a3xx_gpu->ocmem_base >> 14));
+			(unsigned int)(a3xx_gpu->ocmem.base >> 14));
 	}
 
 	/* Turn on performance counters: */
@@ -318,10 +314,7 @@ static void a3xx_destroy(struct msm_gpu *gpu)
 
 	adreno_gpu_cleanup(adreno_gpu);
 
-#ifdef CONFIG_MSM_OCMEM
-	if (a3xx_gpu->ocmem_base)
-		ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
-#endif
+	adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);
 
 	kfree(a3xx_gpu);
 }
@@ -494,17 +487,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 
 	/* if needed, allocate gmem: */
 	if (adreno_is_a330(adreno_gpu)) {
-#ifdef CONFIG_MSM_OCMEM
-		/* TODO this is different/missing upstream: */
-		struct ocmem_buf *ocmem_hdl =
-				ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
-
-		a3xx_gpu->ocmem_hdl = ocmem_hdl;
-		a3xx_gpu->ocmem_base = ocmem_hdl->addr;
-		adreno_gpu->gmem = ocmem_hdl->len;
-		DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
-				a3xx_gpu->ocmem_base);
-#endif
+		ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
+					    adreno_gpu, &a3xx_gpu->ocmem);
+		if (ret)
+			goto fail;
 	}
 
 	if (!gpu->aspace) {
@@ -19,8 +19,7 @@ struct a3xx_gpu {
 	struct adreno_gpu base;
 
 	/* if OCMEM is used for GMEM: */
-	uint32_t ocmem_base;
-	void *ocmem_hdl;
+	struct adreno_ocmem ocmem;
 };
 #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
 
@@ -2,9 +2,6 @@
 /* Copyright (c) 2014 The Linux Foundation. All rights reserved.
  */
 #include "a4xx_gpu.h"
-#ifdef CONFIG_MSM_OCMEM
-#  include <soc/qcom/ocmem.h>
-#endif
 
 #define A4XX_INT0_MASK \
 	(A4XX_INT0_RBBM_AHB_ERROR | \
@@ -188,7 +185,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
 			(1 << 30) | 0xFFFF);
 
 	gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
-			(unsigned int)(a4xx_gpu->ocmem_base >> 14));
+			(unsigned int)(a4xx_gpu->ocmem.base >> 14));
 
 	/* Turn on performance counters: */
 	gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
@@ -318,10 +315,7 @@ static void a4xx_destroy(struct msm_gpu *gpu)
 
 	adreno_gpu_cleanup(adreno_gpu);
 
-#ifdef CONFIG_MSM_OCMEM
-	if (a4xx_gpu->ocmem_base)
-		ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
-#endif
+	adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem);
 
 	kfree(a4xx_gpu);
 }
@@ -578,17 +572,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 
 	/* if needed, allocate gmem: */
 	if (adreno_is_a4xx(adreno_gpu)) {
-#ifdef CONFIG_MSM_OCMEM
-		/* TODO this is different/missing upstream: */
-		struct ocmem_buf *ocmem_hdl =
-				ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
-
-		a4xx_gpu->ocmem_hdl = ocmem_hdl;
-		a4xx_gpu->ocmem_base = ocmem_hdl->addr;
-		adreno_gpu->gmem = ocmem_hdl->len;
-		DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
-				a4xx_gpu->ocmem_base);
-#endif
+		ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
+					    &a4xx_gpu->ocmem);
+		if (ret)
+			goto fail;
 	}
 
 	if (!gpu->aspace) {
@@ -16,8 +16,7 @@ struct a4xx_gpu {
 	struct adreno_gpu base;
 
 	/* if OCMEM is used for GMEM: */
-	uint32_t ocmem_base;
-	void *ocmem_hdl;
+	struct adreno_ocmem ocmem;
 };
 #define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
 
@@ -353,6 +353,9 @@ static int a5xx_me_init(struct msm_gpu *gpu)
 		 * 2D mode 3 draw
 		 */
 		OUT_RING(ring, 0x0000000B);
+	} else if (adreno_is_a510(adreno_gpu)) {
+		/* Workaround for token and syncs */
+		OUT_RING(ring, 0x00000001);
 	} else {
 		/* No workarounds enabled */
 		OUT_RING(ring, 0x00000000);
@@ -568,15 +571,24 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 		0x00100000 + adreno_gpu->gmem - 1);
 	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
 
-	gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
-	if (adreno_is_a530(adreno_gpu))
-		gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
-	if (adreno_is_a540(adreno_gpu))
-		gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
-	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
-	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
-	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+	if (adreno_is_a510(adreno_gpu)) {
+		gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
+		gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
+		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
+		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
+		gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+			  (0x200 << 11 | 0x200 << 22));
+	} else {
+		gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+		if (adreno_is_a530(adreno_gpu))
+			gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+		if (adreno_is_a540(adreno_gpu))
+			gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
+		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+		gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+			  (0x400 << 11 | 0x300 << 22));
+	}
 
 	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
 		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
@@ -589,6 +601,19 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	/* Enable ME/PFP split notification */
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
 
+	/*
+	 *  In A5x, CCU can send context_done event of a particular context to
+	 *  UCHE which ultimately reaches CP even when there is valid
+	 *  transaction of that context inside CCU. This can let CP to program
+	 *  config registers, which will make the "valid transaction" inside
+	 *  CCU to be interpreted differently. This can cause gpu fault. This
+	 *  bug is fixed in latest A510 revision. To enable this bug fix -
+	 *  bit[11] of RB_DBG_ECO_CNTL need to be set to 0, default is 1
+	 *  (disable). For older A510 version this bit is unused.
+	 */
+	if (adreno_is_a510(adreno_gpu))
+		gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);
+
 	/* Enable HWCG */
 	a5xx_set_hwcg(gpu, true);
 
@@ -635,7 +660,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	/* UCHE */
 	gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
 
-	if (adreno_is_a530(adreno_gpu))
+	if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu))
 		gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
 			ADRENO_PROTECT_RW(0x10000, 0x8000));
 
@@ -679,7 +704,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 
 	a5xx_preempt_hw_init(gpu);
 
-	a5xx_gpmu_ucode_init(gpu);
+	if (!adreno_is_a510(adreno_gpu))
+		a5xx_gpmu_ucode_init(gpu);
 
 	ret = a5xx_ucode_init(gpu);
 	if (ret)
@@ -712,7 +738,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	}
 
 	/*
-	 * Try to load a zap shader into the secure world. If successful
+	 * If the chip that we are using does support loading one, then
+	 * try to load a zap shader into the secure world. If successful
 	 * we can use the CP to switch out of secure mode. If not then we
 	 * have no resource but to try to switch ourselves out manually. If we
 	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
@@ -1066,6 +1093,7 @@ static void a5xx_dump(struct msm_gpu *gpu)
 
 static int a5xx_pm_resume(struct msm_gpu *gpu)
 {
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	int ret;
 
 	/* Turn on the core power */
@@ -1073,6 +1101,15 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	if (adreno_is_a510(adreno_gpu)) {
+		/* Halt the sp_input_clk at HM level */
+		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
+		a5xx_set_hwcg(gpu, true);
+		/* Turn on sp_input_clk at HM level */
+		gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
+		return 0;
+	}
+
 	/* Turn the RBCCU domain first to limit the chances of voltage droop */
 	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
 
@@ -1101,9 +1138,17 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
 
 static int a5xx_pm_suspend(struct msm_gpu *gpu)
 {
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	u32 mask = 0xf;
+
+	/* A510 has 3 XIN ports in VBIF */
+	if (adreno_is_a510(adreno_gpu))
+		mask = 0x7;
+
 	/* Clear the VBIF pipe before shutting down */
-	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
-	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
+	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
+				mask) == mask);
 
 	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
 
@@ -1289,7 +1334,7 @@ static void a5xx_gpu_state_destroy(struct kref *kref)
 	kfree(a5xx_state);
 }
 
-int a5xx_gpu_state_put(struct msm_gpu_state *state)
+static int a5xx_gpu_state_put(struct msm_gpu_state *state)
 {
 	if (IS_ERR_OR_NULL(state))
 		return 1;
@@ -1299,8 +1344,8 @@ int a5xx_gpu_state_put(struct msm_gpu_state *state)
 
 
 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
-		struct drm_printer *p)
+static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+		      struct drm_printer *p)
 {
 	int i, j;
 	u32 pos = 0;
@@ -297,6 +297,10 @@ int a5xx_power_init(struct msm_gpu *gpu)
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	int ret;
 
+	/* Not all A5xx chips have a GPMU */
+	if (adreno_is_a510(adreno_gpu))
+		return 0;
+
 	/* Set up the limits management */
 	if (adreno_is_a530(adreno_gpu))
 		a530_lm_setup(gpu);
@@ -326,6 +330,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	unsigned int *data, *ptr, *cmds;
 	unsigned int cmds_size;
 
+	if (adreno_is_a510(adreno_gpu))
+		return;
+
 	if (a5xx_gpu->gpmu_bo)
 		return;
 
@@ -114,6 +114,21 @@ static const struct adreno_info gpulist[] = {
 		.gmem  = (SZ_1M + SZ_512K),
 		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
 		.init  = a4xx_gpu_init,
+	}, {
+		.rev   = ADRENO_REV(5, 1, 0, ANY_ID),
+		.revn = 510,
+		.name = "A510",
+		.fw = {
+			[ADRENO_FW_PM4] = "a530_pm4.fw",
+			[ADRENO_FW_PFP] = "a530_pfp.fw",
+		},
+		.gmem = SZ_256K,
+		/*
+		 * Increase inactive period to 250 to avoid bouncing
+		 * the GDSC which appears to make it grumpy
+		 */
+		.inactive_period = 250,
+		.init = a5xx_gpu_init,
 	}, {
 		.rev   = ADRENO_REV(5, 3, 0, 2),
 		.revn = 530,
@@ -14,6 +14,7 @@
 #include <linux/pm_opp.h>
 #include <linux/slab.h>
 #include <linux/soc/qcom/mdt_loader.h>
+#include <soc/qcom/ocmem.h>
 #include "adreno_gpu.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -893,6 +894,45 @@ static int adreno_get_pwrlevels(struct device *dev,
 	return 0;
 }
 
+int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
+			  struct adreno_ocmem *adreno_ocmem)
+{
+	struct ocmem_buf *ocmem_hdl;
+	struct ocmem *ocmem;
+
+	ocmem = of_get_ocmem(dev);
+	if (IS_ERR(ocmem)) {
+		if (PTR_ERR(ocmem) == -ENODEV) {
+			/*
+			 * Return success since either the ocmem property was
+			 * not specified in device tree, or ocmem support is
+			 * not compiled into the kernel.
+			 */
+			return 0;
+		}
+
+		return PTR_ERR(ocmem);
+	}
+
+	ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
+	if (IS_ERR(ocmem_hdl))
+		return PTR_ERR(ocmem_hdl);
+
+	adreno_ocmem->ocmem = ocmem;
+	adreno_ocmem->base = ocmem_hdl->addr;
+	adreno_ocmem->hdl = ocmem_hdl;
+	adreno_gpu->gmem = ocmem_hdl->len;
+
+	return 0;
+}
+
+void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
+{
+	if (adreno_ocmem && adreno_ocmem->base)
+		ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
+			   adreno_ocmem->hdl);
+}
+
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct adreno_gpu *adreno_gpu,
 		const struct adreno_gpu_funcs *funcs, int nr_rings)
@@ -126,6 +126,12 @@ struct adreno_gpu {
 };
 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
 
+struct adreno_ocmem {
+	struct ocmem *ocmem;
+	unsigned long base;
+	void *hdl;
+};
+
 /* platform config data (ie. from DT, or pdata) */
 struct adreno_platform_config {
 	struct adreno_rev rev;
@@ -206,6 +212,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
 	return gpu->revn == 430;
 }
 
+static inline int adreno_is_a510(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 510;
+}
+
 static inline int adreno_is_a530(struct adreno_gpu *gpu)
 {
 	return gpu->revn == 530;
@@ -236,6 +247,10 @@ void adreno_dump(struct msm_gpu *gpu);
 void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
 
+int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
+			  struct adreno_ocmem *ocmem);
+void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);
+
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
 		int nr_rings);
@@ -55,8 +55,7 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
 int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
 		enum dpu_intr_type intr_type, u32 instance_idx)
 {
-	if (!dpu_kms || !dpu_kms->hw_intr ||
-			!dpu_kms->hw_intr->ops.irq_idx_lookup)
+	if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup)
 		return -EINVAL;
 
 	return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
@@ -73,7 +72,7 @@ static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
 	unsigned long irq_flags;
 	int ret = 0, enable_count;
 
-	if (!dpu_kms || !dpu_kms->hw_intr ||
+	if (!dpu_kms->hw_intr ||
 			!dpu_kms->irq_obj.enable_counts ||
 			!dpu_kms->irq_obj.irq_counts) {
 		DPU_ERROR("invalid params\n");
@@ -114,7 +113,7 @@ int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
 {
 	int i, ret = 0, counts;
 
-	if (!dpu_kms || !irq_idxs || !irq_count) {
+	if (!irq_idxs || !irq_count) {
 		DPU_ERROR("invalid params\n");
 		return -EINVAL;
 	}
@@ -138,7 +137,7 @@ static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
 {
 	int ret = 0, enable_count;
 
-	if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+	if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
 		DPU_ERROR("invalid params\n");
 		return -EINVAL;
 	}
@@ -169,7 +168,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
 {
 	int i, ret = 0, counts;
 
-	if (!dpu_kms || !irq_idxs || !irq_count) {
+	if (!irq_idxs || !irq_count) {
 		DPU_ERROR("invalid params\n");
 		return -EINVAL;
 	}
@@ -186,7 +185,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
 
 u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
 {
-	if (!dpu_kms || !dpu_kms->hw_intr ||
+	if (!dpu_kms->hw_intr ||
 			!dpu_kms->hw_intr->ops.get_interrupt_status)
 		return 0;
 
@@ -205,7 +204,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
 {
 	unsigned long irq_flags;
 
-	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+	if (!dpu_kms->irq_obj.irq_cb_tbl) {
 		DPU_ERROR("invalid params\n");
 		return -EINVAL;
 	}
@@ -240,7 +239,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
 {
 	unsigned long irq_flags;
 
-	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+	if (!dpu_kms->irq_obj.irq_cb_tbl) {
 		DPU_ERROR("invalid params\n");
 		return -EINVAL;
 	}
@@ -274,8 +273,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
 
 static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
 {
-	if (!dpu_kms || !dpu_kms->hw_intr ||
-			!dpu_kms->hw_intr->ops.clear_all_irqs)
+	if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs)
 		return;
 
 	dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
@@ -283,8 +281,7 @@ static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
 
 static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
 {
-	if (!dpu_kms || !dpu_kms->hw_intr ||
-			!dpu_kms->hw_intr->ops.disable_all_irqs)
+	if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs)
 		return;
 
 	dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
@@ -343,18 +340,8 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
 
 void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
 {
-	struct msm_drm_private *priv;
 	int i;
 
-	if (!dpu_kms->dev) {
-		DPU_ERROR("invalid drm device\n");
-		return;
-	} else if (!dpu_kms->dev->dev_private) {
-		DPU_ERROR("invalid device private\n");
-		return;
-	}
-	priv = dpu_kms->dev->dev_private;
-
 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
 	dpu_clear_all_irqs(dpu_kms);
 	dpu_disable_all_irqs(dpu_kms);
@@ -379,18 +366,8 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
 
 void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
 {
-	struct msm_drm_private *priv;
 	int i;
 
-	if (!dpu_kms->dev) {
-		DPU_ERROR("invalid drm device\n");
-		return;
-	} else if (!dpu_kms->dev->dev_private) {
-		DPU_ERROR("invalid device private\n");
-		return;
-	}
-	priv = dpu_kms->dev->dev_private;
-
 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
 	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
 		if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
@@ -32,18 +32,7 @@ enum dpu_perf_mode {
 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv;
 
-	if (!crtc->dev || !crtc->dev->dev_private) {
-		DPU_ERROR("invalid device\n");
-		return NULL;
-	}
-
 	priv = crtc->dev->dev_private;
-	if (!priv || !priv->kms) {
-		DPU_ERROR("invalid kms\n");
-		return NULL;
-	}
-
 	return to_dpu_kms(priv->kms);
 }
 
@@ -116,7 +105,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
 	}
 
 	kms = _dpu_crtc_get_kms(crtc);
-	if (!kms || !kms->catalog) {
+	if (!kms->catalog) {
 		DPU_ERROR("invalid parameters\n");
 		return 0;
 	}
@@ -215,7 +204,6 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
 void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
 {
 	struct dpu_crtc *dpu_crtc;
-	struct dpu_crtc_state *dpu_cstate;
 	struct dpu_kms *kms;
 
 	if (!crtc) {
@@ -224,13 +212,12 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
 	}
 
 	kms = _dpu_crtc_get_kms(crtc);
-	if (!kms || !kms->catalog) {
+	if (!kms->catalog) {
 		DPU_ERROR("invalid kms\n");
 		return;
 	}
 
 	dpu_crtc = to_dpu_crtc(crtc);
-	dpu_cstate = to_dpu_crtc_state(crtc->state);
 
 	if (atomic_dec_return(&kms->bandwidth_ref) > 0)
 		return;
@@ -287,7 +274,6 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
 	u64 clk_rate = 0;
 	struct dpu_crtc *dpu_crtc;
 	struct dpu_crtc_state *dpu_cstate;
-	struct msm_drm_private *priv;
 	struct dpu_kms *kms;
 	int ret;
 
@@ -297,11 +283,10 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
 	}
 
 	kms = _dpu_crtc_get_kms(crtc);
-	if (!kms || !kms->catalog) {
+	if (!kms->catalog) {
 		DPU_ERROR("invalid kms\n");
 		return -EINVAL;
 	}
-	priv = kms->dev->dev_private;
 
 	dpu_crtc = to_dpu_crtc(crtc);
 	dpu_cstate = to_dpu_crtc_state(crtc->state);
@@ -266,11 +266,20 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
 {
 	struct drm_encoder *encoder;
 
-	if (!crtc || !crtc->dev) {
+	if (!crtc) {
 		DPU_ERROR("invalid crtc\n");
 		return INTF_MODE_NONE;
 	}
 
+	/*
+	 * TODO: This function is called from dpu debugfs and as part of atomic
+	 * check. When called from debugfs, the crtc->mutex must be held to
+	 * read crtc->state. However reading crtc->state from atomic check isn't
+	 * allowed (unless you have a good reason, a big comment, and a deep
+	 * understanding of how the atomic/modeset locks work (<- and this is
+	 * probably not possible)). So we'll keep the WARN_ON here for now, but
+	 * really we need to figure out a better way to track our operating mode
+	 */
 	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
@@ -694,7 +703,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
 	unsigned long flags;
 	bool release_bandwidth = false;
 
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
+	if (!crtc || !crtc->state) {
 		DPU_ERROR("invalid crtc\n");
 		return;
 	}
@@ -766,7 +775,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
 	struct msm_drm_private *priv;
 	bool request_bandwidth;
 
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+	if (!crtc) {
 		DPU_ERROR("invalid crtc\n");
 		return;
 	}
@@ -1288,13 +1297,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
 {
 	struct drm_crtc *crtc = NULL;
 	struct dpu_crtc *dpu_crtc = NULL;
-	struct msm_drm_private *priv = NULL;
-	struct dpu_kms *kms = NULL;
 	int i;
 
-	priv = dev->dev_private;
-	kms = to_dpu_kms(priv->kms);
-
 	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
 	if (!dpu_crtc)
 		return ERR_PTR(-ENOMEM);
@@ -645,11 +645,6 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
 	priv = drm_enc->dev->dev_private;
 
 	dpu_kms = to_dpu_kms(priv->kms);
-	if (!dpu_kms) {
-		DPU_ERROR("invalid dpu_kms\n");
-		return;
-	}
-
 	hw_mdptop = dpu_kms->hw_mdp;
 	if (!hw_mdptop) {
 		DPU_ERROR("invalid mdptop\n");
@@ -735,8 +730,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 	struct msm_drm_private *priv;
 	bool is_vid_mode = false;
 
-	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
-			!drm_enc->crtc) {
+	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
 		DPU_ERROR("invalid parameters\n");
 		return -EINVAL;
 	}
@@ -1092,17 +1086,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 	struct msm_drm_private *priv;
 	struct dpu_kms *dpu_kms;
 
-	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+	if (!drm_enc || !drm_enc->dev) {
 		DPU_ERROR("invalid parameters\n");
 		return;
 	}
 
 	priv = drm_enc->dev->dev_private;
 	dpu_kms = to_dpu_kms(priv->kms);
-	if (!dpu_kms) {
-		DPU_ERROR("invalid dpu_kms\n");
-		return;
-	}
-
 	dpu_enc = to_dpu_encoder_virt(drm_enc);
 	if (!dpu_enc || !dpu_enc->cur_master) {
@@ -1184,7 +1174,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	struct dpu_encoder_virt *dpu_enc = NULL;
 	struct msm_drm_private *priv;
 	struct dpu_kms *dpu_kms;
-	struct drm_display_mode *mode;
 	int i = 0;
 
 	if (!drm_enc) {
@@ -1193,9 +1182,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	} else if (!drm_enc->dev) {
 		DPU_ERROR("invalid dev\n");
 		return;
-	} else if (!drm_enc->dev->dev_private) {
-		DPU_ERROR("invalid dev_private\n");
-		return;
 	}
 
 	dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -1204,8 +1190,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	mutex_lock(&dpu_enc->enc_lock);
 	dpu_enc->enabled = false;
 
-	mode = &drm_enc->crtc->state->adjusted_mode;
-
 	priv = drm_enc->dev->dev_private;
 	dpu_kms = to_dpu_kms(priv->kms);
 
@@ -1734,8 +1718,7 @@ static void dpu_encoder_vsync_event_handler(struct timer_list *t)
 	struct msm_drm_private *priv;
 	struct msm_drm_thread *event_thread;
 
-	if (!drm_enc->dev || !drm_enc->dev->dev_private ||
-			!drm_enc->crtc) {
+	if (!drm_enc->dev || !drm_enc->crtc) {
 		DPU_ERROR("invalid parameters\n");
 		return;
 	}
@@ -1914,8 +1897,6 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 {
 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
-	struct msm_drm_private *priv;
-	struct dpu_kms *dpu_kms;
 	int i;
 
 	static const struct file_operations debugfs_status_fops = {
@@ -1927,14 +1908,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 
 	char name[DPU_NAME_SIZE];
 
-	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+	if (!drm_enc->dev) {
 		DPU_ERROR("invalid encoder or kms\n");
 		return -EINVAL;
 	}
 
-	priv = drm_enc->dev->dev_private;
-	dpu_kms = to_dpu_kms(priv->kms);
-
 	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
 
 	/* create overall sub-directory for the encoder */
@@ -2042,9 +2020,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
 	enum dpu_intf_type intf_type;
 	struct dpu_enc_phys_init_params phys_params;
 
-	if (!dpu_enc || !dpu_kms) {
-		DPU_ERROR("invalid arg(s), enc %d kms %d\n",
-				dpu_enc != 0, dpu_kms != 0);
+	if (!dpu_enc) {
+		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0);
 		return -EINVAL;
 	}
 
@@ -2133,14 +2110,12 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
 	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
 			frame_done_timer);
 	struct drm_encoder *drm_enc = &dpu_enc->base;
-	struct msm_drm_private *priv;
 	u32 event;
 
-	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+	if (!drm_enc->dev) {
 		DPU_ERROR("invalid parameters\n");
 		return;
 	}
-	priv = drm_enc->dev->dev_private;
 
 	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
 		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
@@ -124,13 +124,11 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
 static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
 {
 	struct dpu_encoder_phys *phys_enc = arg;
-	struct dpu_encoder_phys_cmd *cmd_enc;
 
 	if (!phys_enc || !phys_enc->hw_ctl)
 		return;
 
 	DPU_ATRACE_BEGIN("ctl_start_irq");
-	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
 
 	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
 
@@ -316,13 +314,9 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
 static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
 		bool enable)
 {
-	struct dpu_encoder_phys_cmd *cmd_enc;
-
 	if (!phys_enc)
 		return;
 
-	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
 	trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
 			phys_enc->hw_pp->idx - PINGPONG_0,
 			enable, atomic_read(&phys_enc->vblank_refcount));
@@ -355,7 +349,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
 	struct drm_display_mode *mode;
 	bool tc_enable = true;
 	u32 vsync_hz;
-	struct msm_drm_private *priv;
 	struct dpu_kms *dpu_kms;
 
 	if (!phys_enc || !phys_enc->hw_pp) {
@@ -373,11 +366,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
 	}
 
 	dpu_kms = phys_enc->dpu_kms;
-	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
-		DPU_ERROR("invalid device\n");
-		return;
-	}
-	priv = dpu_kms->dev->dev_private;
 
 	/*
 	 * TE default: dsi byte clock calculated base on 70 fps;
@@ -650,13 +638,10 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
 		struct dpu_encoder_phys *phys_enc)
 {
 	int rc;
-	struct dpu_encoder_phys_cmd *cmd_enc;
 
 	if (!phys_enc)
 		return -EINVAL;
 
-	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
 	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
 	if (rc) {
 		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
@@ -374,7 +374,7 @@ static void dpu_encoder_phys_vid_mode_set(
 		struct drm_display_mode *mode,
 		struct drm_display_mode *adj_mode)
 {
-	if (!phys_enc || !phys_enc->dpu_kms) {
+	if (!phys_enc) {
 		DPU_ERROR("invalid encoder/kms\n");
 		return;
 	}
@@ -566,16 +566,13 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
 
 static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
 {
-	struct msm_drm_private *priv;
 	unsigned long lock_flags;
 	int ret;
 
-	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
-			!phys_enc->parent->dev->dev_private) {
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) {
 		DPU_ERROR("invalid encoder/device\n");
 		return;
 	}
-	priv = phys_enc->parent->dev->dev_private;
 
 	if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
 		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
@@ -30,10 +30,6 @@
 #define CREATE_TRACE_POINTS
 #include "dpu_trace.h"
 
-static const char * const iommu_ports[] = {
-		"mdp_0",
-};
-
 /*
  * To enable overall DRM driver logging
  * # echo 0x2 > /sys/module/drm/parameters/debug
@@ -68,16 +64,14 @@ static int _dpu_danger_signal_status(struct seq_file *s,
 		bool danger_status)
 {
 	struct dpu_kms *kms = (struct dpu_kms *)s->private;
-	struct msm_drm_private *priv;
 	struct dpu_danger_safe_status status;
 	int i;
 
-	if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+	if (!kms->hw_mdp) {
 		DPU_ERROR("invalid arg(s)\n");
 		return 0;
 	}
 
-	priv = kms->dev->dev_private;
 	memset(&status, 0, sizeof(struct dpu_danger_safe_status));
 
 	pm_runtime_get_sync(&kms->pdev->dev);
@@ -153,13 +147,7 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
 		return 0;
 
 	dev = dpu_kms->dev;
-	if (!dev)
-		return 0;
-
 	priv = dev->dev_private;
-	if (!priv)
-		return 0;
-
 	base = dpu_kms->mmio + regset->offset;
 
 	/* insert padding spaces, if needed */
@@ -280,7 +268,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 {
 	struct dpu_kms *dpu_kms;
-	struct msm_drm_private *priv;
 	struct drm_device *dev;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
@@ -292,10 +279,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
 	dpu_kms = to_dpu_kms(kms);
 	dev = dpu_kms->dev;
 
-	if (!dev || !dev->dev_private)
-		return;
-	priv = dev->dev_private;
-
 	/* Call prepare_commit for all affected encoders */
 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 		drm_for_each_encoder_mask(encoder, crtc->dev,
@@ -333,7 +316,6 @@ void dpu_kms_encoder_enable(struct drm_encoder *encoder)
 	if (funcs && funcs->commit)
 		funcs->commit(encoder);
 
-	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 	drm_for_each_crtc(crtc, dev) {
 		if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
 			continue;
@@ -464,16 +446,6 @@ static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
 	struct msm_drm_private *priv;
 	int i;
 
-	if (!dpu_kms) {
-		DPU_ERROR("invalid dpu_kms\n");
-		return;
-	} else if (!dpu_kms->dev) {
-		DPU_ERROR("invalid dev\n");
-		return;
-	} else if (!dpu_kms->dev->dev_private) {
-		DPU_ERROR("invalid dev_private\n");
-		return;
-	}
 	priv = dpu_kms->dev->dev_private;
 
 	for (i = 0; i < priv->num_crtcs; i++)
@@ -505,7 +477,6 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
 
 	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
 	int max_crtc_count;
-
 	dev = dpu_kms->dev;
 	priv = dev->dev_private;
 	catalog = dpu_kms->catalog;
@@ -585,8 +556,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
 	int i;
 
 	dev = dpu_kms->dev;
-	if (!dev)
-		return;
 
 	if (dpu_kms->hw_intr)
 		dpu_hw_intr_destroy(dpu_kms->hw_intr);
@@ -725,8 +694,7 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
 
 	mmu = dpu_kms->base.aspace->mmu;
 
-	mmu->funcs->detach(mmu, (const char **)iommu_ports,
-			ARRAY_SIZE(iommu_ports));
+	mmu->funcs->detach(mmu);
 	msm_gem_address_space_put(dpu_kms->base.aspace);
 
 	dpu_kms->base.aspace = NULL;
@@ -752,8 +720,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
 		return PTR_ERR(aspace);
 	}
 
-	ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
-			ARRAY_SIZE(iommu_ports));
+	ret = aspace->mmu->funcs->attach(aspace->mmu);
 	if (ret) {
 		DPU_ERROR("failed to attach iommu %d\n", ret);
 		msm_gem_address_space_put(aspace);
@@ -803,16 +770,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 
 	dpu_kms = to_dpu_kms(kms);
 	dev = dpu_kms->dev;
-	if (!dev) {
-		DPU_ERROR("invalid device\n");
-		return rc;
-	}
-
 	priv = dev->dev_private;
-	if (!priv) {
-		DPU_ERROR("invalid private data\n");
-		return rc;
-	}
 
 	atomic_set(&dpu_kms->bandwidth_ref, 0);
 
@@ -974,7 +932,7 @@ struct msm_kms *dpu_kms_init(struct drm_device *dev)
 	struct dpu_kms *dpu_kms;
 	int irq;
 
-	if (!dev || !dev->dev_private) {
+	if (!dev) {
 		DPU_ERROR("drm device node invalid\n");
 		return ERR_PTR(-EINVAL);
 	}
@@ -1064,11 +1022,6 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
 	struct dss_module_power *mp = &dpu_kms->mp;
 
 	ddev = dpu_kms->dev;
-	if (!ddev) {
-		DPU_ERROR("invalid drm_device\n");
-		return rc;
-	}
-
 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
 	if (rc)
 		DPU_ERROR("clock disable failed rc:%d\n", rc);
@@ -1086,11 +1039,6 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
 	struct dss_module_power *mp = &dpu_kms->mp;
 
 	ddev = dpu_kms->dev;
-	if (!ddev) {
-		DPU_ERROR("invalid drm_device\n");
-		return rc;
-	}
-
 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
 	if (rc) {
 		DPU_ERROR("clock enable failed rc:%d\n", rc);
@@ -139,10 +139,6 @@ struct vsync_info {
 
 #define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
 
-/* get struct msm_kms * from drm_device * */
-#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
-		((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
-
 /**
  * Debugfs functions - extra helper functions for debugfs support
  *
@@ -154,10 +154,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
 	u32 ot_lim;
 	int ret, i;
 
-	if (!dpu_kms) {
-		DPU_ERROR("invalid arguments\n");
-		return;
-	}
 	mdp = dpu_kms->hw_mdp;
 
 	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
@@ -214,7 +210,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
 	const struct dpu_vbif_qos_tbl *qos_tbl;
 	int i;
 
-	if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+	if (!params || !dpu_kms->hw_mdp) {
 		DPU_ERROR("invalid arguments\n");
 		return;
 	}
@@ -157,10 +157,6 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
 	}
 }
 
-static const char * const iommu_ports[] = {
-	"mdp_port0_cb0", "mdp_port1_cb0",
-};
-
 static void mdp4_destroy(struct msm_kms *kms)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
@@ -172,8 +168,7 @@ static void mdp4_destroy(struct msm_kms *kms)
 		drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
 
 	if (aspace) {
-		aspace->mmu->funcs->detach(aspace->mmu,
-				iommu_ports, ARRAY_SIZE(iommu_ports));
+		aspace->mmu->funcs->detach(aspace->mmu);
 		msm_gem_address_space_put(aspace);
 	}
 
@@ -524,8 +519,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 
 		kms->aspace = aspace;
 
-		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
-				ARRAY_SIZE(iommu_ports));
+		ret = aspace->mmu->funcs->attach(aspace->mmu);
 		if (ret)
 			goto fail;
 	} else {
@@ -14,7 +14,7 @@ struct mdp5_cfg_handler {
 /* mdp5_cfg must be exposed (used in mdp5.xml.h) */
 const struct mdp5_cfg_hw *mdp5_cfg = NULL;
 
-const struct mdp5_cfg_hw msm8x74v1_config = {
+static const struct mdp5_cfg_hw msm8x74v1_config = {
 	.name = "msm8x74v1",
 	.mdp = {
 		.count = 1,
@@ -98,7 +98,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
 	.max_clk = 200000000,
 };
 
-const struct mdp5_cfg_hw msm8x74v2_config = {
+static const struct mdp5_cfg_hw msm8x74v2_config = {
 	.name = "msm8x74",
 	.mdp = {
 		.count = 1,
@@ -180,7 +180,7 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
 	.max_clk = 200000000,
 };
 
-const struct mdp5_cfg_hw apq8084_config = {
+static const struct mdp5_cfg_hw apq8084_config = {
 	.name = "apq8084",
 	.mdp = {
 		.count = 1,
@@ -275,7 +275,7 @@ const struct mdp5_cfg_hw apq8084_config = {
 	.max_clk = 320000000,
 };
 
-const struct mdp5_cfg_hw msm8x16_config = {
+static const struct mdp5_cfg_hw msm8x16_config = {
 	.name = "msm8x16",
 	.mdp = {
 		.count = 1,
@@ -342,7 +342,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
 	.max_clk = 320000000,
 };
 
-const struct mdp5_cfg_hw msm8x94_config = {
+static const struct mdp5_cfg_hw msm8x94_config = {
 	.name = "msm8x94",
 	.mdp = {
 		.count = 1,
@@ -437,7 +437,7 @@ const struct mdp5_cfg_hw msm8x94_config = {
 	.max_clk = 400000000,
 };
 
-const struct mdp5_cfg_hw msm8x96_config = {
+static const struct mdp5_cfg_hw msm8x96_config = {
 	.name = "msm8x96",
 	.mdp = {
 		.count = 1,
@@ -545,7 +545,104 @@ const struct mdp5_cfg_hw msm8x96_config = {
 	.max_clk = 412500000,
 };
 
-const struct mdp5_cfg_hw msm8917_config = {
+const struct mdp5_cfg_hw msm8x76_config = {
+	.name = "msm8x76",
+	.mdp = {
+		.count = 1,
+		.caps = MDP_CAP_SMP |
+			MDP_CAP_DSC |
+			MDP_CAP_SRC_SPLIT |
+			0,
+	},
+	.ctl = {
+		.count = 3,
+		.base = { 0x01000, 0x01200, 0x01400 },
+		.flush_hw_mask = 0xffffffff,
+	},
+	.smp = {
+		.mmb_count = 10,
+		.mmb_size = 10240,
+		.clients = {
+			[SSPP_VIG0] = 1, [SSPP_VIG1] = 9,
+			[SSPP_DMA0] = 4,
+			[SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+		},
+	},
+	.pipe_vig = {
+		.count = 2,
+		.base = { 0x04000, 0x06000 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SCALE |
+			MDP_PIPE_CAP_CSC |
+			MDP_PIPE_CAP_DECIMATION |
+			MDP_PIPE_CAP_SW_PIX_EXT |
+			0,
+	},
+	.pipe_rgb = {
+		.count = 2,
+		.base = { 0x14000, 0x16000 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_DECIMATION |
+			MDP_PIPE_CAP_SW_PIX_EXT |
+			0,
+	},
+	.pipe_dma = {
+		.count = 1,
+		.base = { 0x24000 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SW_PIX_EXT |
+			0,
+	},
+	.pipe_cursor = {
+		.count = 1,
+		.base = { 0x440DC },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SW_PIX_EXT |
+			MDP_PIPE_CAP_CURSOR |
+			0,
+	},
+	.lm = {
+		.count = 2,
+		.base = { 0x44000, 0x45000 },
+		.instances = {
+			{ .id = 0, .pp = 0, .dspp = 0,
+				.caps = MDP_LM_CAP_DISPLAY, },
+			{ .id = 1, .pp = -1, .dspp = -1,
+				.caps = MDP_LM_CAP_WB },
+		},
+		.nb_stages = 8,
+		.max_width = 2560,
+		.max_height = 0xFFFF,
+	},
+	.dspp = {
+		.count = 1,
+		.base = { 0x54000 },
+
+	},
+	.pp = {
+		.count = 3,
+		.base = { 0x70000, 0x70800, 0x72000 },
+	},
+	.dsc = {
+		.count = 2,
+		.base = { 0x80000, 0x80400 },
+	},
+	.intf = {
+		.base = { 0x6a000, 0x6a800, 0x6b000 },
+		.connect = {
+			[0] = INTF_DISABLED,
+			[1] = INTF_DSI,
+			[2] = INTF_DSI,
+		},
+	},
+	.max_clk = 360000000,
+};
+
+static const struct mdp5_cfg_hw msm8917_config = {
 	.name = "msm8917",
 	.mdp = {
 		.count = 1,
@@ -630,7 +727,7 @@ const struct mdp5_cfg_hw msm8917_config = {
 	.max_clk = 320000000,
 };
 
-const struct mdp5_cfg_hw msm8998_config = {
+static const struct mdp5_cfg_hw msm8998_config = {
 	.name = "msm8998",
 	.mdp = {
 		.count = 1,
@@ -745,6 +842,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
 	{ .revision = 6, .config = { .hw = &msm8x16_config } },
 	{ .revision = 9, .config = { .hw = &msm8x94_config } },
 	{ .revision = 7, .config = { .hw = &msm8x96_config } },
+	{ .revision = 11, .config = { .hw = &msm8x76_config } },
 	{ .revision = 15, .config = { .hw = &msm8917_config } },
 };
 
@@ -214,7 +214,6 @@ static void blend_setup(struct drm_crtc *crtc)
 	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
-	const struct mdp5_cfg_hw *hw_cfg;
 	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
 	const struct mdp_format *format;
 	struct mdp5_hw_mixer *mixer = pipeline->mixer;
@@ -232,8 +231,6 @@ static void blend_setup(struct drm_crtc *crtc)
 	u32 val;
 #define blender(stage)	((stage) - STAGE0)
 
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-
 	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
 
 	/* ctl could be released already when we are shutting down: */
@@ -19,10 +19,6 @@
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
 
-static const char *iommu_ports[] = {
-		"mdp_0",
-};
-
 static int mdp5_hw_init(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -233,8 +229,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
 		mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
 
 	if (aspace) {
-		aspace->mmu->funcs->detach(aspace->mmu,
-				iommu_ports, ARRAY_SIZE(iommu_ports));
+		aspace->mmu->funcs->detach(aspace->mmu);
 		msm_gem_address_space_put(aspace);
 	}
 }
@@ -314,6 +309,10 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
 	mdp5_kms->enable_count--;
 	WARN_ON(mdp5_kms->enable_count < 0);
 
+	if (mdp5_kms->tbu_rt_clk)
+		clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
+	if (mdp5_kms->tbu_clk)
+		clk_disable_unprepare(mdp5_kms->tbu_clk);
 	clk_disable_unprepare(mdp5_kms->ahb_clk);
 	clk_disable_unprepare(mdp5_kms->axi_clk);
 	clk_disable_unprepare(mdp5_kms->core_clk);
@@ -334,6 +333,10 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
 	clk_prepare_enable(mdp5_kms->core_clk);
 	if (mdp5_kms->lut_clk)
 		clk_prepare_enable(mdp5_kms->lut_clk);
+	if (mdp5_kms->tbu_clk)
+		clk_prepare_enable(mdp5_kms->tbu_clk);
+	if (mdp5_kms->tbu_rt_clk)
+		clk_prepare_enable(mdp5_kms->tbu_rt_clk);
 
 	return 0;
 }
@@ -466,14 +469,11 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 {
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
-	const struct mdp5_cfg_hw *hw_cfg;
 	unsigned int num_crtcs;
 	int i, ret, pi = 0, ci = 0;
 	struct drm_plane *primary[MAX_BASES] = { NULL };
 	struct drm_plane *cursor[MAX_BASES] = { NULL };
 
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-
 	/*
 	 * Construct encoders and modeset initialize connector devices
 	 * for each external display interface.
@@ -737,8 +737,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 
 		kms->aspace = aspace;
 
-		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
-				ARRAY_SIZE(iommu_ports));
+		ret = aspace->mmu->funcs->attach(aspace->mmu);
 		if (ret) {
 			DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
 				ret);
@@ -974,6 +973,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
 
 	/* optional clocks: */
 	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
+	get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
+	get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
 
 	/* we need to set a default rate before enabling. Set a safe
 	 * rate first, then figure out hw revision, and then set a
@@ -53,6 +53,8 @@ struct mdp5_kms {
 	struct clk *ahb_clk;
 	struct clk *core_clk;
 	struct clk *lut_clk;
+	struct clk *tbu_clk;
+	struct clk *tbu_rt_clk;
 	struct clk *vsync_clk;
 
 	/*
@@ -121,7 +121,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
 	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
 	int i, hsub, nplanes, nlines;
-	u32 fmt = format->base.pixel_format;
 	uint32_t blkcfg = 0;
 
 	nplanes = info->num_planes;
@@ -135,7 +134,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
 	 * them together, writes to SMP using a single client.
 	 */
 	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
-		fmt = DRM_FORMAT_NV24;
 		nplanes = 2;
 
 		/* if decimation is enabled, HW decimates less on the
@@ -66,6 +66,26 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
 	.num_dsi = 1,
 };
 
+static const char * const dsi_8976_bus_clk_names[] = {
+	"mdp_core", "iface", "bus",
+};
+
+static const struct msm_dsi_config msm8976_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 3,
+		.regs = {
+			{"gdsc", -1, -1},
+			{"vdda", 100000, 100},	/* 1.2 V */
+			{"vddio", 100000, 100},	/* 1.8 V */
+		},
+	},
+	.bus_clk_names = dsi_8976_bus_clk_names,
+	.num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names),
+	.io_start = { 0x1a94000, 0x1a96000 },
+	.num_dsi = 2,
+};
+
 static const struct msm_dsi_config msm8994_dsi_cfg = {
 	.io_offset = DSI_6G_REG_SHIFT,
 	.reg_cfg = {
@@ -147,7 +167,7 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
 	.num_dsi = 2,
 };
 
-const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
 	.link_clk_enable = dsi_link_clk_enable_v2,
 	.link_clk_disable = dsi_link_clk_disable_v2,
 	.clk_init_ver = dsi_clk_init_v2,
@@ -158,7 +178,7 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
 	.calc_clk_rate = dsi_calc_clk_rate_v2,
 };
 
-const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
 	.link_clk_enable = dsi_link_clk_enable_6g,
 	.link_clk_disable = dsi_link_clk_disable_6g,
 	.clk_init_ver = NULL,
@@ -169,7 +189,7 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
 	.calc_clk_rate = dsi_calc_clk_rate_6g,
 };
 
-const static struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
 	.link_clk_enable = dsi_link_clk_enable_6g,
 	.link_clk_disable = dsi_link_clk_disable_6g,
 	.clk_init_ver = dsi_clk_init_6g_v2,
@@ -197,6 +217,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
 		&msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
 	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
 		&msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2,
+		&msm8976_dsi_cfg, &msm_dsi_6g_host_ops},
 	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
 		&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
 	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
@@ -17,6 +17,7 @@
 #define MSM_DSI_6G_VER_MINOR_V1_3	0x10030000
 #define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
 #define MSM_DSI_6G_VER_MINOR_V1_4_1	0x10040001
+#define MSM_DSI_6G_VER_MINOR_V1_4_2	0x10040002
 #define MSM_DSI_6G_VER_MINOR_V2_2_0	0x20000000
 #define MSM_DSI_6G_VER_MINOR_V2_2_1	0x20020001
 
@@ -1293,14 +1293,13 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
 static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
 			u8 *buf, int rx_byte, int pkt_size)
 {
-	u32 *lp, *temp, data;
+	u32 *temp, data;
 	int i, j = 0, cnt;
 	u32 read_cnt;
 	u8 reg[16];
 	int repeated_bytes = 0;
 	int buf_offset = buf - msm_host->rx_buf;
 
-	lp = (u32 *)buf;
 	temp = (u32 *)reg;
 	cnt = (rx_byte + 3) >> 2;
 	if (cnt > 4)
Some files were not shown because too many files have changed in this diff.