linux_dsm_epyc7002/drivers/gpu/drm/i915/intel_device_info.h
Lionel Landwerlin 8cc7669355 drm/i915: store all subslice masks
Up to now, the subslice mask was assumed to be uniform across slices. But
starting with Cannonlake, slices can be asymmetric (for example, slice0
can have a different number of subslices than slice1+). This change stores
a subslice mask for every slice rather than a single mask that applies to
all slices.
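
As an illustration of the new layout, a consumer could walk the per-slice
masks as in the minimal sketch below. This is a hypothetical snippet, not
part of the patch: dev_priv is assumed to be a valid struct drm_i915_private
pointer, and it uses the sseu_get_eus() helper added here together with the
existing INTEL_INFO(), BIT() and hweight16() kernel helpers:

    const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
    unsigned int s, ss, enabled_eus = 0;

    for (s = 0; s < sseu->max_slices; s++) {
            if (!(sseu->slice_mask & BIT(s)))
                    continue;

            /* Each slice now carries its own subslice mask. */
            for (ss = 0; ss < sseu->max_subslices; ss++) {
                    if (!(sseu->subslice_mask[s] & BIT(ss)))
                            continue;

                    /* EU mask stored per (slice, subslice) pair. */
                    enabled_eus += hweight16(sseu_get_eus(sseu, s, ss));
            }
    }

With the masks populated, enabled_eus should end up matching sseu->eu_total.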

v2: Rework how we store total numbers in sseu_dev_info (Tvrtko)
    Fix CHV eu masks, was reading disabled as enabled (Tvrtko)
    Readability changes (Tvrtko)
    Add EU index helper (Tvrtko)

v3: Turn ALIGN(v, 8) / 8 into DIV_ROUND_UP(v, BITS_PER_BYTE) (Tvrtko)
    Reuse sseu_eu_idx() for setting eu_mask on CHV (Tvrtko)
    Reformat debug prints for subslices (Tvrtko)

v4: Change eu_mask helper into sseu_set_eus() (Tvrtko)

v5: With Haswell reporting masks & counts, bump sseu_*_eus() functions
    to use u16 (Lionel)

v6: Fix sseu_get_eus() for > 8 EUs per subslice (Lionel)

v7: Change debugfs reporting of the number of subslices per slice; this
    will need a small igt/pm_sseu change (Lionel)
    Drop subslice_total field from sseu_dev_info, rely on
    sseu_subslice_total() to recompute the value instead (Lionel)

v8: Remove unused function compute_subslice_total() (Lionel)

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180306122857.27317-2-lionel.g.landwerlin@intel.com
2018-03-08 10:06:20 +00:00

/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef _INTEL_DEVICE_INFO_H_
#define _INTEL_DEVICE_INFO_H_

#include "intel_display.h"

struct drm_printer;
struct drm_i915_private;

/* Keep in gen based order, and chronological order within a gen */
enum intel_platform {
        INTEL_PLATFORM_UNINITIALIZED = 0,
        /* gen2 */
        INTEL_I830,
        INTEL_I845G,
        INTEL_I85X,
        INTEL_I865G,
        /* gen3 */
        INTEL_I915G,
        INTEL_I915GM,
        INTEL_I945G,
        INTEL_I945GM,
        INTEL_G33,
        INTEL_PINEVIEW,
        /* gen4 */
        INTEL_I965G,
        INTEL_I965GM,
        INTEL_G45,
        INTEL_GM45,
        /* gen5 */
        INTEL_IRONLAKE,
        /* gen6 */
        INTEL_SANDYBRIDGE,
        /* gen7 */
        INTEL_IVYBRIDGE,
        INTEL_VALLEYVIEW,
        INTEL_HASWELL,
        /* gen8 */
        INTEL_BROADWELL,
        INTEL_CHERRYVIEW,
        /* gen9 */
        INTEL_SKYLAKE,
        INTEL_BROXTON,
        INTEL_KABYLAKE,
        INTEL_GEMINILAKE,
        INTEL_COFFEELAKE,
        /* gen10 */
        INTEL_CANNONLAKE,
        /* gen11 */
        INTEL_ICELAKE,
        INTEL_MAX_PLATFORMS
};
#define DEV_INFO_FOR_EACH_FLAG(func) \
        func(is_mobile); \
        func(is_lp); \
        func(is_alpha_support); \
        /* Keep has_* in alphabetical order */ \
        func(has_64bit_reloc); \
        func(has_aliasing_ppgtt); \
        func(has_csr); \
        func(has_ddi); \
        func(has_dp_mst); \
        func(has_reset_engine); \
        func(has_fbc); \
        func(has_fpga_dbg); \
        func(has_full_ppgtt); \
        func(has_full_48bit_ppgtt); \
        func(has_gmch_display); \
        func(has_guc); \
        func(has_guc_ct); \
        func(has_hotplug); \
        func(has_l3_dpf); \
        func(has_llc); \
        func(has_logical_ring_contexts); \
        func(has_logical_ring_elsq); \
        func(has_logical_ring_preemption); \
        func(has_overlay); \
        func(has_pooled_eu); \
        func(has_psr); \
        func(has_rc6); \
        func(has_rc6p); \
        func(has_resource_streamer); \
        func(has_runtime_pm); \
        func(has_snoop); \
        func(unfenced_needs_alignment); \
        func(cursor_needs_physical); \
        func(hws_needs_physical); \
        func(overlay_needs_physical); \
        func(supports_tv); \
        func(has_ipc);
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (7)
struct sseu_dev_info {
        u8 slice_mask;
        u8 subslice_mask[GEN_MAX_SUBSLICES];
        u16 eu_total;
        u8 eu_per_subslice;
        u8 min_eu_in_pool;
        /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
        u8 subslice_7eu[3];
        u8 has_slice_pg:1;
        u8 has_subslice_pg:1;
        u8 has_eu_pg:1;

        /* Topology fields */
        u8 max_slices;
        u8 max_subslices;
        u8 max_eus_per_subslice;

        /* We don't have more than 8 eus per subslice at the moment and as we
         * store eus enabled using bits, no need to multiply by eus per
         * subslice.
         */
        u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
};
typedef u8 intel_ring_mask_t;
struct intel_device_info {
        u16 device_id;
        u16 gen_mask;

        u8 gen;
        u8 gt; /* GT number, 0 if undefined */
        u8 num_rings;
        intel_ring_mask_t ring_mask; /* Rings supported by the HW */

        enum intel_platform platform;
        u32 platform_mask;

        unsigned int page_sizes; /* page sizes supported by the HW */

        u32 display_mmio_offset;

        u8 num_pipes;
        u8 num_sprites[I915_MAX_PIPES];
        u8 num_scalers[I915_MAX_PIPES];

#define DEFINE_FLAG(name) u8 name:1
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG

        u16 ddb_size; /* in blocks */

        /* Register offsets for the various display pipes and transcoders */
        int pipe_offsets[I915_MAX_TRANSCODERS];
        int trans_offsets[I915_MAX_TRANSCODERS];
        int palette_offsets[I915_MAX_PIPES];
        int cursor_offsets[I915_MAX_PIPES];

        /* Slice/subslice/EU info */
        struct sseu_dev_info sseu;

        u32 cs_timestamp_frequency_khz;

        struct color_luts {
                u16 degamma_lut_size;
                u16 gamma_lut_size;
        } color;
};
struct intel_driver_caps {
        unsigned int scheduler;
};
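
/* Total number of enabled subslices, summed across all slices. */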
static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
{
        unsigned int i, total = 0;

        for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
                total += hweight8(sseu->subslice_mask[i]);

        return total;
}
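
/*
 * Byte offset into sseu->eu_mask[] at which the EU bits of the given
 * (slice, subslice) pair start.
 */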
static inline int sseu_eu_idx(const struct sseu_dev_info *sseu,
                              int slice, int subslice)
{
        int subslice_stride = DIV_ROUND_UP(sseu->max_eus_per_subslice,
                                           BITS_PER_BYTE);
        int slice_stride = sseu->max_subslices * subslice_stride;

        return slice * slice_stride + subslice * subslice_stride;
}
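
/* Read back the EU mask of a (slice, subslice) pair as a u16 bitfield. */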
static inline u16 sseu_get_eus(const struct sseu_dev_info *sseu,
                               int slice, int subslice)
{
        int i, offset = sseu_eu_idx(sseu, slice, subslice);
        u16 eu_mask = 0;

        for (i = 0;
             i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
                eu_mask |= ((u16) sseu->eu_mask[offset + i]) <<
                        (i * BITS_PER_BYTE);
        }

        return eu_mask;
}
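
/* Store a u16 EU bitfield for a (slice, subslice) pair into sseu->eu_mask[]. */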
static inline void sseu_set_eus(struct sseu_dev_info *sseu,
                                int slice, int subslice, u16 eu_mask)
{
        int i, offset = sseu_eu_idx(sseu, slice, subslice);

        for (i = 0;
             i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
                sseu->eu_mask[offset + i] =
                        (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
        }
}
const char *intel_platform_name(enum intel_platform platform);

void intel_device_info_runtime_init(struct intel_device_info *info);
void intel_device_info_dump(const struct intel_device_info *info,
                            struct drm_printer *p);
void intel_device_info_dump_flags(const struct intel_device_info *info,
                                  struct drm_printer *p);
void intel_device_info_dump_runtime(const struct intel_device_info *info,
                                    struct drm_printer *p);

void intel_driver_caps_print(const struct intel_driver_caps *caps,
                             struct drm_printer *p);
#endif