Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-30 11:16:48 +07:00)
commit 974e59ba0b
Merge tag 'drm-intel-next-fixes-2015-10-22' of git://anongit.freedesktop.org/drm-intel into drm-next

Bunch of -fixes for 4.4. Well not just, I've left the mmio/register work
from Ville in here since it's low-risk but lots of churn all over.

* tag 'drm-intel-next-fixes-2015-10-22' of git://anongit.freedesktop.org/drm-intel: (23 commits)
  drm/i915: Use round to closest when computing the CEA 1.001 pixel clocks
  drm/i915: Kill the leftover RMW from ivb_sprite_disable()
  drm/i915: restore ggtt double-bind avoidance
  drm/i915/skl: Enable pipe gamma for sprite planes.
  drm/i915/skl+: Enable pipe CSC on cursor planes. (v2)
  MAINTAINERS: add link to the Intel Graphics for Linux web site
  drm/i915: Move skl/bxt gt specific workarounds to ring init
  drm/i915: Drop i915_gem_obj_is_pinned() from set-cache-level
  drm/i915: revert a few more watermark commits
  drm/i915: Remove dev_priv argument from NEEDS_FORCE_WAKE
  drm/i915: Clean up LVDS register handling
  drm/i915: Throw out some useless variables
  drm/i915: Parametrize and fix SWF registers
  drm/i915: s/PIPE_FRMCOUNT_GM45/PIPE_FRMCOUNT_G4X/ etc.
  drm/i915: Turn GEN5_ASSERT_IIR_IS_ZERO() into a function
  drm/i915: Fix a few bad hex numbers in register defines
  drm/i915: Protect register macro arguments
  drm/i915: Include gpio_mmio_base in GMBUS reg defines
  drm/i915: Parametrize HSW video DIP data registers
  drm/i915: Eliminate weird parameter inversion from BXT PPS registers
  ...
@@ -3584,6 +3584,7 @@ M: Daniel Vetter <daniel.vetter@intel.com>
M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
W: https://01.org/linuxgraphics/
Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://anongit.freedesktop.org/drm-intel
S: Supported
@@ -1850,7 +1850,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
goto out;

if (opregion->header) {
memcpy_fromio(data, opregion->header, OPREGION_SIZE);
memcpy(data, opregion->header, OPREGION_SIZE);
seq_write(m, data, OPREGION_SIZE);
}

@@ -450,14 +450,14 @@ struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
struct opregion_header __iomem *header;
struct opregion_acpi __iomem *acpi;
struct opregion_swsci __iomem *swsci;
struct opregion_header *header;
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
u32 swsci_gbda_sub_functions;
u32 swsci_sbcb_sub_functions;
struct opregion_asle __iomem *asle;
void __iomem *vbt;
u32 __iomem *lid_state;
struct opregion_asle *asle;
void *vbt;
u32 *lid_state;
struct work_struct asle_work;
};
#define OPREGION_SIZE (8*1024)
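The struct change above goes hand in hand with the memcpy_fromio() to memcpy() switch in the debugfs hunk: once the opregion is mapped as ordinary memory rather than through ioremap(), the __iomem annotation (and the special io accessors it requires) can be dropped. A minimal sketch of what the annotation buys, with illustrative names only (this is not code from the patch):

/* Illustrative fragment: __iomem pointers need the io accessors. */
#include <linux/io.h>
#include <linux/string.h>

struct example_maps {
	void __iomem *mmio;	/* ioremap()ed: use readl()/memcpy_fromio() */
	void *opregion;		/* normal mapping: plain memcpy()/dereference */
};

static void example_copy(struct example_maps *m, void *dst, size_t len)
{
	memcpy_fromio(dst, m->mmio, len);	/* required for __iomem */
	memcpy(dst, m->opregion, len);		/* fine for a plain pointer */
}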
@@ -628,6 +628,10 @@ struct drm_i915_display_funcs {
struct dpll *match_clock,
struct dpll *best_clock);
void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
@@ -1031,7 +1035,7 @@ struct i915_suspend_saved_registers {
u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF2[3];
u32 saveSWF3[3];
uint64_t saveFENCE[I915_MAX_NUM_FENCES];
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
@@ -3657,53 +3657,106 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}

/**
* Changes the cache-level of an object across all VMA.
*
* After this function returns, the object will be in the new cache-level
* across all GTT and the contents of the backing storage will be coherent,
* with respect to the new cache-level. In order to keep the backing storage
* coherent for all users, we only allow a single cache level to be set
* globally on the object and prevent it from being changed whilst the
* hardware is reading from the object. That is if the object is currently
* on the scanout it will be set to uncached (or equivalent display
* cache coherency) and all non-MOCS GPU access will also be uncached so
* that all direct access to the scanout remains coherent.
*/
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct i915_vma *vma, *next;
bool bound = false;
int ret = 0;

if (obj->cache_level == cache_level)
goto out;

if (i915_gem_obj_is_pinned(obj)) {
DRM_DEBUG("can not change the cache level of pinned objects\n");
return -EBUSY;
}

/* Inspect the list of currently bound VMA and unbind any that would
* be invalid given the new cache-level. This is principally to
* catch the issue of the CS prefetch crossing page boundaries and
* reading an invalid PTE on older architectures.
*/
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;

if (vma->pin_count) {
DRM_DEBUG("can not change the cache level of pinned objects\n");
return -EBUSY;
}

if (!i915_gem_valid_gtt_space(vma, cache_level)) {
ret = i915_vma_unbind(vma);
if (ret)
return ret;
}
} else
bound = true;
}

if (i915_gem_obj_bound_any(obj)) {
/* We can reuse the existing drm_mm nodes but need to change the
* cache-level on the PTE. We could simply unbind them all and
* rebind with the correct cache-level on next use. However since
* we already have a valid slot, dma mapping, pages etc, we may as
* rewrite the PTE in the belief that doing so tramples upon less
* state and so involves less work.
*/
if (bound) {
/* Before we change the PTE, the GPU must not be accessing it.
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
*/
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;

i915_gem_object_finish_gtt(obj);
if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmaping to force
* userspace to refault in the pages and we can
* then double check if the GTT mapping is still
* valid for that pointer access.
*/
i915_gem_release_mmap(obj);

/* Before SandyBridge, you could not use tiling or fence
* registers with snooped memory, so relinquish any fences
* currently pointing to our region in the aperture.
*/
if (INTEL_INFO(dev)->gen < 6) {
/* As we no longer need a fence for GTT access,
* we can relinquish it now (and so prevent having
* to steal a fence from someone else on the next
* fence request). Note GPU activity would have
* dropped the fence as all snoopable access is
* supposed to be linear.
*/
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
} else {
/* We either have incoherent backing store and
* so no GTT access or the architecture is fully
* coherent. In such cases, existing GTT mmaps
* ignore the cache bit in the PTE and we can
* rewrite it without confusing the GPU or having
* to force userspace to fault back in its mmaps.
*/
}

list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node)) {
ret = i915_vma_bind(vma, cache_level,
PIN_UPDATE);
if (ret)
return ret;
}
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;

ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
if (ret)
return ret;
}
}

list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3711,6 +3764,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
obj->cache_level = cache_level;

out:
/* Flush the dirty CPU caches to the backing storage so that the
* object is now coherent at its new cache level (with respect
* to the access domain).
*/
if (obj->cache_dirty &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
@@ -2501,6 +2501,36 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
static int ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags = 0;
int ret;

ret = i915_get_ggtt_vma_pages(vma);
if (ret)
return ret;

/* Currently applicable only to VLV */
if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY;

vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
vma->node.start,
cache_level, pte_flags);

/*
* Without aliasing PPGTT there's no difference between
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
*/
vma->bound |= GLOBAL_BIND | LOCAL_BIND;

return 0;
}

static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2519,23 +2549,13 @@ static int ggtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;

if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (flags & GLOBAL_BIND) {
vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, pte_flags);

/* Note the inconsistency here is due to absence of the
* aliasing ppgtt on gen4 and earlier. Though we always
* request PIN_USER for execbuffer (translated to LOCAL_BIND),
* without the appgtt, we cannot honour that request and so
* must substitute it with a global binding. Since we do this
* behind the upper layers back, we need to explicitly set
* the bound flag ourselves.
*/
vma->bound |= GLOBAL_BIND;
}

if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
if (flags & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base, pages,
vma->node.start,
@@ -2699,6 +2719,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
true);

dev_priv->mm.aliasing_ppgtt = ppgtt;
WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
}

return 0;
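The comment in ggtt_bind_vma() above is the whole trick behind "restore ggtt double-bind avoidance": when there is no aliasing PPGTT, global and per-process bindings share the same PTEs, so the first bind marks both flags and a later request for the other flavour becomes a no-op. A self-contained sketch of that bookkeeping, using made-up helpers rather than the driver's own:

/* Illustrative sketch of double-bind avoidance via bound-flag upgrade. */
#include <stdio.h>

#define GLOBAL_BIND (1u << 0)
#define LOCAL_BIND  (1u << 1)

struct fake_vma {
	unsigned int bound;	/* which address spaces already have PTEs */
};

static void fake_bind(struct fake_vma *vma, unsigned int flags, int has_aliasing_ppgtt)
{
	unsigned int todo = flags & ~vma->bound;	/* only what is missing */

	if (!todo)
		return;	/* nothing to do: this is the double bind being avoided */

	/* ... write the PTEs for 'todo' here ... */

	if (!has_aliasing_ppgtt)
		/* global and per-process GTT share PTEs: satisfy both at once */
		vma->bound |= GLOBAL_BIND | LOCAL_BIND;
	else
		vma->bound |= todo;
}

int main(void)
{
	struct fake_vma vma = { 0 };

	fake_bind(&vma, GLOBAL_BIND, 0);	/* binds, upgrades to both flags */
	fake_bind(&vma, LOCAL_BIND, 0);		/* no-op: already covered */
	printf("bound mask: %#x\n", vma.bound);
	return 0;
}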
@@ -139,27 +139,30 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
u32 val = I915_READ(reg); \
if (val) { \
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
(reg), val); \
I915_WRITE((reg), 0xffffffff); \
POSTING_READ(reg); \
I915_WRITE((reg), 0xffffffff); \
POSTING_READ(reg); \
} \
} while (0)
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = I915_READ(reg);

if (val == 0)
return;

WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
reg, val);
I915_WRITE(reg, 0xffffffff);
POSTING_READ(reg);
I915_WRITE(reg, 0xffffffff);
POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
gen5_assert_iir_is_zero(dev_priv, type##IIR); \
I915_WRITE(type##IER, (ier_val)); \
I915_WRITE(type##IMR, (imr_val)); \
POSTING_READ(type##IMR); \
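Rewriting GEN5_ASSERT_IIR_IS_ZERO() as the gen5_assert_iir_is_zero() function above gives type checking and, more importantly, evaluates its argument exactly once, where the statement macro expanded reg several times. A small generic illustration of the multiple-evaluation hazard (not i915 code):

/* Illustrative only: why a function beats a statement macro here. */
#include <stdio.h>

#define CHECK_ZERO_MACRO(x) do { if ((x) != 0) printf("bad: %d\n", (x)); } while (0)

static void check_zero_func(int x)
{
	if (x != 0)
		printf("bad: %d\n", x);
}

static int calls;

static int next_val(void)
{
	calls++;		/* side effect: counts evaluations */
	return 7;
}

int main(void)
{
	CHECK_ZERO_MACRO(next_val());	/* the argument is evaluated twice */
	printf("macro evaluated the argument %d times\n", calls);

	calls = 0;
	check_zero_func(next_val());	/* exactly one evaluation */
	printf("function evaluated the argument %d times\n", calls);
	return 0;
}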
@@ -707,12 +710,11 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);

return I915_READ(reg);
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
@@ -3365,7 +3367,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
gen5_assert_iir_is_zero(dev_priv, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
}

@@ -4397,7 +4399,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
dev->driver->get_vblank_counter = g4x_get_vblank_counter;
} else {
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -429,7 +429,7 @@
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
#define PIPE_CONTROL_FLUSH_L3 (1<<27)
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
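The ((len)-2) change above is typical of the "Protect register macro arguments" patches in this pull: without the extra parentheses, passing an expression can silently change the arithmetic because of operator precedence. A tiny generic example of the failure mode (not from the driver):

/* Illustrative only: unparenthesized macro arguments and precedence. */
#include <stdio.h>

#define FIELD_BAD(x)  (x << 10)	/* argument text is pasted in verbatim */
#define FIELD_GOOD(x) ((x) << 10)

int main(void)
{
	int a = 3, mask = 1;

	/* FIELD_BAD(a & mask)  -> (a & mask << 10) -> a & (mask << 10) = 0     */
	/* FIELD_GOOD(a & mask) -> ((a & mask) << 10)                   = 0x400 */
	printf("bad=%#x good=%#x\n", FIELD_BAD(a & mask), FIELD_GOOD(a & mask));
	return 0;
}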
@ -1255,7 +1255,7 @@ enum skl_disp_power_wells {
|
||||
#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
|
||||
#define PORT_PLL_DCO_AMP_DEFAULT 15
|
||||
#define PORT_PLL_DCO_AMP_MASK 0x3c00
|
||||
#define PORT_PLL_DCO_AMP(x) (x<<10)
|
||||
#define PORT_PLL_DCO_AMP(x) ((x)<<10)
|
||||
#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
|
||||
_PORT_PLL_0_B, \
|
||||
_PORT_PLL_0_C)
|
||||
@ -1552,8 +1552,8 @@ enum skl_disp_power_wells {
|
||||
#define RENDER_HWS_PGA_GEN7 (0x04080)
|
||||
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
|
||||
#define RING_FAULT_GTTSEL_MASK (1<<11)
|
||||
#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff)
|
||||
#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
|
||||
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
|
||||
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
|
||||
#define RING_FAULT_VALID (1<<0)
|
||||
#define DONE_REG 0x40b0
|
||||
#define GEN8_PRIVATE_PAT_LO 0x40e0
|
||||
@ -1641,9 +1641,9 @@ enum skl_disp_power_wells {
|
||||
#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
|
||||
#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
|
||||
#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
|
||||
#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3))
|
||||
#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + (pipe)*3))
|
||||
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
|
||||
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
|
||||
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
|
||||
|
||||
#define GEN8_FAULT_TLB_DATA0 0x04b10
|
||||
#define GEN8_FAULT_TLB_DATA1 0x04b14
|
||||
@ -1704,8 +1704,8 @@ enum skl_disp_power_wells {
|
||||
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
|
||||
#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
|
||||
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
|
||||
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << (slice * 2))
|
||||
#define GEN9_IZ_HASHING(slice, val) ((val) << (slice * 2))
|
||||
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
|
||||
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
|
||||
|
||||
#define GFX_MODE 0x02520
|
||||
#define GFX_MODE_GEN7 0x0229c
|
||||
@@ -2144,7 +2144,7 @@ enum skl_disp_power_wells {
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)

#define GMBUS0 0x5100 /* clock/port select */
#define GMBUS0 (dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
#define GMBUS_RATE_100KHZ (0<<8)
#define GMBUS_RATE_50KHZ (1<<8)
#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
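With dev_priv->gpio_mmio_base folded into the GMBUS defines, the per-platform display offset is applied in exactly one place and call sites no longer add the base themselves. Roughly, and only as an illustration (gmbus0_val is a placeholder; the real GMBUS code may be shaped differently):

/* before: every access added the platform's GPIO base by hand */
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, gmbus0_val);

/* after: GMBUS0 already expands to (dev_priv->gpio_mmio_base + 0x5100) */
I915_WRITE(GMBUS0, gmbus0_val);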
@ -2163,7 +2163,7 @@ enum skl_disp_power_wells {
|
||||
#define GMBUS_PIN_2_BXT 2
|
||||
#define GMBUS_PIN_3_BXT 3
|
||||
#define GMBUS_NUM_PINS 7 /* including 0 */
|
||||
#define GMBUS1 0x5104 /* command/status */
|
||||
#define GMBUS1 (dev_priv->gpio_mmio_base + 0x5104) /* command/status */
|
||||
#define GMBUS_SW_CLR_INT (1<<31)
|
||||
#define GMBUS_SW_RDY (1<<30)
|
||||
#define GMBUS_ENT (1<<29) /* enable timeout */
|
||||
@ -2177,7 +2177,7 @@ enum skl_disp_power_wells {
|
||||
#define GMBUS_SLAVE_ADDR_SHIFT 1
|
||||
#define GMBUS_SLAVE_READ (1<<0)
|
||||
#define GMBUS_SLAVE_WRITE (0<<0)
|
||||
#define GMBUS2 0x5108 /* status */
|
||||
#define GMBUS2 (dev_priv->gpio_mmio_base + 0x5108) /* status */
|
||||
#define GMBUS_INUSE (1<<15)
|
||||
#define GMBUS_HW_WAIT_PHASE (1<<14)
|
||||
#define GMBUS_STALL_TIMEOUT (1<<13)
|
||||
@ -2185,14 +2185,14 @@ enum skl_disp_power_wells {
|
||||
#define GMBUS_HW_RDY (1<<11)
|
||||
#define GMBUS_SATOER (1<<10)
|
||||
#define GMBUS_ACTIVE (1<<9)
|
||||
#define GMBUS3 0x510c /* data buffer bytes 3-0 */
|
||||
#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
|
||||
#define GMBUS3 (dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
|
||||
#define GMBUS4 (dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
|
||||
#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
|
||||
#define GMBUS_NAK_EN (1<<3)
|
||||
#define GMBUS_IDLE_EN (1<<2)
|
||||
#define GMBUS_HW_WAIT_EN (1<<1)
|
||||
#define GMBUS_HW_RDY_EN (1<<0)
|
||||
#define GMBUS5 0x5120 /* byte index */
|
||||
#define GMBUS5 (dev_priv->gpio_mmio_base + 0x5120) /* byte index */
|
||||
#define GMBUS_2BYTE_INDEX_EN (1<<31)
|
||||
|
||||
/*
|
||||
@ -2866,21 +2866,21 @@ enum skl_disp_power_wells {
|
||||
* doesn't need saving on GT1
|
||||
*/
|
||||
#define CXT_SIZE 0x21a0
|
||||
#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
|
||||
#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
|
||||
#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
|
||||
#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
|
||||
#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
|
||||
#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
|
||||
#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
|
||||
#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
|
||||
#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f)
|
||||
#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f)
|
||||
#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
|
||||
GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
|
||||
GEN6_CXT_PIPELINE_SIZE(cxt_reg))
|
||||
#define GEN7_CXT_SIZE 0x21a8
|
||||
#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f)
|
||||
#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7)
|
||||
#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f)
|
||||
#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
|
||||
#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
|
||||
#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
|
||||
#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
|
||||
#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
|
||||
#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
|
||||
#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f)
|
||||
#define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7)
|
||||
#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f)
|
||||
#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
|
||||
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
|
||||
/* Haswell does have the CXT_SIZE register however it does not appear to be
|
||||
@ -4284,7 +4284,7 @@ enum skl_disp_power_wells {
|
||||
#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14)
|
||||
#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13)
|
||||
#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12)
|
||||
#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (1f << 5)
|
||||
#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5)
|
||||
#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
|
||||
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
|
||||
|
||||
@ -4846,10 +4846,10 @@ enum skl_disp_power_wells {
|
||||
#define PIPE_PIXEL_MASK 0x00ffffff
|
||||
#define PIPE_PIXEL_SHIFT 0
|
||||
/* GM45+ just has to be different */
|
||||
#define _PIPEA_FRMCOUNT_GM45 0x70040
|
||||
#define _PIPEA_FLIPCOUNT_GM45 0x70044
|
||||
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45)
|
||||
#define PIPE_FLIPCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_GM45)
|
||||
#define _PIPEA_FRMCOUNT_G4X 0x70040
|
||||
#define _PIPEA_FLIPCOUNT_G4X 0x70044
|
||||
#define PIPE_FRMCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
|
||||
#define PIPE_FLIPCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
|
||||
|
||||
/* Cursor A & B regs */
|
||||
#define _CURACNTR 0x70080
|
||||
@ -4991,20 +4991,20 @@ enum skl_disp_power_wells {
|
||||
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
|
||||
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
|
||||
|
||||
/* VBIOS flags */
|
||||
#define SWF00 (dev_priv->info.display_mmio_offset + 0x71410)
|
||||
#define SWF01 (dev_priv->info.display_mmio_offset + 0x71414)
|
||||
#define SWF02 (dev_priv->info.display_mmio_offset + 0x71418)
|
||||
#define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c)
|
||||
#define SWF04 (dev_priv->info.display_mmio_offset + 0x71420)
|
||||
#define SWF05 (dev_priv->info.display_mmio_offset + 0x71424)
|
||||
#define SWF06 (dev_priv->info.display_mmio_offset + 0x71428)
|
||||
#define SWF10 (dev_priv->info.display_mmio_offset + 0x70410)
|
||||
#define SWF11 (dev_priv->info.display_mmio_offset + 0x70414)
|
||||
#define SWF14 (dev_priv->info.display_mmio_offset + 0x71420)
|
||||
#define SWF30 (dev_priv->info.display_mmio_offset + 0x72414)
|
||||
#define SWF31 (dev_priv->info.display_mmio_offset + 0x72418)
|
||||
#define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c)
|
||||
/*
|
||||
* VBIOS flags
|
||||
* gen2:
|
||||
* [00:06] alm,mgm
|
||||
* [10:16] all
|
||||
* [30:32] alm,mgm
|
||||
* gen3+:
|
||||
* [00:0f] all
|
||||
* [10:1f] all
|
||||
* [30:32] all
|
||||
*/
|
||||
#define SWF0(i) (dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
|
||||
#define SWF1(i) (dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
|
||||
#define SWF3(i) (dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
|
||||
|
||||
/* Pipe B */
|
||||
#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
|
||||
@ -5012,8 +5012,8 @@ enum skl_disp_power_wells {
|
||||
#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024)
|
||||
#define _PIPEBFRAMEHIGH 0x71040
|
||||
#define _PIPEBFRAMEPIXEL 0x71044
|
||||
#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040)
|
||||
#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044)
|
||||
#define _PIPEB_FRMCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71040)
|
||||
#define _PIPEB_FLIPCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71044)
|
||||
|
||||
|
||||
/* Display B control */
|
||||
@ -5223,18 +5223,18 @@ enum skl_disp_power_wells {
|
||||
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
|
||||
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
|
||||
|
||||
#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)
|
||||
#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
|
||||
#define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE)
|
||||
#define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS)
|
||||
#define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE)
|
||||
#define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL)
|
||||
#define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK)
|
||||
#define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF)
|
||||
#define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL)
|
||||
#define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF)
|
||||
#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
|
||||
#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
|
||||
#define SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
|
||||
#define SPLINOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
|
||||
#define SPSTRIDE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
|
||||
#define SPPOS(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
|
||||
#define SPSIZE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
|
||||
#define SPKEYMINVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
|
||||
#define SPKEYMSK(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
|
||||
#define SPSURF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
|
||||
#define SPKEYMAXVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
|
||||
#define SPTILEOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
|
||||
#define SPCONSTALPHA(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
|
||||
#define SPGAMC(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
|
||||
|
||||
/*
|
||||
* CHV pipe B sprite CSC
|
||||
@ -5580,7 +5580,7 @@ enum skl_disp_power_wells {
|
||||
#define PS_SCALER_MODE_DYN (0 << 28)
|
||||
#define PS_SCALER_MODE_HQ (1 << 28)
|
||||
#define PS_PLANE_SEL_MASK (7 << 25)
|
||||
#define PS_PLANE_SEL(plane) ((plane + 1) << 25)
|
||||
#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
|
||||
#define PS_FILTER_MASK (3 << 23)
|
||||
#define PS_FILTER_MEDIUM (0 << 23)
|
||||
#define PS_FILTER_EDGE_ENHANCE (2 << 23)
|
||||
@ -5745,7 +5745,7 @@ enum skl_disp_power_wells {
|
||||
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
|
||||
#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
|
||||
#define DE_PIPEA_VBLANK_IVB (1<<0)
|
||||
#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
|
||||
#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
|
||||
|
||||
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
|
||||
#define MASTER_INTERRUPT_ENABLE (1<<31)
|
||||
@ -5769,7 +5769,7 @@ enum skl_disp_power_wells {
|
||||
#define GEN8_DE_PIPE_C_IRQ (1<<18)
|
||||
#define GEN8_DE_PIPE_B_IRQ (1<<17)
|
||||
#define GEN8_DE_PIPE_A_IRQ (1<<16)
|
||||
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
|
||||
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
|
||||
#define GEN8_GT_VECS_IRQ (1<<6)
|
||||
#define GEN8_GT_PM_IRQ (1<<4)
|
||||
#define GEN8_GT_VCS2_IRQ (1<<3)
|
||||
@ -5813,7 +5813,7 @@ enum skl_disp_power_wells {
|
||||
#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
|
||||
#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
|
||||
#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
|
||||
#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p))
|
||||
#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + (p)))
|
||||
#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
|
||||
(GEN8_PIPE_CURSOR_FAULT | \
|
||||
GEN8_PIPE_SPRITE_FAULT | \
|
||||
@ -6072,7 +6072,7 @@ enum skl_disp_power_wells {
|
||||
#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
|
||||
#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
|
||||
#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
|
||||
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
|
||||
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
|
||||
|
||||
/* digital port hotplug */
|
||||
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
|
||||
@ -6183,9 +6183,9 @@ enum skl_disp_power_wells {
|
||||
#define PCH_SSC4_AUX_PARMS 0xc6214
|
||||
|
||||
#define PCH_DPLL_SEL 0xc7000
|
||||
#define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4))
|
||||
#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
|
||||
#define TRANS_DPLLA_SEL(pipe) 0
|
||||
#define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3))
|
||||
#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
|
||||
|
||||
/* transcoder */
|
||||
|
||||
@@ -6286,16 +6286,16 @@ enum skl_disp_power_wells {

#define HSW_TVIDEO_DIP_CTL(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A)
#define HSW_TVIDEO_DIP_VS_DATA(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A)
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) + (i) * 4)
#define HSW_TVIDEO_DIP_GCP(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) + (i) * 4)
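Adding the i parameter above bakes the one-dword-per-register stride into the macro, so an infoframe payload can be written by indexing the define directly instead of adding (i) * 4 at every call site. A hedged sketch of such a write loop (frame, len and cpu_transcoder are assumed to exist; the real intel_hdmi.c code may differ):

/* Illustrative fragment: write the AVI infoframe payload one dword at a time. */
const u32 *data = (const u32 *)frame;
int i;

for (i = 0; i < DIV_ROUND_UP(len, 4); i++)
	I915_WRITE(HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i), *data++);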
#define HSW_STEREO_3D_CTL_A 0x70020
|
||||
#define S3D_ENABLE (1<<31)
|
||||
@ -6587,10 +6587,10 @@ enum skl_disp_power_wells {
|
||||
#define _BXT_PP_ON_DELAYS2 0xc7308
|
||||
#define _BXT_PP_OFF_DELAYS2 0xc730c
|
||||
|
||||
#define BXT_PP_STATUS(n) ((!n) ? PCH_PP_STATUS : _BXT_PP_STATUS2)
|
||||
#define BXT_PP_CONTROL(n) ((!n) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2)
|
||||
#define BXT_PP_ON_DELAYS(n) ((!n) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2)
|
||||
#define BXT_PP_OFF_DELAYS(n) ((!n) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2)
|
||||
#define BXT_PP_STATUS(n) _PIPE(n, PCH_PP_STATUS, _BXT_PP_STATUS2)
|
||||
#define BXT_PP_CONTROL(n) _PIPE(n, PCH_PP_CONTROL, _BXT_PP_CONTROL2)
|
||||
#define BXT_PP_ON_DELAYS(n) _PIPE(n, PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
|
||||
#define BXT_PP_OFF_DELAYS(n) _PIPE(n, PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
|
||||
|
||||
#define PCH_DP_B 0xe4100
|
||||
#define PCH_DPB_AUX_CH_CTL 0xe4110
|
||||
@ -7348,7 +7348,7 @@ enum skl_disp_power_wells {
|
||||
#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
|
||||
/* For each transcoder, we need to select the corresponding port clock */
|
||||
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
|
||||
#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
|
||||
#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
|
||||
|
||||
#define TRANSA_MSA_MISC 0x60410
|
||||
#define TRANSB_MSA_MISC 0x61410
|
||||
@ -7421,10 +7421,10 @@ enum skl_disp_power_wells {
|
||||
|
||||
/* DPLL control2 */
|
||||
#define DPLL_CTRL2 0x6C05C
|
||||
#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<(port+15))
|
||||
#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
|
||||
#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
|
||||
#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
|
||||
#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) (clk<<((port)*3+1))
|
||||
#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1))
|
||||
#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
|
||||
|
||||
/* DPLL Status */
|
||||
@ -7437,23 +7437,23 @@ enum skl_disp_power_wells {
|
||||
#define DPLL3_CFGCR1 0x6C050
|
||||
#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
|
||||
#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
|
||||
#define DPLL_CFGCR1_DCO_FRACTION(x) (x<<9)
|
||||
#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9)
|
||||
#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
|
||||
|
||||
#define DPLL1_CFGCR2 0x6C044
|
||||
#define DPLL2_CFGCR2 0x6C04C
|
||||
#define DPLL3_CFGCR2 0x6C054
|
||||
#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
|
||||
#define DPLL_CFGCR2_QDIV_RATIO(x) (x<<8)
|
||||
#define DPLL_CFGCR2_QDIV_MODE(x) (x<<7)
|
||||
#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8)
|
||||
#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7)
|
||||
#define DPLL_CFGCR2_KDIV_MASK (3<<5)
|
||||
#define DPLL_CFGCR2_KDIV(x) (x<<5)
|
||||
#define DPLL_CFGCR2_KDIV(x) ((x)<<5)
|
||||
#define DPLL_CFGCR2_KDIV_5 (0<<5)
|
||||
#define DPLL_CFGCR2_KDIV_2 (1<<5)
|
||||
#define DPLL_CFGCR2_KDIV_3 (2<<5)
|
||||
#define DPLL_CFGCR2_KDIV_1 (3<<5)
|
||||
#define DPLL_CFGCR2_PDIV_MASK (7<<2)
|
||||
#define DPLL_CFGCR2_PDIV(x) (x<<2)
|
||||
#define DPLL_CFGCR2_PDIV(x) ((x)<<2)
|
||||
#define DPLL_CFGCR2_PDIV_1 (0<<2)
|
||||
#define DPLL_CFGCR2_PDIV_2 (1<<2)
|
||||
#define DPLL_CFGCR2_PDIV_3 (2<<2)
|
||||
@ -7979,7 +7979,7 @@ enum skl_disp_power_wells {
|
||||
#define VIRTUAL_CHANNEL_SHIFT 6
|
||||
#define VIRTUAL_CHANNEL_MASK (3 << 6)
|
||||
#define DATA_TYPE_SHIFT 0
|
||||
#define DATA_TYPE_MASK (3f << 0)
|
||||
#define DATA_TYPE_MASK (0x3f << 0)
|
||||
/* data type values, see include/video/mipi_display.h */
|
||||
|
||||
#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
|
||||
|
@ -122,12 +122,24 @@ int i915_save_state(struct drm_device *dev)
|
||||
dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
|
||||
|
||||
/* Scratch space */
|
||||
for (i = 0; i < 16; i++) {
|
||||
dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
|
||||
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
|
||||
if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
|
||||
for (i = 0; i < 7; i++) {
|
||||
dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
|
||||
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
|
||||
} else if (IS_GEN2(dev_priv)) {
|
||||
for (i = 0; i < 7; i++)
|
||||
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
|
||||
} else if (HAS_GMCH_DISPLAY(dev_priv)) {
|
||||
for (i = 0; i < 16; i++) {
|
||||
dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
|
||||
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
@ -156,12 +168,25 @@ int i915_restore_state(struct drm_device *dev)
|
||||
/* Memory arbitration state */
|
||||
I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
|
||||
I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
|
||||
/* Scratch space */
|
||||
if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
|
||||
for (i = 0; i < 7; i++) {
|
||||
I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
|
||||
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
|
||||
} else if (IS_GEN2(dev_priv)) {
|
||||
for (i = 0; i < 7; i++)
|
||||
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
|
||||
} else if (HAS_GMCH_DISPLAY(dev_priv)) {
|
||||
for (i = 0; i < 16; i++) {
|
||||
I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
|
||||
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
|
@ -5,7 +5,6 @@
|
||||
*/
|
||||
#include <linux/pci.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/vga_switcheroo.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "i915_drv.h"
|
||||
|
||||
|
@ -94,7 +94,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
|
||||
__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
|
||||
|
||||
crtc_state->update_pipe = false;
|
||||
crtc_state->disable_lp_wm = false;
|
||||
|
||||
return &crtc_state->base;
|
||||
}
|
||||
|
@@ -61,21 +61,21 @@ static const struct {
int clock;
u32 config;
} hdmi_audio_clock[] = {
{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
{ 25175, AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
{ 27027, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
{ 54054, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
{ 74176, AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};

/* HDMI N/CTS table */
#define TMDS_297M 297000
#define TMDS_296M DIV_ROUND_UP(297000 * 1000, 1001)
#define TMDS_296M 296703
static const struct {
int sample_rate;
int clock;
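For reference, the 1.001 ("NTSC-rate") clocks in kHz work out as 25200000/1001 = 25174.8, 74250000/1001 = 74175.8 and 148500000/1001 = 148351.6, which round to 25175, 74176 and 148352 whether you round up or to closest. 297000000/1001 = 296703.3, however, rounds to 296703 while DIV_ROUND_UP gave 296704, so TMDS_296M is the one constant whose value actually changes with the round-to-closest patch.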
@@ -1231,20 +1231,13 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};

static const struct bdb_header *validate_vbt(const void __iomem *_base,
static const struct bdb_header *validate_vbt(const void *base,
size_t size,
const void __iomem *_vbt,
const void *_vbt,
const char *source)
{
/*
* This is the one place where we explicitly discard the address space
* (__iomem) of the BIOS/VBT. (And this will cause a sparse complaint.)
* From now on everything is based on 'base', and treated as regular
* memory.
*/
const void *base = (const void *) _base;
size_t offset = _vbt - _base;
const struct vbt_header *vbt = base + offset;
size_t offset = _vbt - base;
const struct vbt_header *vbt = _vbt;
const struct bdb_header *bdb;

if (offset + sizeof(struct vbt_header) > size) {
@@ -1282,7 +1275,15 @@ static const struct bdb_header *find_vbt(void __iomem *bios, size_t size)
/* Scour memory looking for the VBT signature. */
for (i = 0; i + 4 < size; i++) {
if (ioread32(bios + i) == *((const u32 *) "$VBT")) {
bdb = validate_vbt(bios, size, bios + i, "PCI ROM");
/*
* This is the one place where we explicitly discard the
* address space (__iomem) of the BIOS/VBT. From now on
* everything is based on 'base', and treated as regular
* memory.
*/
void *_bios = (void __force *) bios;

bdb = validate_vbt(_bios, size, _bios + i, "PCI ROM");
break;
}
}
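Moving the cast into find_vbt() means validate_vbt() no longer has to launder address spaces itself: the single (void __force *) cast is the explicit, sparse-visible point where the __iomem annotation is deliberately dropped. A minimal generic illustration of the annotation and that escape hatch (not the driver code):

/* Illustrative fragment: sparse address-space annotations. */
#include <linux/compiler.h>

static void consume_plain(const void *p) { (void)p; }

static void example(const void __iomem *rom)
{
	/* consume_plain(rom); would make sparse warn: wrong address space */
	consume_plain((const void __force *)rom);	/* explicit and intentional */
}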
@ -1157,12 +1157,10 @@ static const char *state_string(bool enabled)
|
||||
void assert_pll(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, bool state)
|
||||
{
|
||||
int reg;
|
||||
u32 val;
|
||||
bool cur_state;
|
||||
|
||||
reg = DPLL(pipe);
|
||||
val = I915_READ(reg);
|
||||
val = I915_READ(DPLL(pipe));
|
||||
cur_state = !!(val & DPLL_VCO_ENABLE);
|
||||
I915_STATE_WARN(cur_state != state,
|
||||
"PLL state assertion failure (expected %s, current %s)\n",
|
||||
@ -1219,20 +1217,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
|
||||
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, bool state)
|
||||
{
|
||||
int reg;
|
||||
u32 val;
|
||||
bool cur_state;
|
||||
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
|
||||
pipe);
|
||||
|
||||
if (HAS_DDI(dev_priv->dev)) {
|
||||
/* DDI does not have a specific FDI_TX register */
|
||||
reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
|
||||
val = I915_READ(reg);
|
||||
u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
|
||||
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
|
||||
} else {
|
||||
reg = FDI_TX_CTL(pipe);
|
||||
val = I915_READ(reg);
|
||||
u32 val = I915_READ(FDI_TX_CTL(pipe));
|
||||
cur_state = !!(val & FDI_TX_ENABLE);
|
||||
}
|
||||
I915_STATE_WARN(cur_state != state,
|
||||
@ -1245,12 +1239,10 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
|
||||
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, bool state)
|
||||
{
|
||||
int reg;
|
||||
u32 val;
|
||||
bool cur_state;
|
||||
|
||||
reg = FDI_RX_CTL(pipe);
|
||||
val = I915_READ(reg);
|
||||
val = I915_READ(FDI_RX_CTL(pipe));
|
||||
cur_state = !!(val & FDI_RX_ENABLE);
|
||||
I915_STATE_WARN(cur_state != state,
|
||||
"FDI RX state assertion failure (expected %s, current %s)\n",
|
||||
@ -1262,7 +1254,6 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
|
||||
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
int reg;
|
||||
u32 val;
|
||||
|
||||
/* ILK FDI PLL is always enabled */
|
||||
@ -1273,20 +1264,17 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
|
||||
if (HAS_DDI(dev_priv->dev))
|
||||
return;
|
||||
|
||||
reg = FDI_TX_CTL(pipe);
|
||||
val = I915_READ(reg);
|
||||
val = I915_READ(FDI_TX_CTL(pipe));
|
||||
I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
|
||||
}
|
||||
|
||||
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, bool state)
|
||||
{
|
||||
int reg;
|
||||
u32 val;
|
||||
bool cur_state;
|
||||
|
||||
reg = FDI_RX_CTL(pipe);
|
||||
val = I915_READ(reg);
|
||||
val = I915_READ(FDI_RX_CTL(pipe));
|
||||
cur_state = !!(val & FDI_RX_PLL_ENABLE);
|
||||
I915_STATE_WARN(cur_state != state,
|
||||
"FDI RX PLL assertion failure (expected %s, current %s)\n",
|
||||
@ -1356,8 +1344,6 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
|
||||
void assert_pipe(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, bool state)
|
||||
{
|
||||
int reg;
|
||||
u32 val;
|
||||
bool cur_state;
|
||||
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
|
||||
pipe);
|
||||
@ -1371,8 +1357,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
|
||||
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
|
||||
cur_state = false;
|
||||
} else {
|
||||
reg = PIPECONF(cpu_transcoder);
|
||||
val = I915_READ(reg);
|
||||
u32 val = I915_READ(PIPECONF(cpu_transcoder));
|
||||
cur_state = !!(val & PIPECONF_ENABLE);
|
||||
}
|
||||
|
||||
@ -1384,12 +1369,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
|
||||
static void assert_plane(struct drm_i915_private *dev_priv,
|
||||
enum plane plane, bool state)
|
||||
{
|
||||
int reg;
|
||||
u32 val;
|
||||
bool cur_state;
|
||||
|
||||
reg = DSPCNTR(plane);
|
||||
val = I915_READ(reg);
|
||||
val = I915_READ(DSPCNTR(plane));
|
||||
cur_state = !!(val & DISPLAY_PLANE_ENABLE);
|
||||
I915_STATE_WARN(cur_state != state,
|
||||
"plane %c assertion failure (expected %s, current %s)\n",
|
||||
@ -1403,14 +1386,11 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
int reg, i;
|
||||
u32 val;
|
||||
int cur_pipe;
|
||||
int i;
|
||||
|
||||
/* Primary planes are fixed to pipes on gen4+ */
|
||||
if (INTEL_INFO(dev)->gen >= 4) {
|
||||
reg = DSPCNTR(pipe);
|
||||
val = I915_READ(reg);
|
||||
u32 val = I915_READ(DSPCNTR(pipe));
|
||||
I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
|
||||
"plane %c assertion failure, should be disabled but not\n",
|
||||
plane_name(pipe));
|
||||
@ -1419,9 +1399,8 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
|
||||
|
||||
/* Need to check both planes against the pipe */
|
||||
for_each_pipe(dev_priv, i) {
|
||||
reg = DSPCNTR(i);
|
||||
val = I915_READ(reg);
|
||||
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
|
||||
u32 val = I915_READ(DSPCNTR(i));
|
||||
enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
|
||||
DISPPLANE_SEL_PIPE_SHIFT;
|
||||
I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
|
||||
"plane %c assertion failure, should be off on pipe %c but is still active\n",
|
||||
@ -1433,33 +1412,29 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
int reg, sprite;
|
||||
u32 val;
|
||||
int sprite;
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 9) {
|
||||
for_each_sprite(dev_priv, pipe, sprite) {
|
||||
val = I915_READ(PLANE_CTL(pipe, sprite));
|
||||
u32 val = I915_READ(PLANE_CTL(pipe, sprite));
|
||||
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
|
||||
"plane %d assertion failure, should be off on pipe %c but is still active\n",
|
||||
sprite, pipe_name(pipe));
|
||||
}
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
for_each_sprite(dev_priv, pipe, sprite) {
|
||||
reg = SPCNTR(pipe, sprite);
|
||||
val = I915_READ(reg);
|
||||
u32 val = I915_READ(SPCNTR(pipe, sprite));
|
||||
I915_STATE_WARN(val & SP_ENABLE,
|
||||
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
|
||||
sprite_name(pipe, sprite), pipe_name(pipe));
|
||||
}
|
||||
} else if (INTEL_INFO(dev)->gen >= 7) {
|
||||
reg = SPRCTL(pipe);
|
||||
val = I915_READ(reg);
|
||||
u32 val = I915_READ(SPRCTL(pipe));
|
||||
I915_STATE_WARN(val & SPRITE_ENABLE,
|
||||
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
|
||||
plane_name(pipe), pipe_name(pipe));
|
||||
} else if (INTEL_INFO(dev)->gen >= 5) {
|
||||
reg = DVSCNTR(pipe);
|
||||
val = I915_READ(reg);
|
||||
u32 val = I915_READ(DVSCNTR(pipe));
|
||||
I915_STATE_WARN(val & DVS_ENABLE,
|
||||
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
|
||||
plane_name(pipe), pipe_name(pipe));
|
||||
@ -1488,12 +1463,10 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
|
||||
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
int reg;
|
||||
u32 val;
|
||||
bool enabled;
|
||||
|
||||
reg = PCH_TRANSCONF(pipe);
|
||||
val = I915_READ(reg);
|
||||
val = I915_READ(PCH_TRANSCONF(pipe));
|
||||
enabled = !!(val & TRANS_ENABLE);
|
||||
I915_STATE_WARN(enabled,
|
||||
"transcoder assertion failed, should be off on pipe %c but is still active\n",
|
||||
@ -1600,21 +1573,18 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
|
||||
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
int reg;
|
||||
u32 val;
|
||||
|
||||
assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
|
||||
assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
|
||||
assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
|
||||
|
||||
reg = PCH_ADPA;
|
||||
val = I915_READ(reg);
|
||||
val = I915_READ(PCH_ADPA);
|
||||
I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
|
||||
"PCH VGA enabled on transcoder %c, should be disabled\n",
|
||||
pipe_name(pipe));
|
||||
|
||||
reg = PCH_LVDS;
|
||||
val = I915_READ(reg);
|
||||
val = I915_READ(PCH_LVDS);
|
||||
I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
|
||||
"PCH LVDS enabled on transcoder %c, should be disabled\n",
|
||||
pipe_name(pipe));
|
||||
@ -4804,6 +4774,7 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
|
||||
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_plane *plane;
|
||||
|
||||
if (atomic->wait_vblank)
|
||||
intel_wait_for_vblank(dev, crtc->pipe);
|
||||
@ -4822,6 +4793,10 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
|
||||
if (atomic->post_enable_primary)
|
||||
intel_post_enable_primary(&crtc->base);
|
||||
|
||||
drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
|
||||
intel_update_sprite_watermarks(plane, &crtc->base,
|
||||
0, 0, 0, false, false);
|
||||
|
||||
memset(atomic, 0, sizeof(*atomic));
|
||||
}
|
||||
|
||||
@ -9952,7 +9927,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
|
||||
}
|
||||
cntl |= pipe << 28; /* Connect to correct pipe */
|
||||
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
if (HAS_DDI(dev))
|
||||
cntl |= CURSOR_PIPE_CSC_ENABLE;
|
||||
}
|
||||
|
||||
@ -10822,7 +10797,7 @@ static bool page_flip_finished(struct intel_crtc *crtc)
|
||||
*/
|
||||
return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
|
||||
crtc->unpin_work->gtt_offset &&
|
||||
g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
|
||||
g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
|
||||
crtc->unpin_work->flip_count);
|
||||
}
|
||||
|
||||
@ -10848,11 +10823,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
}
|
||||
|
||||
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
|
||||
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
|
||||
{
|
||||
/* Ensure that the work item is consistent when activating it ... */
|
||||
smp_wmb();
|
||||
atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
|
||||
atomic_set(&work->pending, INTEL_FLIP_PENDING);
|
||||
/* and that it is marked active as soon as the irq could fire. */
|
||||
smp_wmb();
|
||||
}
|
||||
@ -10888,7 +10863,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
|
||||
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
|
||||
intel_ring_emit(ring, 0); /* aux display base address, unused */
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc);
|
||||
intel_mark_page_flip_active(intel_crtc->unpin_work);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -10920,7 +10895,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
|
||||
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc);
|
||||
intel_mark_page_flip_active(intel_crtc->unpin_work);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -10959,7 +10934,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
|
||||
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
|
||||
intel_ring_emit(ring, pf | pipesrc);
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc);
|
||||
intel_mark_page_flip_active(intel_crtc->unpin_work);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -10995,7 +10970,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
|
||||
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
|
||||
intel_ring_emit(ring, pf | pipesrc);
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc);
|
||||
intel_mark_page_flip_active(intel_crtc->unpin_work);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -11090,7 +11065,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
|
||||
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
|
||||
intel_ring_emit(ring, (MI_NOOP));
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc);
|
||||
intel_mark_page_flip_active(intel_crtc->unpin_work);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -11121,7 +11096,8 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
return ring != i915_gem_request_get_ring(obj->last_write_req);
}

static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
struct intel_unpin_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -11162,11 +11138,12 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
I915_WRITE(PLANE_CTL(pipe, 0), ctl);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset);
I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
POSTING_READ(PLANE_SURF(pipe, 0));
}

static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
struct intel_unpin_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -11186,31 +11163,36 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)

I915_WRITE(reg, dspcntr);

I915_WRITE(DSPSURF(intel_crtc->plane),
intel_crtc->unpin_work->gtt_offset);
I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
POSTING_READ(DSPSURF(intel_crtc->plane));

}

/*
* XXX: This is the temporary way to update the plane registers until we get
* around to using the usual plane update functions for MMIO flips
*/
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_crtc *crtc = mmio_flip->crtc;
struct intel_unpin_work *work;

intel_mark_page_flip_active(intel_crtc);
spin_lock_irq(&crtc->base.dev->event_lock);
work = crtc->unpin_work;
spin_unlock_irq(&crtc->base.dev->event_lock);
if (work == NULL)
return;

intel_pipe_update_start(intel_crtc);
intel_mark_page_flip_active(work);

if (INTEL_INFO(dev)->gen >= 9)
skl_do_mmio_flip(intel_crtc);
intel_pipe_update_start(crtc);

if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
skl_do_mmio_flip(crtc, work);
else
/* use_mmio_flip() retricts MMIO flips to ilk+ */
ilk_do_mmio_flip(intel_crtc);
ilk_do_mmio_flip(crtc, work);

intel_pipe_update_end(intel_crtc);
intel_pipe_update_end(crtc);
}

static void intel_mmio_flip_work_func(struct work_struct *work)
@ -11218,15 +11200,15 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
struct intel_mmio_flip *mmio_flip =
container_of(work, struct intel_mmio_flip, work);

if (mmio_flip->req)
if (mmio_flip->req) {
WARN_ON(__i915_wait_request(mmio_flip->req,
mmio_flip->crtc->reset_counter,
false, NULL,
&mmio_flip->i915->rps.mmioflips));
i915_gem_request_unreference__unlocked(mmio_flip->req);
}

intel_do_mmio_flip(mmio_flip->crtc);

i915_gem_request_unreference__unlocked(mmio_flip->req);
intel_do_mmio_flip(mmio_flip);
kfree(mmio_flip);
}

@ -11427,7 +11409,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
@ -11577,32 +11559,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
static bool intel_wm_need_update(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct intel_plane_state *new = to_intel_plane_state(state);
struct intel_plane_state *cur = to_intel_plane_state(plane->state);

/* Update watermarks on tiling or size changes. */
/* Update watermarks on tiling changes. */
if (!plane->state->fb || !state->fb ||
plane->state->fb->modifier[0] != state->fb->modifier[0] ||
plane->state->rotation != state->rotation ||
drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
plane->state->rotation != state->rotation)
return true;

if (plane->state->crtc_w != state->crtc_w)
return true;

return false;
}

static bool needs_scaling(struct intel_plane_state *state)
{
int src_w = drm_rect_width(&state->src) >> 16;
int src_h = drm_rect_height(&state->src) >> 16;
int dst_w = drm_rect_width(&state->dst);
int dst_h = drm_rect_height(&state->dst);

return (src_w != dst_w || src_h != dst_h);
}

int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
@ -11618,6 +11586,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;

bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;

@ -11735,23 +11704,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
case DRM_PLANE_TYPE_CURSOR:
break;
case DRM_PLANE_TYPE_OVERLAY:
/*
* WaCxSRDisabledForSpriteScaling:ivb
*
* cstate->update_wm was already set above, so this flag will
* take effect when we commit and program watermarks.
*/
if (IS_IVYBRIDGE(dev) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state)) {
to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
} else if (turn_off && !mode_changed) {
if (turn_off && !mode_changed) {
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.update_sprite_watermarks |=
1 << i;
}

break;
}
return 0;
}
@ -14942,13 +14899,12 @@ intel_check_plane_mapping(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
u32 val;

if (INTEL_INFO(dev)->num_pipes == 1)
return true;

reg = DSPCNTR(!crtc->plane);
val = I915_READ(reg);
val = I915_READ(DSPCNTR(!crtc->plane));

if ((val & DISPLAY_PLANE_ENABLE) &&
(!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))

@ -574,8 +574,6 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
edp_notifier);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_div;
u32 pp_ctrl_reg, pp_div_reg;

if (!is_edp(intel_dp) || code != SYS_RESTART)
return 0;
@ -584,6 +582,8 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,

if (IS_VALLEYVIEW(dev)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
u32 pp_ctrl_reg, pp_div_reg;
u32 pp_div;

pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
@ -5536,7 +5536,6 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
struct intel_dp *intel_dp = dev_priv->drrs.dp;
struct intel_crtc_state *config = NULL;
struct intel_crtc *intel_crtc = NULL;
u32 reg, val;
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

if (refresh_rate <= 0) {
@ -5598,9 +5597,10 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
DRM_ERROR("Unsupported refreshrate type\n");
}
} else if (INTEL_INFO(dev)->gen > 6) {
reg = PIPECONF(intel_crtc->config->cpu_transcoder);
val = I915_READ(reg);
u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
u32 val;

val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
if (IS_VALLEYVIEW(dev))
val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;

@ -468,9 +468,6 @@ struct intel_crtc_state {

/* w/a for waiting 2 vblanks during crtc enable */
enum pipe hsw_workaround_pipe;

/* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
bool disable_lp_wm;
};

struct vlv_wm_state {
@ -1399,6 +1396,12 @@ void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);

@ -113,17 +113,18 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
}
}

static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
enum transcoder cpu_transcoder,
struct drm_i915_private *dev_priv)
static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder,
enum hdmi_infoframe_type type,
int i)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_VENDOR:
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder);
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
@ -365,14 +366,13 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
u32 data_reg;
int i;
u32 val = I915_READ(ctl_reg);

data_reg = hsw_infoframe_data_reg(type,
intel_crtc->config->cpu_transcoder,
dev_priv);
data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
if (data_reg == 0)
return;

@ -381,12 +381,14 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,

mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(data_reg + i, *data);
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(data_reg + i, 0);
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), 0);
mmiowb();

val |= hsw_infoframe_enable(type);

@ -114,8 +114,8 @@ intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
I915_WRITE(GMBUS0, 0);
I915_WRITE(GMBUS4, 0);
}

static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@ -261,7 +261,6 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus4_irq_en)
{
int i;
int reg_offset = dev_priv->gpio_mmio_base;
u32 gmbus2 = 0;
DEFINE_WAIT(wait);

@ -271,13 +270,13 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
/* Important: The hw handles only the first bit, so set only one! Since
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves. */
I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
I915_WRITE(GMBUS4, gmbus4_irq_en);

for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);

gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
gmbus2 = I915_READ_NOTRACE(GMBUS2);
if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
break;

@ -285,7 +284,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
}
finish_wait(&dev_priv->gmbus_wait_queue, &wait);

I915_WRITE(GMBUS4 + reg_offset, 0);
I915_WRITE(GMBUS4, 0);

if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
@ -298,20 +297,19 @@ static int
gmbus_wait_idle(struct drm_i915_private *dev_priv)
{
int ret;
int reg_offset = dev_priv->gpio_mmio_base;

#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)

if (!HAS_GMBUS_IRQ(dev_priv->dev))
return wait_for(C, 10);

/* Important: The hw handles only the first bit, so set only one! */
I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
I915_WRITE(GMBUS4, GMBUS_IDLE_EN);

ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies_timeout(10));

I915_WRITE(GMBUS4 + reg_offset, 0);
I915_WRITE(GMBUS4, 0);

if (ret)
return 0;
@ -325,9 +323,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus1_index)
{
int reg_offset = dev_priv->gpio_mmio_base;

I915_WRITE(GMBUS1 + reg_offset,
I915_WRITE(GMBUS1,
gmbus1_index |
GMBUS_CYCLE_WAIT |
(len << GMBUS_BYTE_COUNT_SHIFT) |
@ -342,7 +338,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
if (ret)
return ret;

val = I915_READ(GMBUS3 + reg_offset);
val = I915_READ(GMBUS3);
do {
*buf++ = val & 0xff;
val >>= 8;
@ -380,7 +376,6 @@ static int
gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
unsigned short addr, u8 *buf, unsigned int len)
{
int reg_offset = dev_priv->gpio_mmio_base;
unsigned int chunk_size = len;
u32 val, loop;

@ -390,8 +385,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
len -= 1;
}

I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
I915_WRITE(GMBUS3, val);
I915_WRITE(GMBUS1,
GMBUS_CYCLE_WAIT |
(chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
@ -404,7 +399,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);

I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS3, val);

ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
GMBUS_HW_RDY_EN);
@ -452,7 +447,6 @@ gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
static int
gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
{
int reg_offset = dev_priv->gpio_mmio_base;
u32 gmbus1_index = 0;
u32 gmbus5 = 0;
int ret;
@ -466,13 +460,13 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)

/* GMBUS5 holds 16-bit index */
if (gmbus5)
I915_WRITE(GMBUS5 + reg_offset, gmbus5);
I915_WRITE(GMBUS5, gmbus5);

ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);

/* Clear GMBUS5 after each index transfer */
if (gmbus5)
I915_WRITE(GMBUS5 + reg_offset, 0);
I915_WRITE(GMBUS5, 0);

return ret;
}
@ -486,7 +480,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
int i = 0, inc, try = 0, reg_offset;
int i = 0, inc, try = 0;
int ret = 0;

intel_aux_display_runtime_get(dev_priv);
@ -497,10 +491,8 @@ gmbus_xfer(struct i2c_adapter *adapter,
goto out;
}

reg_offset = dev_priv->gpio_mmio_base;

retry:
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
I915_WRITE(GMBUS0, bus->reg0);

for (; i < num; i += inc) {
inc = 1;
@ -530,7 +522,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
I915_WRITE(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);

/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
@ -541,7 +533,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
adapter->name);
ret = -ETIMEDOUT;
}
I915_WRITE(GMBUS0 + reg_offset, 0);
I915_WRITE(GMBUS0, 0);
ret = ret ?: i;
goto out;

@ -570,9 +562,9 @@ gmbus_xfer(struct i2c_adapter *adapter,
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1 + reg_offset, 0);
I915_WRITE(GMBUS0 + reg_offset, 0);
I915_WRITE(GMBUS1, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1, 0);
I915_WRITE(GMBUS0, 0);

DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
adapter->name, msgs[i].addr,
@ -595,7 +587,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
timeout:
DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0 + reg_offset, 0);
I915_WRITE(GMBUS0, 0);

/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
bus->force_bit = 1;

@ -98,15 +98,11 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lvds_reg, tmp, flags = 0;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
int dotclock;

if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;

tmp = I915_READ(lvds_reg);
tmp = I915_READ(lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
@ -943,6 +939,7 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
struct drm_crtc *crtc;
u32 lvds_reg;
u32 lvds;
int pipe;
u8 pin;
@ -965,8 +962,15 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;

if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;

lvds = I915_READ(lvds_reg);

if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
if ((lvds & LVDS_DETECTED) == 0)
return;
if (dev_priv->vbt.edp_support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
@ -976,8 +980,7 @@ void intel_lvds_init(struct drm_device *dev)

pin = GMBUS_PIN_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS;
if ((I915_READ(reg) & LVDS_PORT_EN) == 0) {
if ((lvds & LVDS_PORT_EN) == 0) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
}
@ -1054,11 +1057,7 @@ void intel_lvds_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;

if (HAS_PCH_SPLIT(dev)) {
lvds_encoder->reg = PCH_LVDS;
} else {
lvds_encoder->reg = LVDS;
}
lvds_encoder->reg = lvds_reg;

/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
@ -1139,7 +1138,6 @@ void intel_lvds_init(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
goto failed;

lvds = I915_READ(LVDS);
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
crtc = intel_get_crtc_for_pipe(dev, pipe);

@ -239,7 +239,7 @@ struct opregion_asle {
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
u32 main_function, sub_function, scic;
u16 pci_swsci;
u32 dslp;
@ -264,7 +264,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
}

/* Driver sleep timeout in ms. */
dslp = ioread32(&swsci->dslp);
dslp = swsci->dslp;
if (!dslp) {
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
@ -277,7 +277,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
}

/* The spec tells us to do this, but we are the only user... */
scic = ioread32(&swsci->scic);
scic = swsci->scic;
if (scic & SWSCI_SCIC_INDICATOR) {
DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
return -EBUSY;
@ -285,8 +285,8 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)

scic = function | SWSCI_SCIC_INDICATOR;

iowrite32(parm, &swsci->parm);
iowrite32(scic, &swsci->scic);
swsci->parm = parm;
swsci->scic = scic;

/* Ensure SCI event is selected and event trigger is cleared. */
pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
@ -301,7 +301,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);

/* Poll for the result. */
#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
if (wait_for(C, dslp)) {
DRM_DEBUG_DRIVER("SWSCI request timed out\n");
return -ETIMEDOUT;
@ -317,7 +317,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
}

if (parm_out)
*parm_out = ioread32(&swsci->parm);
*parm_out = swsci->parm;

return 0;

@ -412,7 +412,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
struct opregion_asle *asle = dev_priv->opregion.asle;

DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);

@ -437,7 +437,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;

drm_modeset_unlock(&dev->mode_config.connection_mutex);

@ -524,14 +524,14 @@ static void asle_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(opregion, struct drm_i915_private, opregion);
struct drm_device *dev = dev_priv->dev;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 aslc_stat = 0;
u32 aslc_req;

if (!asle)
return;

aslc_req = ioread32(&asle->aslc);
aslc_req = asle->aslc;

if (!(aslc_req & ASLC_REQ_MSK)) {
DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
@ -540,34 +540,34 @@ static void asle_work(struct work_struct *work)
}

if (aslc_req & ASLC_SET_ALS_ILLUM)
aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
aslc_stat |= asle_set_als_illum(dev, asle->alsi);

if (aslc_req & ASLC_SET_BACKLIGHT)
aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
aslc_stat |= asle_set_backlight(dev, asle->bclp);

if (aslc_req & ASLC_SET_PFIT)
aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
aslc_stat |= asle_set_pfit(dev, asle->pfit);

if (aslc_req & ASLC_SET_PWM_FREQ)
aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb);

if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
aslc_stat |= asle_set_supported_rotation_angles(dev,
ioread32(&asle->srot));
asle->srot);

if (aslc_req & ASLC_BUTTON_ARRAY)
aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
aslc_stat |= asle_set_button_array(dev, asle->iuer);

if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
aslc_stat |= asle_set_convertible(dev, asle->iuer);

if (aslc_req & ASLC_DOCKING_INDICATOR)
aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
aslc_stat |= asle_set_docking(dev, asle->iuer);

if (aslc_req & ASLC_ISCT_STATE_CHANGE)
aslc_stat |= asle_isct_state(dev);

iowrite32(aslc_stat, &asle->aslc);
asle->aslc = aslc_stat;
}

void intel_opregion_asle_intr(struct drm_device *dev)
@ -592,8 +592,8 @@ static int intel_opregion_video_event(struct notifier_block *nb,
Linux, these are handled by the dock, button and video drivers.
*/

struct opregion_acpi __iomem *acpi;
struct acpi_bus_event *event = data;
struct opregion_acpi *acpi;
int ret = NOTIFY_OK;

if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
@ -604,11 +604,10 @@ static int intel_opregion_video_event(struct notifier_block *nb,

acpi = system_opregion->acpi;

if (event->type == 0x80 &&
(ioread32(&acpi->cevt) & 1) == 0)
if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
ret = NOTIFY_BAD;

iowrite32(0, &acpi->csts);
acpi->csts = 0;

return ret;
}
@ -628,14 +627,14 @@ static u32 get_did(struct intel_opregion *opregion, int i)
u32 did;

if (i < ARRAY_SIZE(opregion->acpi->didl)) {
did = ioread32(&opregion->acpi->didl[i]);
did = opregion->acpi->didl[i];
} else {
i -= ARRAY_SIZE(opregion->acpi->didl);

if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
return 0;

did = ioread32(&opregion->acpi->did2[i]);
did = opregion->acpi->did2[i];
}

return did;
@ -644,14 +643,14 @@ static u32 get_did(struct intel_opregion *opregion, int i)
static void set_did(struct intel_opregion *opregion, int i, u32 val)
{
if (i < ARRAY_SIZE(opregion->acpi->didl)) {
iowrite32(val, &opregion->acpi->didl[i]);
opregion->acpi->didl[i] = val;
} else {
i -= ARRAY_SIZE(opregion->acpi->didl);

if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
return;

iowrite32(val, &opregion->acpi->did2[i]);
opregion->acpi->did2[i] = val;
}
}

@ -773,7 +772,7 @@ static void intel_setup_cadls(struct drm_device *dev)
* there are less than eight devices. */
do {
disp_id = get_did(opregion, i);
iowrite32(disp_id, &opregion->acpi->cadl[i]);
opregion->acpi->cadl[i] = disp_id;
} while (++i < 8 && disp_id != 0);
}

@ -792,16 +791,16 @@ void intel_opregion_init(struct drm_device *dev)
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
* We don't actually need to do anything with them. */
iowrite32(0, &opregion->acpi->csts);
iowrite32(1, &opregion->acpi->drdy);
opregion->acpi->csts = 0;
opregion->acpi->drdy = 1;

system_opregion = opregion;
register_acpi_notifier(&intel_opregion_notifier);
}

if (opregion->asle) {
iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche);
iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy);
opregion->asle->tche = ASLE_TCHE_BLC_EN;
opregion->asle->ardy = ASLE_ARDY_READY;
}
}

@ -814,19 +813,19 @@ void intel_opregion_fini(struct drm_device *dev)
return;

if (opregion->asle)
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
opregion->asle->ardy = ASLE_ARDY_NOT_READY;

cancel_work_sync(&dev_priv->opregion.asle_work);

if (opregion->acpi) {
iowrite32(0, &opregion->acpi->drdy);
opregion->acpi->drdy = 0;

system_opregion = NULL;
unregister_acpi_notifier(&intel_opregion_notifier);
}

/* just clear all opregion memory pointers now */
iounmap(opregion->header);
memunmap(opregion->header);
opregion->header = NULL;
opregion->acpi = NULL;
opregion->swsci = NULL;
@ -899,10 +898,10 @@ int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
void __iomem *base;
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
void *base;

BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
@ -920,11 +919,11 @@ int intel_opregion_setup(struct drm_device *dev)
INIT_WORK(&opregion->asle_work, asle_work);
#endif

base = acpi_os_ioremap(asls, OPREGION_SIZE);
base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
if (!base)
return -ENOMEM;

memcpy_fromio(buf, base, sizeof(buf));
memcpy(buf, base, sizeof(buf));

if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
@ -936,7 +935,7 @@ int intel_opregion_setup(struct drm_device *dev)

opregion->lid_state = base + ACPI_CLID;

mboxes = ioread32(&opregion->header->mboxes);
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
@ -951,12 +950,12 @@ int intel_opregion_setup(struct drm_device *dev)
DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;

iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
}

return 0;

err_out:
iounmap(base);
memunmap(base);
return err;
}

@ -381,7 +381,7 @@ intel_panel_detect(struct drm_device *dev)

/* Assume that the BIOS does not lie through the OpRegion... */
if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
return *dev_priv->opregion.lid_state & 0x1 ?
connector_status_connected :
connector_status_disconnected;
}

@ -52,56 +52,10 @@
|
||||
#define INTEL_RC6p_ENABLE (1<<1)
|
||||
#define INTEL_RC6pp_ENABLE (1<<2)
|
||||
|
||||
static void gen9_init_clock_gating(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
/* WaEnableLbsSlaRetryTimerDecrement:skl */
|
||||
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
|
||||
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
|
||||
|
||||
/* WaDisableKillLogic:bxt,skl */
|
||||
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
|
||||
ECOCHK_DIS_TLB);
|
||||
}
|
||||
|
||||
static void skl_init_clock_gating(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
gen9_init_clock_gating(dev);
|
||||
|
||||
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
|
||||
/* WaDisableHDCInvalidation:skl */
|
||||
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
|
||||
BDW_DISABLE_HDC_INVALIDATION);
|
||||
|
||||
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
|
||||
I915_WRITE(FF_SLICE_CS_CHICKEN2,
|
||||
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
|
||||
}
|
||||
|
||||
/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
|
||||
* involving this register should also be added to WA batch as required.
|
||||
*/
|
||||
if (INTEL_REVID(dev) <= SKL_REVID_E0)
|
||||
/* WaDisableLSQCROPERFforOCL:skl */
|
||||
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
|
||||
GEN8_LQSC_RO_PERF_DIS);
|
||||
|
||||
/* WaEnableGapsTsvCreditFix:skl */
|
||||
if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
|
||||
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
|
||||
GEN9_GAPS_TSV_CREDIT_DISABLE));
|
||||
}
|
||||
}
|
||||
|
||||
static void bxt_init_clock_gating(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
gen9_init_clock_gating(dev);
|
||||
|
||||
/* WaDisableSDEUnitClockGating:bxt */
|
||||
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
|
||||
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
|
||||
@ -112,17 +66,6 @@ static void bxt_init_clock_gating(struct drm_device *dev)
|
||||
*/
|
||||
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
|
||||
GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
|
||||
|
||||
/* WaStoreMultiplePTEenable:bxt */
|
||||
/* This is a requirement according to Hardware specification */
|
||||
if (INTEL_REVID(dev) == BXT_REVID_A0)
|
||||
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
|
||||
|
||||
/* WaSetClckGatingDisableMedia:bxt */
|
||||
if (INTEL_REVID(dev) == BXT_REVID_A0) {
|
||||
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
|
||||
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
|
||||
}
|
||||
}
|
||||
|
||||
static void i915_pineview_get_mem_freq(struct drm_device *dev)
|
||||
@ -1765,6 +1708,13 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
|
||||
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
|
||||
}
|
||||
|
||||
struct skl_pipe_wm_parameters {
|
||||
bool active;
|
||||
uint32_t pipe_htotal;
|
||||
uint32_t pixel_rate; /* in KHz */
|
||||
struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
|
||||
};
|
||||
|
||||
struct ilk_wm_maximums {
|
||||
uint16_t pri;
|
||||
uint16_t spr;
|
||||
@ -2805,40 +2755,18 @@ static bool ilk_disable_lp_wm(struct drm_device *dev)
|
||||
#define SKL_DDB_SIZE 896 /* in blocks */
|
||||
#define BXT_DDB_SIZE 512
|
||||
|
||||
/*
|
||||
* Return the index of a plane in the SKL DDB and wm result arrays. Primary
|
||||
* plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
|
||||
* other universal planes are in indices 1..n. Note that this may leave unused
|
||||
* indices between the top "sprite" plane and the cursor.
|
||||
*/
|
||||
static int
|
||||
skl_wm_plane_id(const struct intel_plane *plane)
|
||||
{
|
||||
switch (plane->base.type) {
|
||||
case DRM_PLANE_TYPE_PRIMARY:
|
||||
return 0;
|
||||
case DRM_PLANE_TYPE_CURSOR:
|
||||
return PLANE_CURSOR;
|
||||
case DRM_PLANE_TYPE_OVERLAY:
|
||||
return plane->plane + 1;
|
||||
default:
|
||||
MISSING_CASE(plane->base.type);
|
||||
return plane->plane;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
|
||||
const struct intel_crtc_state *cstate,
|
||||
struct drm_crtc *for_crtc,
|
||||
const struct intel_wm_config *config,
|
||||
const struct skl_pipe_wm_parameters *params,
|
||||
struct skl_ddb_entry *alloc /* out */)
|
||||
{
|
||||
struct drm_crtc *for_crtc = cstate->base.crtc;
|
||||
struct drm_crtc *crtc;
|
||||
unsigned int pipe_size, ddb_size;
|
||||
int nth_active_pipe;
|
||||
|
||||
if (!cstate->base.active) {
|
||||
if (!params->active) {
|
||||
alloc->start = 0;
|
||||
alloc->end = 0;
|
||||
return;
|
||||
@ -2904,29 +2832,19 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
|
||||
const struct drm_plane_state *pstate,
|
||||
int y)
|
||||
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
|
||||
struct drm_framebuffer *fb = pstate->fb;
|
||||
|
||||
/* for planar format */
|
||||
if (fb->pixel_format == DRM_FORMAT_NV12) {
|
||||
if (p->y_bytes_per_pixel) {
|
||||
if (y) /* y-plane data rate */
|
||||
return intel_crtc->config->pipe_src_w *
|
||||
intel_crtc->config->pipe_src_h *
|
||||
drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
|
||||
else /* uv-plane data rate */
|
||||
return (intel_crtc->config->pipe_src_w/2) *
|
||||
(intel_crtc->config->pipe_src_h/2) *
|
||||
drm_format_plane_cpp(fb->pixel_format, 1);
|
||||
return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
|
||||
}
|
||||
|
||||
/* for packed formats */
|
||||
return intel_crtc->config->pipe_src_w *
|
||||
intel_crtc->config->pipe_src_h *
|
||||
drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2935,51 +2853,46 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
|
||||
* 3 * 4096 * 8192 * 4 < 2^32
|
||||
*/
|
||||
static unsigned int
|
||||
skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
|
||||
skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
|
||||
const struct skl_pipe_wm_parameters *params)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
|
||||
struct drm_device *dev = intel_crtc->base.dev;
|
||||
const struct intel_plane *intel_plane;
|
||||
unsigned int total_data_rate = 0;
|
||||
int plane;
|
||||
|
||||
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
|
||||
const struct drm_plane_state *pstate = intel_plane->base.state;
|
||||
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
|
||||
const struct intel_plane_wm_parameters *p;
|
||||
|
||||
if (pstate->fb == NULL)
|
||||
p = ¶ms->plane[plane];
|
||||
if (!p->enabled)
|
||||
continue;
|
||||
|
||||
/* packed/uv */
|
||||
total_data_rate += skl_plane_relative_data_rate(cstate,
|
||||
pstate,
|
||||
0);
|
||||
|
||||
if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
|
||||
/* y-plane */
|
||||
total_data_rate += skl_plane_relative_data_rate(cstate,
|
||||
pstate,
|
||||
1);
|
||||
total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
|
||||
if (p->y_bytes_per_pixel) {
|
||||
total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
|
||||
}
|
||||
}
|
||||
|
||||
return total_data_rate;
|
||||
}
|
||||
|
||||
static void
|
||||
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
|
||||
skl_allocate_pipe_ddb(struct drm_crtc *crtc,
|
||||
const struct intel_wm_config *config,
|
||||
const struct skl_pipe_wm_parameters *params,
|
||||
struct skl_ddb_allocation *ddb /* out */)
|
||||
{
|
||||
struct drm_crtc *crtc = cstate->base.crtc;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_plane *intel_plane;
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
|
||||
uint16_t alloc_size, start, cursor_blocks;
|
||||
uint16_t minimum[I915_MAX_PLANES];
|
||||
uint16_t y_minimum[I915_MAX_PLANES];
|
||||
unsigned int total_data_rate;
|
||||
int plane;
|
||||
|
||||
skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
|
||||
skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
|
||||
alloc_size = skl_ddb_entry_size(alloc);
|
||||
if (alloc_size == 0) {
|
||||
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
|
||||
@ -2996,20 +2909,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
|
||||
alloc->end -= cursor_blocks;
|
||||
|
||||
/* 1. Allocate the mininum required blocks for each active plane */
|
||||
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
|
||||
struct drm_plane *plane = &intel_plane->base;
|
||||
struct drm_framebuffer *fb = plane->fb;
|
||||
int id = skl_wm_plane_id(intel_plane);
|
||||
for_each_plane(dev_priv, pipe, plane) {
|
||||
const struct intel_plane_wm_parameters *p;
|
||||
|
||||
if (fb == NULL)
|
||||
continue;
|
||||
if (plane->type == DRM_PLANE_TYPE_CURSOR)
|
||||
p = ¶ms->plane[plane];
|
||||
if (!p->enabled)
|
||||
continue;
|
||||
|
||||
minimum[id] = 8;
|
||||
alloc_size -= minimum[id];
|
||||
y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
|
||||
alloc_size -= y_minimum[id];
|
||||
minimum[plane] = 8;
|
||||
alloc_size -= minimum[plane];
|
||||
y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
|
||||
alloc_size -= y_minimum[plane];
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3018,50 +2928,45 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
|
||||
*
|
||||
* FIXME: we may not allocate every single block here.
|
||||
*/
|
||||
total_data_rate = skl_get_total_relative_data_rate(cstate);
|
||||
total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
|
||||
|
||||
start = alloc->start;
|
||||
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
|
||||
struct drm_plane *plane = &intel_plane->base;
|
||||
struct drm_plane_state *pstate = intel_plane->base.state;
|
||||
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
|
||||
const struct intel_plane_wm_parameters *p;
|
||||
unsigned int data_rate, y_data_rate;
|
||||
uint16_t plane_blocks, y_plane_blocks = 0;
|
||||
int id = skl_wm_plane_id(intel_plane);
|
||||
|
||||
if (pstate->fb == NULL)
|
||||
continue;
|
||||
if (plane->type == DRM_PLANE_TYPE_CURSOR)
|
||||
p = ¶ms->plane[plane];
|
||||
if (!p->enabled)
|
||||
continue;
|
||||
|
||||
data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
|
||||
data_rate = skl_plane_relative_data_rate(p, 0);
|
||||
|
||||
/*
|
||||
* allocation for (packed formats) or (uv-plane part of planar format):
|
||||
* promote the expression to 64 bits to avoid overflowing, the
|
||||
* result is < available as data_rate / total_data_rate < 1
|
||||
*/
|
||||
plane_blocks = minimum[id];
|
||||
plane_blocks = minimum[plane];
|
||||
plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
|
||||
total_data_rate);
|
||||
|
||||
ddb->plane[pipe][id].start = start;
|
||||
ddb->plane[pipe][id].end = start + plane_blocks;
|
||||
ddb->plane[pipe][plane].start = start;
|
||||
ddb->plane[pipe][plane].end = start + plane_blocks;
|
||||
|
||||
start += plane_blocks;
|
||||
|
||||
/*
|
||||
* allocation for y_plane part of planar format:
|
||||
*/
|
||||
if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
|
||||
y_data_rate = skl_plane_relative_data_rate(cstate,
|
||||
pstate,
|
||||
1);
|
||||
y_plane_blocks = y_minimum[id];
|
||||
if (p->y_bytes_per_pixel) {
|
||||
y_data_rate = skl_plane_relative_data_rate(p, 1);
|
||||
y_plane_blocks = y_minimum[plane];
|
||||
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
|
||||
total_data_rate);
|
||||
|
||||
ddb->y_plane[pipe][id].start = start;
|
||||
ddb->y_plane[pipe][id].end = start + y_plane_blocks;
|
||||
ddb->y_plane[pipe][plane].start = start;
|
||||
ddb->y_plane[pipe][plane].end = start + y_plane_blocks;
|
||||
|
||||
start += y_plane_blocks;
|
||||
}
|
||||
@ -3148,21 +3053,87 @@ static void skl_compute_wm_global_parameters(struct drm_device *dev,
|
||||
struct intel_wm_config *config)
|
||||
{
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_plane *plane;
|
||||
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
|
||||
config->num_pipes_active += to_intel_crtc(crtc)->active;
|
||||
|
||||
/* FIXME: I don't think we need those two global parameters on SKL */
|
||||
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
|
||||
struct intel_plane *intel_plane = to_intel_plane(plane);
|
||||
|
||||
config->sprites_enabled |= intel_plane->wm.enabled;
|
||||
config->sprites_scaled |= intel_plane->wm.scaled;
|
||||
}
|
||||
}
|
||||
|
||||
static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
|
||||
struct skl_pipe_wm_parameters *p)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
struct drm_plane *plane;
|
||||
struct drm_framebuffer *fb;
|
||||
int i = 1; /* Index for sprite planes start */
|
||||
|
||||
p->active = intel_crtc->active;
|
||||
if (p->active) {
|
||||
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
|
||||
p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
|
||||
|
||||
fb = crtc->primary->state->fb;
|
||||
/* For planar: Bpp is for uv plane, y_Bpp is for y plane */
|
||||
if (fb) {
|
||||
p->plane[0].enabled = true;
|
||||
p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
|
||||
drm_format_plane_cpp(fb->pixel_format, 1) :
|
||||
drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
|
||||
drm_format_plane_cpp(fb->pixel_format, 0) : 0;
|
||||
p->plane[0].tiling = fb->modifier[0];
|
||||
} else {
|
||||
p->plane[0].enabled = false;
|
||||
p->plane[0].bytes_per_pixel = 0;
|
||||
p->plane[0].y_bytes_per_pixel = 0;
|
||||
p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
|
||||
}
|
||||
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
|
||||
p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
|
||||
p->plane[0].rotation = crtc->primary->state->rotation;
|
||||
|
||||
fb = crtc->cursor->state->fb;
|
||||
p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0;
|
||||
if (fb) {
|
||||
p->plane[PLANE_CURSOR].enabled = true;
|
||||
p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8;
|
||||
p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w;
|
||||
p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h;
|
||||
} else {
|
||||
p->plane[PLANE_CURSOR].enabled = false;
|
||||
p->plane[PLANE_CURSOR].bytes_per_pixel = 0;
|
||||
p->plane[PLANE_CURSOR].horiz_pixels = 64;
|
||||
p->plane[PLANE_CURSOR].vert_pixels = 64;
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
|
||||
struct intel_plane *intel_plane = to_intel_plane(plane);
|
||||
|
||||
if (intel_plane->pipe == pipe &&
|
||||
plane->type == DRM_PLANE_TYPE_OVERLAY)
|
||||
p->plane[i++] = intel_plane->wm;
|
||||
}
|
||||
}
|
||||
|
||||
static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
|
||||
struct intel_crtc_state *cstate,
|
||||
struct intel_plane *intel_plane,
|
||||
struct skl_pipe_wm_parameters *p,
|
||||
struct intel_plane_wm_parameters *p_params,
|
||||
uint16_t ddb_allocation,
|
||||
int level,
|
||||
uint16_t *out_blocks, /* out */
|
||||
uint8_t *out_lines /* out */)
|
||||
{
|
||||
struct drm_plane *plane = &intel_plane->base;
|
||||
struct drm_framebuffer *fb = plane->state->fb;
|
||||
uint32_t latency = dev_priv->wm.skl_latency[level];
|
||||
uint32_t method1, method2;
|
||||
uint32_t plane_bytes_per_line, plane_blocks_per_line;
|
||||
@ -3170,35 +3141,31 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
|
||||
uint32_t selected_result;
|
||||
uint8_t bytes_per_pixel;
|
||||
|
||||
if (latency == 0 || !cstate->base.active || !fb)
|
||||
if (latency == 0 || !p->active || !p_params->enabled)
|
||||
return false;
|
||||
|
||||
bytes_per_pixel = (fb->pixel_format == DRM_FORMAT_NV12) ?
|
||||
drm_format_plane_cpp(DRM_FORMAT_NV12, 0) :
|
||||
drm_format_plane_cpp(DRM_FORMAT_NV12, 1);
|
||||
method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
|
||||
bytes_per_pixel = p_params->y_bytes_per_pixel ?
|
||||
p_params->y_bytes_per_pixel :
|
||||
p_params->bytes_per_pixel;
|
||||
method1 = skl_wm_method1(p->pixel_rate,
|
||||
bytes_per_pixel,
|
||||
latency);
|
||||
method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
|
||||
cstate->base.adjusted_mode.crtc_htotal,
|
||||
cstate->pipe_src_w,
|
||||
method2 = skl_wm_method2(p->pixel_rate,
|
||||
p->pipe_htotal,
|
||||
p_params->horiz_pixels,
|
||||
bytes_per_pixel,
|
||||
fb->modifier[0],
|
||||
p_params->tiling,
|
||||
latency);
|
||||
|
||||
plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
|
||||
plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
|
||||
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
|
||||
|
||||
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
|
||||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
|
||||
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
|
||||
p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
|
||||
uint32_t min_scanlines = 4;
|
||||
uint32_t y_tile_minimum;
|
||||
if (intel_rotation_90_or_270(plane->state->rotation)) {
|
||||
int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
|
||||
drm_format_plane_cpp(fb->pixel_format, 1) :
|
||||
drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
|
||||
switch (bpp) {
|
||||
if (intel_rotation_90_or_270(p_params->rotation)) {
|
||||
switch (p_params->bytes_per_pixel) {
|
||||
case 1:
|
||||
min_scanlines = 16;
|
||||
break;
|
||||
@ -3222,8 +3189,8 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
|
||||
res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
|
||||
|
||||
if (level >= 1 && level <= 7) {
|
||||
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
|
||||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
|
||||
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
|
||||
p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
|
||||
res_lines += 4;
|
||||
else
|
||||
res_blocks++;
|
||||
@ -3240,80 +3207,84 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
|
||||
|
||||
static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
|
||||
struct skl_ddb_allocation *ddb,
|
||||
struct intel_crtc_state *cstate,
|
||||
struct skl_pipe_wm_parameters *p,
|
||||
enum pipe pipe,
|
||||
int level,
|
||||
int num_planes,
|
||||
struct skl_wm_level *result)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
|
||||
struct intel_plane *intel_plane;
|
||||
uint16_t ddb_blocks;
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
|
||||
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
|
||||
int i = skl_wm_plane_id(intel_plane);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_planes; i++) {
|
||||
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
|
||||
|
||||
result->plane_en[i] = skl_compute_plane_wm(dev_priv,
|
||||
cstate,
|
||||
intel_plane,
|
||||
p, &p->plane[i],
|
||||
ddb_blocks,
|
||||
level,
|
||||
&result->plane_res_b[i],
|
||||
&result->plane_res_l[i]);
|
||||
}
|
||||
|
||||
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]);
|
||||
result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p,
|
||||
&p->plane[PLANE_CURSOR],
|
||||
ddb_blocks, level,
|
||||
&result->plane_res_b[PLANE_CURSOR],
|
||||
&result->plane_res_l[PLANE_CURSOR]);
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
|
||||
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
|
||||
{
|
||||
if (!cstate->base.active)
|
||||
if (!to_intel_crtc(crtc)->active)
|
||||
return 0;
|
||||
|
||||
if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
|
||||
if (WARN_ON(p->pixel_rate == 0))
|
||||
return 0;
|
||||
|
||||
return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
|
||||
skl_pipe_pixel_rate(cstate));
|
||||
return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
|
||||
}
|
||||
|
||||
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
|
||||
static void skl_compute_transition_wm(struct drm_crtc *crtc,
|
||||
struct skl_pipe_wm_parameters *params,
|
||||
struct skl_wm_level *trans_wm /* out */)
|
||||
{
|
||||
struct drm_crtc *crtc = cstate->base.crtc;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_plane *intel_plane;
|
||||
int i;
|
||||
|
||||
if (!cstate->base.active)
|
||||
if (!params->active)
|
||||
return;
|
||||
|
||||
/* Until we know more, just disable transition WMs */
|
||||
for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
|
||||
int i = skl_wm_plane_id(intel_plane);
|
||||
|
||||
for (i = 0; i < intel_num_planes(intel_crtc); i++)
|
||||
trans_wm->plane_en[i] = false;
|
||||
}
|
||||
trans_wm->plane_en[PLANE_CURSOR] = false;
|
||||
}
|
||||
|
||||
static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
|
||||
static void skl_compute_pipe_wm(struct drm_crtc *crtc,
|
||||
struct skl_ddb_allocation *ddb,
|
||||
struct skl_pipe_wm_parameters *params,
|
||||
struct skl_pipe_wm *pipe_wm)
|
||||
{
|
||||
struct drm_device *dev = cstate->base.crtc->dev;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
const struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
int level, max_level = ilk_wm_max_level(dev);
|
||||
|
||||
for (level = 0; level <= max_level; level++) {
|
||||
skl_compute_wm_level(dev_priv, ddb, cstate,
|
||||
level, &pipe_wm->wm[level]);
|
||||
skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
|
||||
level, intel_num_planes(intel_crtc),
|
||||
&pipe_wm->wm[level]);
|
||||
}
|
||||
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
|
||||
pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
|
||||
|
||||
skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
|
||||
skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
|
||||
}
|
||||
|
||||
static void skl_compute_wm_results(struct drm_device *dev,
|
||||
struct skl_pipe_wm_parameters *p,
|
||||
struct skl_pipe_wm *p_wm,
|
||||
struct skl_wm_values *r,
|
||||
struct intel_crtc *intel_crtc)
|
||||
@ -3557,15 +3528,16 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
|
||||
}
|
||||
|
||||
static bool skl_update_pipe_wm(struct drm_crtc *crtc,
|
||||
struct skl_pipe_wm_parameters *params,
|
||||
struct intel_wm_config *config,
|
||||
struct skl_ddb_allocation *ddb, /* out */
|
||||
struct skl_pipe_wm *pipe_wm /* out */)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
|
||||
|
||||
skl_allocate_pipe_ddb(cstate, config, ddb);
|
||||
skl_compute_pipe_wm(cstate, ddb, pipe_wm);
|
||||
skl_compute_wm_pipe_parameters(crtc, params);
|
||||
skl_allocate_pipe_ddb(crtc, config, params, ddb);
|
||||
skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
|
||||
|
||||
if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
|
||||
return false;
|
||||
@ -3598,6 +3570,7 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
|
||||
*/
|
||||
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
|
||||
base.head) {
|
||||
struct skl_pipe_wm_parameters params = {};
|
||||
struct skl_pipe_wm pipe_wm = {};
|
||||
bool wm_changed;
|
||||
|
||||
@ -3607,7 +3580,8 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
|
||||
if (!intel_crtc->active)
|
||||
continue;
|
||||
|
||||
wm_changed = skl_update_pipe_wm(&intel_crtc->base, config,
|
||||
wm_changed = skl_update_pipe_wm(&intel_crtc->base,
|
||||
¶ms, config,
|
||||
&r->ddb, &pipe_wm);
|
||||
|
||||
/*
|
||||
@ -3617,7 +3591,7 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
|
||||
*/
|
||||
WARN_ON(!wm_changed);
|
||||
|
||||
skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
|
||||
skl_compute_wm_results(dev, ¶ms, &pipe_wm, r, intel_crtc);
|
||||
r->dirty[intel_crtc->pipe] = true;
|
||||
}
|
||||
}
|
||||
@ -3647,6 +3621,7 @@ static void skl_update_wm(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_pipe_wm_parameters params = {};
struct skl_wm_values *results = &dev_priv->wm.skl_results;
struct skl_pipe_wm pipe_wm = {};
struct intel_wm_config config = {};
@ -3659,10 +3634,11 @@ static void skl_update_wm(struct drm_crtc *crtc)

skl_compute_wm_global_parameters(dev, &config);

if (!skl_update_pipe_wm(crtc, &config, &results->ddb, &pipe_wm))
if (!skl_update_pipe_wm(crtc, &params, &config,
&results->ddb, &pipe_wm))
return;

skl_compute_wm_results(dev, &pipe_wm, results, intel_crtc);
skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
results->dirty[intel_crtc->pipe] = true;

skl_update_other_pipe_wm(dev, crtc, &config, results);
@ -3673,6 +3649,39 @@ static void skl_update_wm(struct drm_crtc *crtc)
dev_priv->wm.skl_hw = *results;
}

static void
skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enabled, bool scaled)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane->state->fb;

intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;

/* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
intel_plane->wm.bytes_per_pixel =
(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
intel_plane->wm.y_bytes_per_pixel =
(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;

/*
* Framebuffer can be NULL on plane disable, but it does not
* matter for watermarks if we assume no tiling in that case.
*/
if (fb)
intel_plane->wm.tiling = fb->modifier[0];
intel_plane->wm.rotation = plane->state->rotation;

skl_update_wm(crtc);
}

static void ilk_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@ -3688,18 +3697,6 @@ static void ilk_update_wm(struct drm_crtc *crtc)

WARN_ON(cstate->base.active != intel_crtc->active);

/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
* when scaling is disabled.
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (cstate->disable_lp_wm) {
ilk_disable_lp_wm(dev);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}

intel_compute_pipe_wm(cstate, &pipe_wm);

if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
@ -3731,6 +3728,28 @@ static void ilk_update_wm(struct drm_crtc *crtc)
ilk_write_wm_values(dev_priv, &results);
}

static void
ilk_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enabled, bool scaled)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);

/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
* when scaling is disabled.
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
intel_wait_for_vblank(dev, intel_plane->pipe);

ilk_update_wm(crtc);
}

static void skl_pipe_wm_active_state(uint32_t val,
struct skl_pipe_wm *active,
bool is_transwm,
@ -4108,6 +4127,21 @@ void intel_update_watermarks(struct drm_crtc *crtc)
dev_priv->display.update_wm(crtc);
}

void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled)
{
struct drm_i915_private *dev_priv = plane->dev->dev_private;

if (dev_priv->display.update_sprite_wm)
dev_priv->display.update_sprite_wm(plane, crtc,
sprite_width, sprite_height,
pixel_size, enabled, scaled);
}

/**
* Lock protecting IPS related data structures
*/
@ -7018,10 +7052,8 @@ void intel_init_pm(struct drm_device *dev)
if (IS_BROXTON(dev))
dev_priv->display.init_clock_gating =
bxt_init_clock_gating;
else if (IS_SKYLAKE(dev))
dev_priv->display.init_clock_gating =
skl_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
} else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);

@ -7030,6 +7062,7 @@ void intel_init_pm(struct drm_device *dev)
(!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");

@ -73,14 +73,14 @@ static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
}

static void intel_psr_write_vsc(struct intel_dp *intel_dp,
struct edp_vsc_psr *vsc_psr)
const struct edp_vsc_psr *vsc_psr)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config->cpu_transcoder);
u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config->cpu_transcoder);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;

@ -90,12 +90,14 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
I915_WRITE(ctl_reg, 0);
POSTING_READ(ctl_reg);

for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
if (i < sizeof(struct edp_vsc_psr))
I915_WRITE(data_reg + i, *data++);
else
I915_WRITE(data_reg + i, 0);
for (i = 0; i < sizeof(*vsc_psr); i += 4) {
I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
i >> 2), *data);
data++;
}
for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
i >> 2), 0);

I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
POSTING_READ(ctl_reg);

@ -906,6 +906,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;

/* WaEnableLbsSlaRetryTimerDecrement:skl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

/* WaDisableKillLogic:bxt,skl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);

/* WaDisablePartialInstShootdown:skl,bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
@ -1018,7 +1026,6 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
return 0;
}


static int skl_init_workarounds(struct intel_engine_cs *ring)
{
int ret;
@ -1029,6 +1036,30 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
if (ret)
return ret;

if (INTEL_REVID(dev) <= SKL_REVID_D0) {
/* WaDisableHDCInvalidation:skl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);

/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
}

/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
* involving this register should also be added to WA batch as required.
*/
if (INTEL_REVID(dev) <= SKL_REVID_E0)
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);

/* WaEnableGapsTsvCreditFix:skl */
if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
}

/* WaDisablePowerCompilerClockGating:skl */
if (INTEL_REVID(dev) == SKL_REVID_B0)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
@ -1072,6 +1103,17 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
if (ret)
return ret;

/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */
if (INTEL_REVID(dev) == BXT_REVID_A0)
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

/* WaSetClckGatingDisableMedia:bxt */
if (INTEL_REVID(dev) == BXT_REVID_A0) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}

/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);

@ -192,6 +192,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div, stride;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(drm_plane->state)->ckey;
unsigned long surf_addr;
@ -202,6 +203,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
int scaler_id;

plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;

plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
@ -210,6 +212,10 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
rotation = drm_plane->state->rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);

intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);

stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);

@ -291,6 +297,8 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)

I915_WRITE(PLANE_SURF(pipe, plane), 0);
POSTING_READ(PLANE_SURF(pipe, plane));

intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}

static void
@ -533,6 +541,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;

intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
true,
src_w != crtc_w || src_h != crtc_h);

/* Sizes are 0 based */
src_w--;
src_h--;
@ -601,7 +613,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;

I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
I915_WRITE(SPRCTL(pipe), 0);
/* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), 0);
@ -666,6 +678,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (IS_GEN6(dev))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */

intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);

/* Sizes are 0 based */
src_w--;
src_h--;

@ -525,7 +525,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
#define NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
@ -727,7 +727,7 @@ static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
if (NEEDS_FORCE_WAKE(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
@ -761,7 +761,7 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
#define SKL_NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
@ -770,9 +770,9 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
if (!SKL_NEEDS_FORCE_WAKE(reg)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
fw_engine = FORCEWAKE_MEDIA; \
@ -868,7 +868,7 @@ static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
if (NEEDS_FORCE_WAKE(reg)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
__raw_i915_write##x(dev_priv, reg, val); \
@ -883,7 +883,7 @@ static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
if (NEEDS_FORCE_WAKE(reg)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
@ -985,7 +985,7 @@ gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
if (!SKL_NEEDS_FORCE_WAKE(reg) || \
is_gen9_shadowed(dev_priv, reg)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \