/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
|
2019-10-17 20:38:31 +07:00
|
|
|
#include "gt/intel_rc6.h"
|
2019-04-25 00:48:39 +07:00
|
|
|
|
2019-05-02 22:02:43 +07:00
|
|
|
#include "i915_debugfs.h"
|
2016-06-24 20:00:22 +07:00
|
|
|
#include "i915_drv.h"
|
2019-04-29 19:29:27 +07:00
|
|
|
#include "i915_irq.h"
|
2019-08-08 20:42:47 +07:00
|
|
|
#include "i915_memcpy.h"
|
2019-08-08 20:42:44 +07:00
|
|
|
#include "i915_perf.h"
|
2018-03-06 19:28:56 +07:00
|
|
|
#include "i915_query.h"
|
2019-08-08 20:42:46 +07:00
|
|
|
#include "i915_suspend.h"
|
2019-10-04 19:20:18 +07:00
|
|
|
#include "i915_switcheroo.h"
|
2019-08-08 20:42:45 +07:00
|
|
|
#include "i915_sysfs.h"
|
2019-04-05 18:00:03 +07:00
|
|
|
#include "i915_trace.h"
|
2016-06-24 20:00:22 +07:00
|
|
|
#include "i915_vgpu.h"
|
2019-04-05 18:00:07 +07:00
|
|
|
#include "intel_csr.h"
|
2019-10-27 03:20:32 +07:00
|
|
|
#include "intel_memory_region.h"
|
2019-04-05 18:00:15 +07:00
|
|
|
#include "intel_pm.h"
|

static struct drm_driver driver;

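/*
 * Register state saved and restored around Valleyview S0ix transitions,
 * grouped by the hardware unit that owns the registers (GAM, MBC, GCP,
 * GPM and the CZ display domains).
 */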
struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

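/*
 * Look up the PCI host bridge (device 0, function 0 in the GPU's PCI
 * domain); its config space is used below for the MCHBAR setup.
 */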
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; note whether we need to disable it again on teardown */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

static int i915_driver_modeset_probe(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			goto out;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto out;

	intel_register_dsm_handler();

	ret = i915_switcheroo_register(i915);
	if (ret)
		goto cleanup_vga_client;

	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	ret = intel_irq_install(i915);
	if (ret)
		goto cleanup_csr;

	/*
	 * Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers.
	 */
	ret = intel_modeset_init(i915);
	if (ret)
		goto cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto cleanup_modeset;

	intel_overlay_setup(i915);

	if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
		return 0;

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);

	intel_init_ipc(i915);

	return 0;

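	/* Error unwind: tear down each successful step above in reverse order. */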
cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
cleanup_modeset:
	intel_modeset_driver_remove(i915);
cleanup_irq:
	intel_irq_uninstall(i915);
cleanup_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	i915_switcheroo_unregister(i915);
cleanup_vga_client:
	intel_vga_unregister(i915);
out:
	return ret;
}

static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_bios_driver_remove(i915);

	i915_switcheroo_unregister(i915);

	intel_vga_unregister(i915);

	intel_csr_ucode_fini(i915);
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
{
	if (!IS_VALLEYVIEW(i915))
		return 0;

	/* we write all the values in the struct, so no need to zero it out */
	i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
				       GFP_KERNEL);
	if (!i915->vlv_s0ix_state)
		return -ENOMEM;

	return 0;
}

static void vlv_free_s0ix_state(struct drm_i915_private *i915)
{
	if (!i915->vlv_s0ix_state)
		return;

	kfree(i915->vlv_s0ix_state);
	i915->vlv_s0ix_state = NULL;
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);

	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	pm_qos_add_request(&dev_priv->sb_qos,
			   PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_alloc_s0ix_state(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_gt_init_early(&dev_priv->gt, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	intel_display_crc_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_free_s0ix_state(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 *			      i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_free_s0ix_state(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	pm_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune_mmio_domains(&dev_priv->uncore);

	intel_uc_init_mmio(&dev_priv->gt.uc);

	ret = intel_engines_init_mmio(&dev_priv->gt);
	if (ret)
		goto err_uncore;

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_engines_cleanup(&dev_priv->gt);
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type

static const char *intel_dram_type_str(enum intel_dram_type type)
{
	static const char * const str[] = {
		DRAM_TYPE_STR(UNKNOWN),
		DRAM_TYPE_STR(DDR3),
		DRAM_TYPE_STR(DDR4),
		DRAM_TYPE_STR(LPDDR3),
		DRAM_TYPE_STR(LPDDR4),
	};

	if (type >= ARRAY_SIZE(str))
		type = INTEL_DRAM_UNKNOWN;

	return str[type];
}

#undef DRAM_TYPE_STR

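/*
 * Each rank spans the full 64-bit channel, so a DIMM carries
 * ranks * 64 / device_width DRAM devices (e.g. two ranks of x8
 * parts is 16 devices).
 */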
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	return dimm->ranks * 64 / (dimm->width ?: 1);
}

/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
	return val & SKL_DRAM_SIZE_MASK;
}

static int skl_get_dimm_width(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	switch (val & SKL_DRAM_WIDTH_MASK) {
	case SKL_DRAM_WIDTH_X8:
	case SKL_DRAM_WIDTH_X16:
	case SKL_DRAM_WIDTH_X32:
		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
}

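/* CNL packs the DIMM size field in half-GB units, hence the divide by two */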
/* Returns total GB for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
	return (val & CNL_DRAM_SIZE_MASK) / 2;
}

static int cnl_get_dimm_width(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	switch (val & CNL_DRAM_WIDTH_MASK) {
	case CNL_DRAM_WIDTH_X8:
	case CNL_DRAM_WIDTH_X16:
	case CNL_DRAM_WIDTH_X32:
		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int cnl_get_dimm_ranks(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;

	return val + 1;
}

static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
	/* Convert total GB to Gb per DRAM device */
	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}

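/* Gen10+ (CNL) encodes the DIMM fields differently from SKL/KBL */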
static void
skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
		       struct dram_dimm_info *dimm,
		       int channel, char dimm_name, u16 val)
{
	if (INTEL_GEN(dev_priv) >= 10) {
		dimm->size = cnl_get_dimm_size(val);
		dimm->width = cnl_get_dimm_width(val);
		dimm->ranks = cnl_get_dimm_ranks(val);
	} else {
		dimm->size = skl_get_dimm_size(val);
		dimm->width = skl_get_dimm_width(val);
		dimm->ranks = skl_get_dimm_ranks(val);
	}

	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		      yesno(skl_is_16gb_dimm(dimm)));
}

static int
skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
			  struct dram_channel_info *ch,
			  int channel, u32 val)
{
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
			       channel, 'S', val >> 16);

	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
		DRM_DEBUG_KMS("CH%u not populated\n", channel);
		return -EINVAL;
	}

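	/*
	 * The channel is dual rank if either DIMM is dual rank, or if both
	 * slots carry a single-rank DIMM (two ranks total on the channel).
	 */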
	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
		ch->ranks = 2;
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm =
		skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);

	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
		      channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}

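/*
 * Channels are symmetric when their raw descriptors match and, within a
 * channel, either only the L slot is populated or the L and S DIMMs are
 * identical.
 */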
static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
{
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}

static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val;
	int ret;

	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
	if (ret == 0)
		dram_info->num_channels++;

	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		DRM_INFO("Number of memory channels is zero\n");
		return -EINVAL;
	}

	/*
	 * If either channel is single rank, the worst case output will be
	 * the same as if we had only single rank memory, so treat the whole
	 * configuration as single rank.
	 */
	if (ch0.ranks == 1 || ch1.ranks == 1)
		dram_info->ranks = 1;
	else
		dram_info->ranks = max(ch0.ranks, ch1.ranks);

	if (dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

	DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
		      yesno(dram_info->symmetric_memory));
	return 0;
}

static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case SKL_DRAM_DDR_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case SKL_DRAM_DDR_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case SKL_DRAM_DDR_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 mem_freq_khz, val;
	int ret;

	dram_info->type = skl_get_dram_type(dev_priv);
	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));

	ret = skl_dram_get_channels_info(dev_priv);
	if (ret)
		return ret;

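	/*
	 * Bandwidth in kBps follows from the 64-bit (8 byte) channel width:
	 * channels * frequency (kHz) * 8.
	 */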
	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_info->bandwidth_kbps = dram_info->num_channels *
				    mem_freq_khz * 8;

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
	case BXT_DRAM_SIZE_4GBIT:
		return 4;
	case BXT_DRAM_SIZE_6GBIT:
		return 6;
	case BXT_DRAM_SIZE_8GBIT:
		return 8;
	case BXT_DRAM_SIZE_12GBIT:
		return 12;
	case BXT_DRAM_SIZE_16GBIT:
		return 16;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int bxt_get_dimm_width(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;

	return 8 << val;
}

static int bxt_get_dimm_ranks(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	switch (val & BXT_DRAM_RANK_MASK) {
	case BXT_DRAM_RANK_SINGLE:
		return 1;
	case BXT_DRAM_RANK_DUAL:
		return 2;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return INTEL_DRAM_UNKNOWN;

	switch (val & BXT_DRAM_TYPE_MASK) {
	case BXT_DRAM_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case BXT_DRAM_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case BXT_DRAM_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case BXT_DRAM_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
			      u32 val)
{
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * GB to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
}

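/*
 * On BXT/GLK the DRAM attributes are read from the per-DUNIT registers
 * rather than the MCHBAR MCMAIN registers used on the big-core parts.
 */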
static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels;
	int i;

	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* Each active bit represents a 4-byte wide channel */
	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		struct dram_dimm_info dimm;
		enum intel_dram_type type;

		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;

		bxt_get_dimm_info(&dimm, val);
		type = bxt_get_dimm_type(val);

		WARN_ON(type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != type);

		DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
			      i - BXT_D_CR_DRP0_DUNIT_START,
			      dimm.size, dimm.width, dimm.ranks,
			      intel_dram_type_str(type));

		/*
		 * If any channel is single rank, the worst case output
		 * will be the same as if we had single rank memory, so
		 * treat the whole configuration as single rank.
		 */
		if (dram_info->ranks == 0)
			dram_info->ranks = dimm.ranks;
		else if (dimm.ranks == 1)
			dram_info->ranks = 1;

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
	}

	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
	    dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory information\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	int ret;

	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
		return;

	if (IS_GEN9_LP(dev_priv))
		ret = bxt_get_dram_info(dev_priv);
	else
		ret = skl_get_dram_info(dev_priv);
	if (ret)
		return;

	DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
		      dram_info->bandwidth_kbps,
		      dram_info->num_channels);

	DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
}

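/*
 * eDRAM size in MB is banks * ways * sets as decoded from the capability
 * register (e.g. 4 banks * 16 ways * 1 set = 64 MB).
 */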
static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
	static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const u8 sets[4] = { 1, 1, 2, 2 };

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)];
}

static void edram_detect(struct drm_i915_private *dev_priv)
{
	u32 edram_cap = 0;

	if (!(IS_HASWELL(dev_priv) ||
	      IS_BROADWELL(dev_priv) ||
	      INTEL_GEN(dev_priv) >= 9))
		return;

	edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);

	/* NB: We can't write IDICR yet because we don't have gt funcs set up */

	if (!(edram_cap & EDRAM_ENABLED))
		return;

	/*
	 * The needed capability bits for size calculation are not there with
	 * pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		dev_priv->edram_size_mb = 128;
	else
		dev_priv->edram_size_mb =
			gen9_edram_size_mb(dev_priv, edram_cap);

	dev_info(dev_priv->drm.dev,
		 "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}

/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	intel_sanitize_options(dev_priv);

	/* needs to be done before ggtt probe */
	edram_detect(dev_priv);

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

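	/*
	 * Background (from the change that added the call below): with no
	 * dma_parms set, CONFIG_DMA_API_DEBUG_SG assumes a conservative 64K
	 * segment limit and warns when i915 maps larger sg segments. The GPU
	 * has its own internal MMU and no real segment-length limit, so the
	 * driver advertises UINT_MAX rather than capping segments.
	 */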
	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(dev_priv, 2)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_mem_regions;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_mem_regions;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_gt_init_workarounds(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and MSI was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seem to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_get_dram_info(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;
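	/*
	 * Error unwind mirrors the setup order above: disable MSI and drop
	 * the PM QoS request first, then release the memory regions, the
	 * GGTT and finally the perf state.
	 */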
err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	pm_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}
/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
}
/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	intel_gt_driver_register(&dev_priv->gt);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);
}
/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

	intel_gt_driver_unregister(&dev_priv->gt);
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}
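/*
 * Print a one-line hardware summary plus the device info flag dumps when
 * DRM driver debugging (drm.debug with DRM_UT_DRIVER set) is enabled.
 * A hypothetical Gen9 part might log something like:
 * "i915 device info: pciid=0x3e9b rev=0x02 platform=COFFEELAKE
 * (subplatform=0x0) gen=9".
 */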
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug & DRM_UT_DRIVER) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   INTEL_GEN(dev_priv));

		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}
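/*
 * Allocate the drm_i915_private structure, initialise the embedded
 * drm_device and stamp in the write-once device info matched from the
 * PCI ID table.
 */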
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;
	int err;

	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
	if (!i915)
		return ERR_PTR(-ENOMEM);

	err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
	if (err) {
		kfree(i915);
		return ERR_PTR(err);
	}

	i915->drm.dev_private = i915;

	i915->drm.pdev = pdev;
	pci_set_drvdata(pdev, i915);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));

	return i915;
}
static void i915_driver_destroy(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	drm_dev_fini(&i915->drm);
	kfree(i915);

	/* And make sure we never chase our dangling pointer from pci_dev */
	pci_set_drvdata(pdev, NULL);
}
/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = i915_driver_create(pdev, ent);
	if (IS_ERR(dev_priv))
		return PTR_ERR(dev_priv);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;

	/*
	 * Check if we support fake LMEM -- for now we only unleash this for
	 * the live selftests (test-and-exit).
	 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
	if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
		if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
		    i915_modparams.fake_lmem_start) {
			mkwrite_device_info(dev_priv)->memory_regions =
				REGION_SMEM | REGION_LMEM | REGION_STOLEN;
			mkwrite_device_info(dev_priv)->is_dgfx = true;
			GEM_BUG_ON(!HAS_LMEM(dev_priv));
			GEM_BUG_ON(!IS_DGFX(dev_priv));
		}
	}
#endif

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(dev_priv);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_detect_vgpu(dev_priv);

	ret = i915_driver_mmio_probe(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;
	ret = i915_driver_hw_probe(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = i915_driver_modeset_probe(dev_priv);
	if (ret < 0)
		goto out_cleanup_hw;

	i915_driver_register(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_welcome_messages(dev_priv);

	return 0;

out_cleanup_hw:
	i915_driver_hw_remove(dev_priv);
	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
out_cleanup_mmio:
	i915_driver_mmio_release(dev_priv);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	i915_driver_late_release(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
	i915_driver_destroy(dev_priv);
	return ret;
}
void i915_driver_remove(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/*
	 * After unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged(&i915->gt);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	drm_atomic_helper_shutdown(&i915->drm);

	intel_gvt_driver_remove(i915);

	i915_driver_modeset_remove(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_power_domains_driver_remove(i915);

	i915_driver_hw_remove(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
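/*
 * i915_driver_remove() above unregisters and quiesces the device;
 * i915_driver_release() runs later, once the last drm_device reference
 * is dropped, and frees the remaining software state.
 */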
static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);

	i915_driver_mmio_release(dev_priv);

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
	i915_driver_destroy(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);
	i915_gem_release(dev, file);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}
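/* Call each encoder's ->suspend hook, serialised by the modeset locks. */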
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
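/*
 * Report whether the pending transition targets suspend-to-idle: any
 * ACPI target state below S3 keeps the platform in a low-power idle
 * state instead of a full suspend.
 */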
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after that point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}
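/*
 * First phase of system suspend: quiesce display and GEM while the
 * device is still powered; i915_drm_suspend_late() below then takes it
 * down to its final power state.
 */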
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_power_domains_disable(dev_priv);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_gem_suspend_gtt_mappings(dev_priv);

	i915_save_state(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}
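/*
 * Pick the suspend mode for the power domains code: hibernation takes
 * precedence, then suspend-to-idle, with suspend-to-mem as the default.
 */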
static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}
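/*
 * Suspend entry point used on vga_switcheroo (hybrid graphics) systems;
 * it is a no-op if switcheroo has already powered the device off.
 */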
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
	int error;

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	intel_gt_sanitize(&dev_priv->gt, true);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	i915_gem_restore_gtt_mappings(dev_priv);
	i915_gem_restore_fences(&dev_priv->ggtt);

	intel_csr_ucode_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}
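/*
 * Early counterpart of i915_drm_resume(): bring the PCI device back to
 * D0 and restore the uncore and power domains before the bulk of the
 * driver state is rebuilt.
 */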
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_resume_early(&dev_priv->uncore);

	intel_gt_check_and_clear_faults(&dev_priv->gt);

	intel_display_power_resume_early(dev_priv);

	intel_power_domains_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
int i915_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}
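/*
 * The i915_pm_* callbacks below back the driver's dev_pm_ops; each one
 * checks that the device hasn't been powered off via vga_switcheroo and
 * then forwards to the corresponding i915_drm_* helper.
 */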
static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}
static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}
static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}
2016-08-22 17:32:42 +07:00
|
|
|
static int i915_pm_poweroff_late(struct device *kdev)
|
2015-03-02 18:04:41 +07:00
|
|
|
{
|
2019-08-06 14:42:19 +07:00
|
|
|
struct drm_i915_private *i915 = kdev_to_i915(kdev);
|
2015-03-02 18:04:41 +07:00
|
|
|
|
2019-08-06 14:42:19 +07:00
|
|
|
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
|
2015-03-02 18:04:41 +07:00
|
|
|
return 0;
|
|
|
|
|
2019-08-06 14:42:19 +07:00
|
|
|
return i915_drm_suspend_late(&i915->drm, true);
|
2009-12-16 12:36:10 +07:00
|
|
|
}
|
|
|
|
|
2016-08-22 17:32:42 +07:00
|
|
|
static int i915_pm_resume_early(struct device *kdev)
|
2014-04-01 23:55:22 +07:00
|
|
|
{
|
2019-08-06 14:42:19 +07:00
|
|
|
struct drm_i915_private *i915 = kdev_to_i915(kdev);
|
2014-04-01 23:55:22 +07:00
|
|
|
|
2019-08-06 14:42:19 +07:00
|
|
|
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
|
2014-10-23 23:23:19 +07:00
|
|
|
return 0;
|
|
|
|
|
2019-08-06 14:42:19 +07:00
|
|
|
return i915_drm_resume_early(&i915->drm);
|
2014-04-01 23:55:22 +07:00
|
|
|
}
|
|
|
|
|
2016-08-22 17:32:42 +07:00
|
|
|
static int i915_pm_resume(struct device *kdev)
|
2009-12-16 12:36:10 +07:00
|
|
|
{
|
2019-08-06 14:42:19 +07:00
|
|
|
struct drm_i915_private *i915 = kdev_to_i915(kdev);
|
2010-02-08 03:48:24 +07:00
|
|
|
|
2019-08-06 14:42:19 +07:00
|
|
|
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
|
2014-10-23 23:23:19 +07:00
|
|
|
return 0;
|
|
|
|
|
2019-08-06 14:42:19 +07:00
|
|
|
return i915_drm_resume(&i915->drm);
|
2009-12-16 12:36:10 +07:00
|
|
|
}
|
|
|
|
|
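/*
 * Hibernation (S4) handlers. See the comment on i915_pm_ops further down
 * in this file for how the PM core sequences the freeze, thaw, poweroff
 * and restore phases.
 */
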
/* freeze: before creating the hibernation image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
	int i;

	if (!s)
		return;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,  0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC,  0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

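/* Counterpart of vlv_save_gunit_s0ix_state(): write the saved state back. */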
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	if (!s)
		return;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit: they are not
	 * restored here, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_PCBR, s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}

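/*
 * Poll the Gunit power well status register until (reg & mask) == val,
 * returning 0 on success or a negative error code on timeout.
 */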
static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
				  u32 mask, u32 val)
{
	i915_reg_t reg = VLV_GTLC_PW_STATUS;
	u32 reg_value;
	int ret;

	/* The HW does not like us polling for PW_STATUS frequently, so
	 * use the sleeping loop rather than risk the busy spin within
	 * intel_wait_for_register().
	 *
	 * Transitioning between RC6 states should be at most 2ms (see
	 * valleyview_enable_rps) so use a 3ms timeout.
	 */
	ret = wait_for(((reg_value =
			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
		       == val, 3);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	return ret;
}

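/*
 * Force the GFX clock on, or release the force, via the survivability
 * register; when forcing on, wait up to 20ms for the clock status bit
 * to latch.
 */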
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(&dev_priv->uncore,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}

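/*
 * Allow or gate GT wake requests, then wait for the Gunit to acknowledge
 * the new setting through VLV_GTLC_ALLOWWAKEACK.
 */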
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 mask;
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	mask = VLV_GTLC_ALLOWWAKEACK;
	val = allow ? mask : 0;

	err = vlv_wait_for_pw_status(dev_priv, mask, val);
	if (err)
		DRM_ERROR("timeout %s GT wake\n",
			  allow ? "allowing" : "disallowing");

	return err;
}

static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				  bool wait_for_on)
{
	u32 mask;
	u32 val;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 *
	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
	 * reset and we are trying to force the machine to sleep.
	 */
	if (vlv_wait_for_pw_status(dev_priv, mask, val))
		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
				 onoff(wait_for_on));
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

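/*
 * Drive the Gunit through the s0ix entry sequence: wait for the power
 * wells to drop, force the GFX clock on, gate GT wake requests, save the
 * Gunit state and finally release the clock force again.
 */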
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

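/*
 * Mirror image of vlv_suspend_complete(): force the GFX clock on, restore
 * the saved Gunit state, re-allow GT wake and release the clock force,
 * reporting the first error encountered.
 */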
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}

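/*
 * Runtime PM entry point, called by the PM core after the device has been
 * runtime-idle for the configured autosuspend delay (see
 * Documentation/power/runtime_pm.rst for the exact contract).
 */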
static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_gt_runtime_suspend(&dev_priv->gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_display_power_suspend(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_gt_runtime_resume(&dev_priv->gt);

		i915_gem_restore_fences(&dev_priv->ggtt);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the
	 * arguments used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

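/*
 * Runtime PM exit point: bring the device back to D0, restore the Gunit
 * state on VLV/CHV and re-enable interrupts, GT state and hotplug
 * detection.
 */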
static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point in rolling things back in case of an error; the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(&dev_priv->gt);
	i915_gem_restore_fences(&dev_priv->ggtt);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

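/*
 * A quick legend for the access flags above: DRM_AUTH requires an
 * authenticated client on the primary node, DRM_RENDER_ALLOW additionally
 * exposes the ioctl on render nodes, DRM_MASTER restricts it to the
 * current DRM master and DRM_ROOT_ONLY requires CAP_SYS_ADMIN.
 */
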
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
	.get_scanout_position = i915_get_crtc_scanoutpos,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};