Merge tag 'drm-intel-next-2016-02-14' of git://anongit.freedesktop.org/drm-intel into drm-next
- lots and lots of fbc work from Paulo
- max pixel clock checks from Mika Kahola
- prep work for nv12 offset handling from Ville
- piles of small fixes and refactorings all around

* tag 'drm-intel-next-2016-02-14' of git://anongit.freedesktop.org/drm-intel: (113 commits)
  drm/i915: Update DRIVER_DATE to 20160214
  drm/i915: edp resume/On time optimization.
  agp/intel-gtt: Only register fake agp driver for gen1
  drm/i915: TV pixel clock check
  drm/i915: CRT pixel clock check
  drm/i915: SDVO pixel clock check
  drm/i915: DisplayPort-MST pixel clock check
  drm/i915: HDMI pixel clock check
  drm/i915: DisplayPort pixel clock check
  drm/i915: check that rpm ref is held when accessing ringbuf in stolen mem
  drm/i915: fix error path in intel_setup_gmbus()
  drm/i915: Stop depending upon CONFIG_AGP_INTEL
  agp/intel-gtt: Don't leak the scratch page
  drm/i915: Capture PCI revision and subsytem details in error state
  drm/i915: fix context/engine cleanup order
  drm/i915: Handle PipeC fused off on IVB/HSW/BDW
  drm/i915/skl: Fix typo in DPLL_CFGCR1 definition
  drm/i915: Skip DDI PLL selection for DSI
  drm/i915/skl: Explicitly check for eDP in skl_ddi_pll_select()
  drm/i915/skl: Don't skip mst encoders in skl_ddi_pll_select()
  ...
commit efcebcf983
drivers/char/agp/intel-gtt.c
@@ -555,8 +555,10 @@ static unsigned int intel_gtt_mappable_entries(void)
 static void intel_gtt_teardown_scratch_page(void)
 {
 	set_pages_wb(intel_private.scratch_page, 1);
-	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
-		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (intel_private.needs_dmar)
+		pci_unmap_page(intel_private.pcidev,
+			       intel_private.scratch_page_dma,
+			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 	__free_page(intel_private.scratch_page);
 }

@@ -1346,16 +1348,6 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 {
 	int i, mask;

-	/*
-	 * Can be called from the fake agp driver but also directly from
-	 * drm/i915.ko. Hence we need to check whether everything is set up
-	 * already.
-	 */
-	if (intel_private.driver) {
-		intel_private.refcount++;
-		return 1;
-	}
-
 	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
 		if (gpu_pdev) {
 			if (gpu_pdev->device ==
@@ -1376,16 +1368,26 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 	if (!intel_private.driver)
 		return 0;

-	intel_private.refcount++;
-
 #if IS_ENABLED(CONFIG_AGP_INTEL)
 	if (bridge) {
+		if (INTEL_GTT_GEN > 1)
+			return 0;
+
 		bridge->driver = &intel_fake_agp_driver;
 		bridge->dev_private_data = &intel_private;
 		bridge->dev = bridge_pdev;
 	}
 #endif

+	/*
+	 * Can be called from the fake agp driver but also directly from
+	 * drm/i915.ko. Hence we need to check whether everything is set up
+	 * already.
+	 */
+	if (intel_private.refcount++)
+		return 1;
+
 	intel_private.bridge_dev = pci_dev_get(bridge_pdev);

 	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
@@ -1430,6 +1432,8 @@ void intel_gmch_remove(void)
 	if (--intel_private.refcount)
 		return;

+	if (intel_private.scratch_page)
+		intel_gtt_teardown_scratch_page();
 	if (intel_private.pcidev)
 		pci_dev_put(intel_private.pcidev);
 	if (intel_private.bridge_dev)
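The reshuffled probe path above boils down to a first-caller-initializes refcount idiom: the first probe (count going 0 to 1) does the one-time setup, later callers only take a reference, and remove tears down only when the last reference drops. A standalone sketch of the idiom, with hypothetical names (not the kernel's own):

	#include <stdio.h>

	static struct {
		int refcount;
		int driver;	/* stands in for intel_private.driver */
	} priv = { .driver = 1 };

	static int fake_probe(void)
	{
		if (!priv.driver)
			return 0;		/* no matching hardware */

		if (priv.refcount++)
			return 1;		/* already set up, just take a ref */

		printf("one-time setup runs exactly once\n");
		return 1;
	}

	static void fake_remove(void)
	{
		if (--priv.refcount)
			return;			/* other users remain */
		printf("last user gone, tearing down\n");
	}

	int main(void)
	{
		fake_probe();	/* e.g. from the fake agp driver */
		fake_probe();	/* e.g. from drm/i915.ko */
		fake_remove();
		fake_remove();
		return 0;
	}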
drivers/gpu/drm/i915/Kconfig
@@ -2,9 +2,7 @@ config DRM_I915
 	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
 	depends on DRM
 	depends on X86 && PCI
-	depends on (AGP || AGP=n)
 	select INTEL_GTT
-	select AGP_INTEL if AGP
 	select INTERVAL_TREE
 	# we need shmfs for the swappable backing store, and in particular
 	# the shmem_readpage() which depends upon tmpfs
drivers/gpu/drm/i915/i915_debugfs.c
@@ -2463,9 +2463,9 @@ static void i915_guc_client_info(struct seq_file *m,

 	for_each_ring(ring, dev_priv, i) {
 		seq_printf(m, "\tSubmissions: %llu %s\n",
-				client->submissions[i],
+				client->submissions[ring->guc_id],
 				ring->name);
-		tot += client->submissions[i];
+		tot += client->submissions[ring->guc_id];
 	}
 	seq_printf(m, "\tTotal: %llu\n", tot);
 }
@@ -2502,10 +2502,10 @@ static int i915_guc_info(struct seq_file *m, void *data)

 	seq_printf(m, "\nGuC submissions:\n");
 	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
-			ring->name, guc.submissions[i],
-			guc.last_seqno[i], guc.last_seqno[i]);
-		total += guc.submissions[i];
+		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
+			ring->name, guc.submissions[ring->guc_id],
+			guc.last_seqno[ring->guc_id]);
+		total += guc.submissions[ring->guc_id];
 	}
 	seq_printf(m, "\t%s: %llu\n", "Total", total);

@@ -2583,6 +2583,10 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 			enabled = true;
 		}
 	}
+
+	seq_printf(m, "Main link in standby mode: %s\n",
+		   yesno(dev_priv->psr.link_standby));
+
 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

 	if (!HAS_DDI(dev))
@@ -3221,9 +3225,11 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 {
 	int i;
 	int ret;
+	struct intel_engine_cs *ring;
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_workarounds *workarounds = &dev_priv->workarounds;

 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -3231,15 +3237,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)

 	intel_runtime_pm_get(dev_priv);

-	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
-	for (i = 0; i < dev_priv->workarounds.count; ++i) {
+	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
+	for_each_ring(ring, dev_priv, i)
+		seq_printf(m, "HW whitelist count for %s: %d\n",
+			   ring->name, workarounds->hw_whitelist_count[i]);
+	for (i = 0; i < workarounds->count; ++i) {
 		i915_reg_t addr;
 		u32 mask, value, read;
 		bool ok;

-		addr = dev_priv->workarounds.reg[i].addr;
-		mask = dev_priv->workarounds.reg[i].mask;
-		value = dev_priv->workarounds.reg[i].value;
+		addr = workarounds->reg[i].addr;
+		mask = workarounds->reg[i].mask;
+		value = workarounds->reg[i].value;
 		read = I915_READ(addr);
 		ok = (value & mask) == (read & mask);
 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
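Several hunks in this merge replace raw index loops with for_each_ring(), which visits only the engines that actually exist on a given part. A toy model of the pattern (illustrative names and data, not the kernel macro):

	#include <stdio.h>
	#include <stdbool.h>

	#define NUM_RINGS 5

	struct engine { const char *name; bool initialized; };

	static struct engine rings[NUM_RINGS] = {
		{ "rcs",  true  }, { "bcs",  true }, { "vcs", true },
		{ "vcs2", false }, { "vecs", true },
	};

	/* walk all slots, run the body only for initialised engines;
	 * the real macro also guards against the dangling-else hazard */
	#define for_each_ring(ring, i) \
		for ((i) = 0; (i) < NUM_RINGS; (i)++) \
			if (((ring) = &rings[(i)]), (ring)->initialized)

	int main(void)
	{
		struct engine *ring;
		int i;

		for_each_ring(ring, i)
			printf("slot %d -> %s\n", i, ring->name);
		return 0;
	}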
drivers/gpu/drm/i915/i915_dma.c
@@ -391,20 +391,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_vga_client;

-	/* Initialise stolen first so that we may reserve preallocated
-	 * objects for the BIOS to KMS transition.
-	 */
-	ret = i915_gem_init_stolen(dev);
-	if (ret)
-		goto cleanup_vga_switcheroo;
-
 	intel_power_domains_init_hw(dev_priv, false);

 	intel_csr_ucode_init(dev_priv);

 	ret = intel_irq_install(dev_priv);
 	if (ret)
-		goto cleanup_gem_stolen;
+		goto cleanup_csr;

 	intel_setup_gmbus(dev);

@@ -451,16 +444,15 @@ static int i915_load_modeset_init(struct drm_device *dev)

 cleanup_gem:
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
+	i915_gem_cleanup_engines(dev);
 	mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
 	intel_guc_ucode_fini(dev);
 	drm_irq_uninstall(dev);
 	intel_teardown_gmbus(dev);
-cleanup_gem_stolen:
-	i915_gem_cleanup_stolen(dev);
-cleanup_vga_switcheroo:
+cleanup_csr:
+	intel_csr_ucode_fini(dev_priv);
 	vga_switcheroo_unregister_client(dev->pdev);
 cleanup_vga_client:
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -816,7 +808,41 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 			   !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
 			DRM_INFO("Display fused off, disabling\n");
 			info->num_pipes = 0;
 		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
 			DRM_INFO("PipeC fused off\n");
 			info->num_pipes -= 1;
 		}
+	} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
+		u32 dfsm = I915_READ(SKL_DFSM);
+		u8 disabled_mask = 0;
+		bool invalid;
+		int num_bits;
+
+		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+			disabled_mask |= BIT(PIPE_A);
+		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+			disabled_mask |= BIT(PIPE_B);
+		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+			disabled_mask |= BIT(PIPE_C);
+
+		num_bits = hweight8(disabled_mask);
+
+		switch (disabled_mask) {
+		case BIT(PIPE_A):
+		case BIT(PIPE_B):
+		case BIT(PIPE_A) | BIT(PIPE_B):
+		case BIT(PIPE_A) | BIT(PIPE_C):
+			invalid = true;
+			break;
+		default:
+			invalid = false;
+		}
+
+		if (num_bits > info->num_pipes || invalid)
+			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+				  disabled_mask);
+		else
+			info->num_pipes -= num_bits;
 	}

 	/* Initialize slice/subslice/EU info */
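The fuse accounting above treats each set bit as one disabled pipe: hweight8() is a population count, and certain combinations are rejected because pipes can only plausibly be fused off "from the end". A userspace sketch with a hypothetical fuse value:

	#include <stdio.h>

	enum pipe { PIPE_A, PIPE_B, PIPE_C };
	#define BIT(n) (1u << (n))

	static int popcount8(unsigned char v)
	{
		return __builtin_popcount(v);	/* hweight8() equivalent */
	}

	int main(void)
	{
		unsigned char disabled_mask = BIT(PIPE_C);	/* e.g. only pipe C fused */
		int num_pipes = 3;
		int num_bits = popcount8(disabled_mask);
		int invalid;

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = 1;
			break;
		default:
			invalid = 0;
		}

		if (num_bits > num_pipes || invalid)
			printf("invalid pipe fuse configuration: 0x%x\n", disabled_mask);
		else
			printf("%d of %d pipes usable\n", num_pipes - num_bits, num_pipes);
		return 0;
	}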
@@ -855,6 +881,94 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv)
 	}
 }

+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * The i915 workqueue is primarily used for batched retirement of
+	 * requests (and thus managing bo) once the task has been completed
+	 * by the GPU. i915_gem_retire_requests() is called directly when we
+	 * need high-priority retirement, such as waiting for an explicit
+	 * bo.
+	 *
+	 * It is also used for periodic low-priority events, such as
+	 * idle-timers and recording error state.
+	 *
+	 * All tasks on the workqueue are expected to acquire the dev mutex
+	 * so there is no point in running more than one instance of the
+	 * workqueue at any time. Use an ordered one.
+	 */
+	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+	if (dev_priv->wq == NULL)
+		goto out_err;
+
+	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+	if (dev_priv->hotplug.dp_wq == NULL)
+		goto out_free_wq;
+
+	dev_priv->gpu_error.hangcheck_wq =
+		alloc_ordered_workqueue("i915-hangcheck", 0);
+	if (dev_priv->gpu_error.hangcheck_wq == NULL)
+		goto out_free_dp_wq;
+
+	return 0;
+
+out_free_dp_wq:
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
+out_free_wq:
+	destroy_workqueue(dev_priv->wq);
+out_err:
+	DRM_ERROR("Failed to allocate workqueues.\n");
+
+	return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
+	destroy_workqueue(dev_priv->wq);
+}
+
+static int i915_mmio_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int mmio_bar;
+	int mmio_size;
+
+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	/*
+	 * Before gen4, the registers and the GTT are behind different BARs.
+	 * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we want to restrict this ioremap from
+	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
+	 * the register BAR remains the same size for all the earlier
+	 * generations up to Ironlake.
+	 */
+	if (INTEL_INFO(dev)->gen < 5)
+		mmio_size = 512 * 1024;
+	else
+		mmio_size = 2 * 1024 * 1024;
+	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+	if (dev_priv->regs == NULL) {
+		DRM_ERROR("failed to map registers\n");
+
+		return -EIO;
+	}
+
+	/* Try to make sure MCHBAR is enabled before poking at it */
+	intel_setup_mchbar(dev);
+
+	return 0;
+}
+
+static void i915_mmio_cleanup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	intel_teardown_mchbar(dev);
+	pci_iounmap(dev->pdev, dev_priv->regs);
+}
+
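The new helpers keep the kernel's goto-unwind convention: each failure jumps to a label that frees only what was allocated so far, in reverse order, and the cleanup function mirrors init. Standalone sketch with stand-in allocations:

	#include <stdlib.h>

	struct workqueues { void *wq, *dp_wq, *hangcheck_wq; };

	static int workqueues_init(struct workqueues *w)
	{
		w->wq = malloc(64);
		if (!w->wq)
			goto out_err;

		w->dp_wq = malloc(64);
		if (!w->dp_wq)
			goto out_free_wq;

		w->hangcheck_wq = malloc(64);
		if (!w->hangcheck_wq)
			goto out_free_dp_wq;

		return 0;

	out_free_dp_wq:
		free(w->dp_wq);
	out_free_wq:
		free(w->wq);
	out_err:
		return -1;	/* -ENOMEM in the kernel version */
	}

	static void workqueues_cleanup(struct workqueues *w)
	{
		/* teardown mirrors init, in reverse order */
		free(w->hangcheck_wq);
		free(w->dp_wq);
		free(w->wq);
	}

	int main(void)
	{
		struct workqueues w;
		if (workqueues_init(&w) == 0)
			workqueues_cleanup(&w);
		return 0;
	}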
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -870,7 +984,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_i915_private *dev_priv;
 	struct intel_device_info *info, *device_info;
-	int ret = 0, mmio_bar, mmio_size;
+	int ret = 0;
 	uint32_t aperture_size;

 	info = (struct intel_device_info *) flags;
@@ -897,6 +1011,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	mutex_init(&dev_priv->modeset_restore_lock);
 	mutex_init(&dev_priv->av_mutex);

+	ret = i915_workqueues_init(dev_priv);
+	if (ret < 0)
+		goto out_free_priv;
+
 	intel_pm_setup(dev);

 	intel_runtime_pm_get(dev_priv);
@@ -915,28 +1033,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
-		goto free_priv;
+		goto out_runtime_pm_put;
 	}

-	mmio_bar = IS_GEN2(dev) ? 1 : 0;
-	/* Before gen4, the registers and the GTT are behind different BARs.
-	 * However, from gen4 onwards, the registers and the GTT are shared
-	 * in the same BAR, so we want to restrict this ioremap from
-	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
-	 * the register BAR remains the same size for all the earlier
-	 * generations up to Ironlake.
-	 */
-	if (info->gen < 5)
-		mmio_size = 512*1024;
-	else
-		mmio_size = 2*1024*1024;
-
-	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
-	if (!dev_priv->regs) {
-		DRM_ERROR("failed to map registers\n");
-		ret = -EIO;
+	ret = i915_mmio_setup(dev);
+	if (ret < 0)
 		goto put_bridge;
-	}

 	/* This must be called before any calls to HAS_PCH_* */
 	intel_detect_pch(dev);
@@ -945,7 +1047,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

 	ret = i915_gem_gtt_init(dev);
 	if (ret)
-		goto out_freecsr;
+		goto out_uncore_fini;

 	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
 	 * otherwise the vga fbdev driver falls over. */
@@ -991,49 +1093,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
 					      aperture_size);

-	/* The i915 workqueue is primarily used for batched retirement of
-	 * requests (and thus managing bo) once the task has been completed
-	 * by the GPU. i915_gem_retire_requests() is called directly when we
-	 * need high-priority retirement, such as waiting for an explicit
-	 * bo.
-	 *
-	 * It is also used for periodic low-priority events, such as
-	 * idle-timers and recording error state.
-	 *
-	 * All tasks on the workqueue are expected to acquire the dev mutex
-	 * so there is no point in running more than one instance of the
-	 * workqueue at any time. Use an ordered one.
-	 */
-	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
-	if (dev_priv->wq == NULL) {
-		DRM_ERROR("Failed to create our workqueue.\n");
-		ret = -ENOMEM;
-		goto out_mtrrfree;
-	}
-
-	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-	if (dev_priv->hotplug.dp_wq == NULL) {
-		DRM_ERROR("Failed to create our dp workqueue.\n");
-		ret = -ENOMEM;
-		goto out_freewq;
-	}
-
-	dev_priv->gpu_error.hangcheck_wq =
-		alloc_ordered_workqueue("i915-hangcheck", 0);
-	if (dev_priv->gpu_error.hangcheck_wq == NULL) {
-		DRM_ERROR("Failed to create our hangcheck workqueue.\n");
-		ret = -ENOMEM;
-		goto out_freedpwq;
-	}
-
 	intel_irq_init(dev_priv);
 	intel_uncore_sanitize(dev);

-	/* Try to make sure MCHBAR is enabled before poking at it */
-	intel_setup_mchbar(dev);
 	intel_opregion_setup(dev);

-	i915_gem_load(dev);
+	i915_gem_load_init(dev);
+	i915_gem_shrinker_init(dev_priv);

 	/* On the 945G/GM, the chipset reports the MSI capability on the
 	 * integrated graphics even though the support isn't actually there
@@ -1046,8 +1112,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	 * be lost or delayed, but we use them anyways to avoid
 	 * stuck interrupts on some machines.
 	 */
-	if (!IS_I945G(dev) && !IS_I945GM(dev))
-		pci_enable_msi(dev->pdev);
+	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+		if (pci_enable_msi(dev->pdev) < 0)
+			DRM_DEBUG_DRIVER("can't enable MSI");
+	}

 	intel_device_info_runtime_init(dev);

@@ -1097,38 +1165,29 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_power_domains_fini(dev_priv);
 	drm_vblank_cleanup(dev);
 out_gem_unload:
-	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-	unregister_shrinker(&dev_priv->mm.shrinker);
+	i915_gem_shrinker_cleanup(dev_priv);

 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);

-	intel_teardown_mchbar(dev);
 	pm_qos_remove_request(&dev_priv->pm_qos);
-	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
-out_freedpwq:
-	destroy_workqueue(dev_priv->hotplug.dp_wq);
-out_freewq:
-	destroy_workqueue(dev_priv->wq);
-out_mtrrfree:
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
 out_gtt:
 	i915_global_gtt_cleanup(dev);
-out_freecsr:
-	intel_csr_ucode_fini(dev_priv);
+out_uncore_fini:
 	intel_uncore_fini(dev);
-	pci_iounmap(dev->pdev, dev_priv->regs);
+	i915_mmio_cleanup(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
-free_priv:
-	kmem_cache_destroy(dev_priv->requests);
-	kmem_cache_destroy(dev_priv->vmas);
-	kmem_cache_destroy(dev_priv->objects);
-
+	i915_gem_load_cleanup(dev);
+out_runtime_pm_put:
 	intel_runtime_pm_put(dev_priv);
-
+	i915_workqueues_cleanup(dev_priv);
+out_free_priv:
 	kfree(dev_priv);

 	return ret;
 }
@@ -1153,8 +1212,7 @@ int i915_driver_unload(struct drm_device *dev)

 	i915_teardown_sysfs(dev);

-	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-	unregister_shrinker(&dev_priv->mm.shrinker);
+	i915_gem_shrinker_cleanup(dev_priv);

 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1182,6 +1240,8 @@ int i915_driver_unload(struct drm_device *dev)
 	vga_switcheroo_unregister_client(dev->pdev);
 	vga_client_register(dev->pdev, NULL, NULL, NULL);

+	intel_csr_ucode_fini(dev_priv);
+
 	/* Free error state after interrupts are fully disabled. */
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	i915_destroy_error_state(dev);
@@ -1196,31 +1256,21 @@ int i915_driver_unload(struct drm_device *dev)

 	intel_guc_ucode_fini(dev);
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
+	i915_gem_cleanup_engines(dev);
 	mutex_unlock(&dev->struct_mutex);
 	intel_fbc_cleanup_cfb(dev_priv);
-	i915_gem_cleanup_stolen(dev);
-
-	intel_csr_ucode_fini(dev_priv);
-
-	intel_teardown_mchbar(dev);
-
-	destroy_workqueue(dev_priv->hotplug.dp_wq);
-	destroy_workqueue(dev_priv->wq);
-	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);

 	i915_global_gtt_cleanup(dev);

 	intel_uncore_fini(dev);
-	if (dev_priv->regs != NULL)
-		pci_iounmap(dev->pdev, dev_priv->regs);
+	i915_mmio_cleanup(dev);

-	kmem_cache_destroy(dev_priv->requests);
-	kmem_cache_destroy(dev_priv->vmas);
-	kmem_cache_destroy(dev_priv->objects);
+	i915_gem_load_cleanup(dev);
 	pci_dev_put(dev_priv->bridge_dev);
+	i915_workqueues_cleanup(dev_priv);
 	kfree(dev_priv);

 	return 0;
drivers/gpu/drm/i915/i915_drv.h
@@ -59,7 +59,7 @@

 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20160124"
+#define DRIVER_DATE		"20160214"

 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -900,16 +900,15 @@ enum fb_op_origin {
 	ORIGIN_DIRTYFB,
 };

-struct i915_fbc {
+struct intel_fbc {
 	/* This is always the inner lock when overlapping with struct_mutex and
 	 * it's the outer lock when overlapping with stolen_lock. */
 	struct mutex lock;
 	unsigned threshold;
-	unsigned int fb_id;
 	unsigned int possible_framebuffer_bits;
 	unsigned int busy_bits;
+	unsigned int visible_pipes_mask;
 	struct intel_crtc *crtc;
-	int y;

 	struct drm_mm_node compressed_fb;
 	struct drm_mm_node *compressed_llb;
@@ -919,18 +918,52 @@ struct i915_fbc {
 	bool enabled;
 	bool active;

+	struct intel_fbc_state_cache {
+		struct {
+			unsigned int mode_flags;
+			uint32_t hsw_bdw_pixel_rate;
+		} crtc;
+
+		struct {
+			unsigned int rotation;
+			int src_w;
+			int src_h;
+			bool visible;
+		} plane;
+
+		struct {
+			u64 ilk_ggtt_offset;
+			uint32_t pixel_format;
+			unsigned int stride;
+			int fence_reg;
+			unsigned int tiling_mode;
+		} fb;
+	} state_cache;
+
+	struct intel_fbc_reg_params {
+		struct {
+			enum pipe pipe;
+			enum plane plane;
+			unsigned int fence_y_offset;
+		} crtc;
+
+		struct {
+			u64 ggtt_offset;
+			uint32_t pixel_format;
+			unsigned int stride;
+			int fence_reg;
+		} fb;
+
+		int cfb_size;
+	} params;
+
 	struct intel_fbc_work {
 		bool scheduled;
+		u32 scheduled_vblank;
 		struct work_struct work;
-		struct drm_framebuffer *fb;
-		unsigned long enable_jiffies;
 	} work;

 	const char *no_fbc_reason;

 	bool (*is_active)(struct drm_i915_private *dev_priv);
 	void (*activate)(struct intel_crtc *crtc);
 	void (*deactivate)(struct drm_i915_private *dev_priv);
 };

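The state_cache/reg_params split in the fbc rework is a two-phase pattern: everything that must be read under locks is captured into a cache first, and the register values are then derived from the cache alone, so the hardware write path never touches protected state. A sketch of the shape (all names here are illustrative, not the driver's):

	#include <stdio.h>

	struct state_cache { int stride; unsigned fence_y_offset; };
	struct reg_params  { unsigned int ctl; };

	static void cache_state(struct state_cache *c)
	{
		/* in the driver this phase runs under the relevant locks */
		c->stride = 4096;
		c->fence_y_offset = 8;
	}

	static void derive_params(const struct state_cache *c, struct reg_params *p)
	{
		/* pure function of the cache: no locking needed */
		p->ctl = (unsigned int)(c->stride / 64) | (c->fence_y_offset << 16);
	}

	int main(void)
	{
		struct state_cache cache;
		struct reg_params params;

		cache_state(&cache);
		derive_params(&cache, &params);
		printf("would write 0x%08x\n", params.ctl);
		return 0;
	}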
 /**
@@ -970,6 +1003,7 @@ struct i915_psr {
 	unsigned busy_frontbuffer_bits;
 	bool psr2_support;
 	bool aux_frame_sync;
+	bool link_standby;
 };

 enum intel_pch {
@@ -1657,11 +1691,18 @@ struct i915_wa_reg {
 	u32 mask;
 };

-#define I915_MAX_WA_REGS 16
+/*
+ * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
+ * allowing it for RCS as we don't foresee any requirement of having
+ * a whitelist for other engines. When it is really required for
+ * other engines then the limit need to be increased.
+ */
+#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)

 struct i915_workarounds {
 	struct i915_wa_reg reg[I915_MAX_WA_REGS];
 	u32 count;
+	u32 hw_whitelist_count[I915_NUM_RINGS];
 };

 struct i915_virtual_gpu {
@@ -1758,7 +1799,7 @@ struct drm_i915_private {
 	u32 pipestat_irq_mask[I915_MAX_PIPES];

 	struct i915_hotplug hotplug;
-	struct i915_fbc fbc;
+	struct intel_fbc fbc;
 	struct i915_drrs drrs;
 	struct intel_opregion opregion;
 	struct intel_vbt_data vbt;
@@ -1993,6 +2034,9 @@ enum hdmi_force_audio {
 #define I915_GTT_OFFSET_NONE ((u32)-1)

 struct drm_i915_gem_object_ops {
+	unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+
 	/* Interface between the GEM object and its backing storage.
 	 * get_pages() is called once prior to the use of the associated set
 	 * of pages before to binding them into the GTT, and put_pages() is
@@ -2008,6 +2052,7 @@ struct drm_i915_gem_object_ops {
 	 */
 	int (*get_pages)(struct drm_i915_gem_object *);
 	void (*put_pages)(struct drm_i915_gem_object *);
+
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
 };
@@ -2841,7 +2886,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
-void i915_gem_load(struct drm_device *dev);
+void i915_gem_load_init(struct drm_device *dev);
+void i915_gem_load_cleanup(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -3012,7 +3058,7 @@ int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
@@ -3254,6 +3300,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 #define I915_SHRINK_ACTIVE 0x8
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);

 /* i915_gem_tiling.c */
@@ -3424,16 +3471,14 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
+void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
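The new I915_GEM_OBJECT_HAS_STRUCT_PAGE flag replaces comparing obj->ops against one specific ops table: any ops vtable can now advertise the capability, which is what lets userptr objects opt in below. Minimal sketch with hypothetical names:

	#include <stdio.h>

	#define HAS_STRUCT_PAGE 0x1

	struct object_ops { unsigned int flags; };
	struct object { const struct object_ops *ops; };

	static const struct object_ops shmem_ops   = { .flags = HAS_STRUCT_PAGE };
	static const struct object_ops userptr_ops = { .flags = HAS_STRUCT_PAGE };
	static const struct object_ops dmabuf_ops  = { .flags = 0 };

	static int supports_dirty_pages(const struct object *obj)
	{
		/* capability test instead of an identity test against one table */
		return (obj->ops->flags & HAS_STRUCT_PAGE) != 0;
	}

	int main(void)
	{
		int x;
		struct object a = { &shmem_ops }, b = { &userptr_ops }, c = { &dmabuf_ops };

		(void)x;
		printf("%d %d %d\n", supports_dirty_pages(&a),
		       supports_dirty_pages(&b), supports_dirty_pages(&c));
		return 0;
	}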
drivers/gpu/drm/i915/i915_gem.c
@@ -2680,7 +2680,7 @@ void i915_gem_request_free(struct kref *req_ref)

 	if (ctx) {
 		if (i915.enable_execlists && ctx != req->i915->kernel_context)
-			intel_lr_context_unpin(req);
+			intel_lr_context_unpin(ctx, req->ring);

 		i915_gem_context_unreference(ctx);
 	}
@@ -4465,6 +4465,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 }

 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
 };
@@ -4912,7 +4913,7 @@ i915_gem_init_hw(struct drm_device *dev)
 		req = i915_gem_request_alloc(ring, NULL);
 		if (IS_ERR(req)) {
 			ret = PTR_ERR(req);
-			i915_gem_cleanup_ringbuffer(dev);
+			i915_gem_cleanup_engines(dev);
 			goto out;
 		}

@@ -4925,7 +4926,7 @@ i915_gem_init_hw(struct drm_device *dev)
 		if (ret && ret != -EIO) {
 			DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
 			i915_gem_request_cancel(req);
-			i915_gem_cleanup_ringbuffer(dev);
+			i915_gem_cleanup_engines(dev);
 			goto out;
 		}

@@ -4933,7 +4934,7 @@ i915_gem_init_hw(struct drm_device *dev)
 		if (ret && ret != -EIO) {
 			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
 			i915_gem_request_cancel(req);
-			i915_gem_cleanup_ringbuffer(dev);
+			i915_gem_cleanup_engines(dev);
 			goto out;
 		}

@@ -5008,7 +5009,7 @@ int i915_gem_init(struct drm_device *dev)
 }

 void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
@@ -5017,13 +5018,14 @@ i915_gem_cleanup_engines(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i)
 		dev_priv->gt.cleanup_ring(ring);

-	if (i915.enable_execlists)
-		/*
-		 * Neither the BIOS, ourselves or any other kernel
-		 * expects the system to be in execlists mode on startup,
-		 * so we need to reset the GPU back to legacy mode.
-		 */
-		intel_gpu_reset(dev);
+	if (i915.enable_execlists) {
+		/*
+		 * Neither the BIOS, ourselves or any other kernel
+		 * expects the system to be in execlists mode on startup,
+		 * so we need to reset the GPU back to legacy mode.
+		 */
+		intel_gpu_reset(dev);
+	}
 }

 static void
@@ -5034,7 +5036,7 @@ init_ring_lists(struct intel_engine_cs *ring)
 }

 void
-i915_gem_load(struct drm_device *dev)
+i915_gem_load_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
@@ -5100,11 +5102,18 @@ i915_gem_load_init(struct drm_device *dev)

 	dev_priv->mm.interruptible = true;

-	i915_gem_shrinker_init(dev_priv);
-
 	mutex_init(&dev_priv->fb_tracking.lock);
 }

+void i915_gem_load_cleanup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	kmem_cache_destroy(dev_priv->requests);
+	kmem_cache_destroy(dev_priv->vmas);
+	kmem_cache_destroy(dev_priv->objects);
+}
+
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -5302,7 +5311,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
 	struct page *page;

 	/* Only default objects have per-page dirty tracking */
-	if (WARN_ON(obj->ops != &i915_gem_object_ops))
+	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
 		return NULL;

 	page = i915_gem_object_get_page(obj, n);
drivers/gpu/drm/i915/i915_gem_context.c
@@ -321,6 +321,18 @@ i915_gem_create_context(struct drm_device *dev,
 	return ERR_PTR(ret);
 }

+static void i915_gem_context_unpin(struct intel_context *ctx,
+				   struct intel_engine_cs *engine)
+{
+	if (i915.enable_execlists) {
+		intel_lr_context_unpin(ctx, engine);
+	} else {
+		if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
+			i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+		i915_gem_context_unreference(ctx);
+	}
+}
+
 void i915_gem_context_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -329,22 +341,15 @@ void i915_gem_context_reset(struct drm_device *dev)
 	if (i915.enable_execlists) {
 		struct intel_context *ctx;

-		list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		list_for_each_entry(ctx, &dev_priv->context_list, link)
 			intel_lr_context_reset(dev, ctx);
-		}

 		return;
 	}

 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
-		struct intel_context *lctx = ring->last_context;
-
-		if (lctx) {
-			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
-				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

-			i915_gem_context_unreference(lctx);
+		if (ring->last_context) {
+			i915_gem_context_unpin(ring->last_context, ring);
 			ring->last_context = NULL;
 		}
 	}
@@ -417,13 +422,6 @@ void i915_gem_context_fini(struct drm_device *dev)
 		 * to offset the do_switch part, so that i915_gem_context_unreference()
 		 * can then free the base object correctly. */
 		WARN_ON(!dev_priv->ring[RCS].last_context);
-		if (dev_priv->ring[RCS].last_context == dctx) {
-			/* Fake switch to NULL context */
-			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
-			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
-			i915_gem_context_unreference(dctx);
-			dev_priv->ring[RCS].last_context = NULL;
-		}
-
 		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
@@ -432,7 +430,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 		struct intel_engine_cs *ring = &dev_priv->ring[i];

 		if (ring->last_context) {
-			i915_gem_context_unreference(ring->last_context);
+			i915_gem_context_unpin(ring->last_context, ring);
 			ring->last_context = NULL;
 		}
 	}
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1401,6 +1401,7 @@ eb_select_ring(struct drm_i915_private *dev_priv,
 			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
 		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
 			   bsd_idx <= I915_EXEC_BSD_RING2) {
+			bsd_idx >>= I915_EXEC_BSD_SHIFT;
 			bsd_idx--;
 		} else {
 			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
@@ -1654,7 +1655,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * must be freed again. If it was submitted then it is being tracked
 	 * on the active request list and no clean up is required here.
 	 */
-	if (ret && req)
+	if (ret && !IS_ERR_OR_NULL(req))
 		i915_gem_request_cancel(req);

 	mutex_unlock(&dev->struct_mutex);
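Why "ret && req" was not enough: req can hold an encoded error pointer rather than NULL, and cancelling an ERR_PTR would dereference garbage. A simplified userspace sketch of the kernel's ERR_PTR convention:

	#include <stdio.h>

	#define MAX_ERRNO 4095

	static void *ERR_PTR(long err) { return (void *)err; }

	static int IS_ERR_OR_NULL(const void *p)
	{
		return !p || (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		int x;
		void *ok = &x;
		void *err = ERR_PTR(-12);	/* -ENOMEM */

		printf("valid: %d, error: %d, null: %d\n",
		       IS_ERR_OR_NULL(ok), IS_ERR_OR_NULL(err), IS_ERR_OR_NULL(NULL));
		return 0;
	}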
drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2132,6 +2132,25 @@ static void i915_address_space_init(struct i915_address_space *vm,
 	list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }

+static void gtt_write_workarounds(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* This function is for gtt related workarounds. This function is
+	 * called on driver load and after a GPU reset, so you can place
+	 * workarounds here even if they get overwritten by GPU reset.
+	 */
+	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+	else if (IS_CHERRYVIEW(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+	else if (IS_SKYLAKE(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+	else if (IS_BROXTON(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+}
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2148,6 +2167,8 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)

 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
+	gtt_write_workarounds(dev);
+
 	/* In the case of execlists, PPGTT is enabled by the context descriptor
 	 * and the PDPs are contained within the context itself. We don't
 	 * need to do anything here. */
@@ -2809,6 +2830,8 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
 		ppgtt->base.cleanup(&ppgtt->base);
 	}

+	i915_gem_cleanup_stolen(dev);
+
 	if (drm_mm_initialized(&vm->mm)) {
 		if (intel_vgpu_active(dev))
 			intel_vgt_deballoon();
@@ -3181,6 +3204,14 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	if (ret)
 		return ret;

+	/*
+	 * Initialise stolen early so that we may reserve preallocated
+	 * objects for the BIOS to KMS transition.
+	 */
+	ret = i915_gem_init_stolen(dev);
+	if (ret)
+		goto out_gtt_cleanup;
+
 	/* GMADR is the PCI mmio aperture into the global GTT. */
 	DRM_INFO("Memory usable by graphics device = %lluM\n",
 		 gtt->base.total >> 20);
@@ -3200,6 +3231,11 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

 	return 0;
+
+out_gtt_cleanup:
+	gtt->base.cleanup(&dev_priv->gtt.base);
+
+	return ret;
 }

 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -3333,6 +3369,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 static struct scatterlist *
 rotate_pages(const dma_addr_t *in, unsigned int offset,
 	     unsigned int width, unsigned int height,
+	     unsigned int stride,
 	     struct sg_table *st, struct scatterlist *sg)
 {
 	unsigned int column, row;
@@ -3344,7 +3381,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
 	}

 	for (column = 0; column < width; column++) {
-		src_idx = width * (height - 1) + column;
+		src_idx = stride * (height - 1) + column;
 		for (row = 0; row < height; row++) {
 			st->nents++;
 			/* We don't need the pages, but need to initialize
@@ -3355,7 +3392,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
 			sg_dma_address(sg) = in[offset + src_idx];
 			sg_dma_len(sg) = PAGE_SIZE;
 			sg = sg_next(sg);
-			src_idx -= width;
+			src_idx -= stride;
 		}
 	}

@@ -3363,10 +3400,9 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
 }

 static struct sg_table *
-intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
+intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 			  struct drm_i915_gem_object *obj)
 {
-	struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
 	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
 	unsigned int size_pages_uv;
 	struct sg_page_iter sg_iter;
@@ -3408,6 +3444,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 	/* Rotate the pages. */
 	sg = rotate_pages(page_addr_list, 0,
 			  rot_info->width_pages, rot_info->height_pages,
+			  rot_info->width_pages,
 			  st, NULL);

 	/* Append the UV plane if NV12. */
@@ -3423,6 +3460,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 		rotate_pages(page_addr_list, uv_start_page,
 			     rot_info->width_pages_uv,
 			     rot_info->height_pages_uv,
+			     rot_info->width_pages_uv,
 			     st, sg);
 	}

@@ -3504,7 +3542,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 		vma->ggtt_view.pages = vma->obj->pages;
 	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
 		vma->ggtt_view.pages =
-			intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
+			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
 	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
 		vma->ggtt_view.pages =
 			intel_partial_pages(&vma->ggtt_view, vma->obj);
@@ -3598,7 +3636,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
 	if (view->type == I915_GGTT_VIEW_NORMAL) {
 		return obj->base.size;
 	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-		return view->params.rotation_info.size;
+		return view->params.rotated.size;
 	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
 		return view->params.partial.size << PAGE_SHIFT;
 	} else {
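The new stride parameter decouples the walk through the source pages from the width of the area being rotated, which the NV12 prep work needs (a UV plane is narrower than its allocation). A worked example of the index math with hypothetical dimensions: for column c the walk starts at stride*(height-1)+c and steps back by stride per row.

	#include <stdio.h>

	static void rotate_indices(unsigned width, unsigned height, unsigned stride)
	{
		unsigned column, row, src_idx;

		for (column = 0; column < width; column++) {
			src_idx = stride * (height - 1) + column;
			for (row = 0; row < height; row++) {
				printf("%3u ", src_idx);	/* source page for the next dst slot */
				src_idx -= stride;
			}
			printf("\n");
		}
	}

	int main(void)
	{
		/* rotate a 2x3 area inside a buffer whose rows are 4 pages wide:
		 * prints "8 4 0" and "9 5 1" */
		rotate_indices(2, 3, 4);
		return 0;
	}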
drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -155,7 +155,7 @@ struct i915_ggtt_view {
 			u64 offset;
 			unsigned int size;
 		} partial;
-		struct intel_rotation_info rotation_info;
+		struct intel_rotation_info rotated;
 	} params;

 	struct sg_table *pages;
@@ -342,6 +342,8 @@ struct i915_gtt {

 	size_t stolen_size;		/* Total size of stolen memory */
 	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
+	size_t stolen_reserved_base;
+	size_t stolen_reserved_size;
 	u64 mappable_end;		/* End offset that we can CPU map */
 	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -367,8 +367,20 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
 	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
 	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
 	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
-	register_shrinker(&dev_priv->mm.shrinker);
+	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-	register_oom_notifier(&dev_priv->mm.oom_notifier);
+	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+}
+
+/**
+ * i915_gem_shrinker_cleanup - Clean up i915 shrinker
+ * @dev_priv: i915 device
+ *
+ * This function unregisters the i915 shrinker and OOM handler.
+ */
+void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+	unregister_shrinker(&dev_priv->mm.shrinker);
 }
drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -458,6 +458,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
 		return 0;
 	}

+	dev_priv->gtt.stolen_reserved_base = reserved_base;
+	dev_priv->gtt.stolen_reserved_size = reserved_size;
+
 	/* It is possible for the reserved area to end before the end of stolen
 	 * memory, so just consider the start. */
 	reserved_total = stolen_top - reserved_base;
drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -49,21 +49,18 @@ struct i915_mmu_notifier {
 	struct hlist_node node;
 	struct mmu_notifier mn;
 	struct rb_root objects;
-	struct list_head linear;
-	bool has_linear;
 };

 struct i915_mmu_object {
 	struct i915_mmu_notifier *mn;
+	struct drm_i915_gem_object *obj;
 	struct interval_tree_node it;
 	struct list_head link;
-	struct drm_i915_gem_object *obj;
 	struct work_struct work;
-	bool active;
-	bool is_linear;
+	bool attached;
 };

-static void __cancel_userptr__worker(struct work_struct *work)
+static void cancel_userptr(struct work_struct *work)
 {
 	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
 	struct drm_i915_gem_object *obj = mo->obj;
@@ -94,24 +91,22 @@ static void cancel_userptr(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }

-static unsigned long cancel_userptr(struct i915_mmu_object *mo)
+static void add_object(struct i915_mmu_object *mo)
 {
-	unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;
-
-	/* The mmu_object is released late when destroying the
-	 * GEM object so it is entirely possible to gain a
-	 * reference on an object in the process of being freed
-	 * since our serialisation is via the spinlock and not
-	 * the struct_mutex - and consequently use it after it
-	 * is freed and then double free it.
-	 */
-	if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
-		schedule_work(&mo->work);
-		/* only schedule one work packet to avoid the refleak */
-		mo->active = false;
-	}
+	if (mo->attached)
+		return;
+
+	interval_tree_insert(&mo->it, &mo->mn->objects);
+	mo->attached = true;
+}

-	return end;
+static void del_object(struct i915_mmu_object *mo)
+{
+	if (!mo->attached)
+		return;
+
+	interval_tree_remove(&mo->it, &mo->mn->objects);
+	mo->attached = false;
 }

 static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
@@ -122,28 +117,36 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	struct i915_mmu_notifier *mn =
 		container_of(_mn, struct i915_mmu_notifier, mn);
 	struct i915_mmu_object *mo;
+	struct interval_tree_node *it;
+	LIST_HEAD(cancelled);

 	if (RB_EMPTY_ROOT(&mn->objects))
 		return;

 	/* interval ranges are inclusive, but invalidate range is exclusive */
 	end--;

 	spin_lock(&mn->lock);
-	if (mn->has_linear) {
-		list_for_each_entry(mo, &mn->linear, link) {
-			if (mo->it.last < start || mo->it.start > end)
-				continue;
-
-			cancel_userptr(mo);
-		}
-	} else {
-		struct interval_tree_node *it;
-
-		it = interval_tree_iter_first(&mn->objects, start, end);
-		while (it) {
-			mo = container_of(it, struct i915_mmu_object, it);
-			start = cancel_userptr(mo);
-			it = interval_tree_iter_next(it, start, end);
-		}
-	}
+	it = interval_tree_iter_first(&mn->objects, start, end);
+	while (it) {
+		/* The mmu_object is released late when destroying the
+		 * GEM object so it is entirely possible to gain a
+		 * reference on an object in the process of being freed
+		 * since our serialisation is via the spinlock and not
+		 * the struct_mutex - and consequently use it after it
+		 * is freed and then double free it. To prevent that
+		 * use-after-free we only acquire a reference on the
+		 * object if it is not in the process of being destroyed.
+		 */
+		mo = container_of(it, struct i915_mmu_object, it);
+		if (kref_get_unless_zero(&mo->obj->base.refcount))
+			schedule_work(&mo->work);
+
+		list_add(&mo->link, &cancelled);
+		it = interval_tree_iter_next(it, start, end);
+	}
+	list_for_each_entry(mo, &cancelled, link)
+		del_object(mo);
 	spin_unlock(&mn->lock);
 }

@@ -164,8 +167,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
 	spin_lock_init(&mn->lock);
 	mn->mn.ops = &i915_gem_userptr_notifier;
 	mn->objects = RB_ROOT;
-	INIT_LIST_HEAD(&mn->linear);
-	mn->has_linear = false;

 	/* Protected by mmap_sem (write-lock) */
 	ret = __mmu_notifier_register(&mn->mn, mm);
@@ -177,85 +178,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
 	return mn;
 }

-static int
-i915_mmu_notifier_add(struct drm_device *dev,
-		      struct i915_mmu_notifier *mn,
-		      struct i915_mmu_object *mo)
-{
-	struct interval_tree_node *it;
-	int ret = 0;
-
-	/* By this point we have already done a lot of expensive setup that
-	 * we do not want to repeat just because the caller (e.g. X) has a
-	 * signal pending (and partly because of that expensive setup, X
-	 * using an interrupt timer is likely to get stuck in an EINTR loop).
-	 */
-	mutex_lock(&dev->struct_mutex);
-
-	/* Make sure we drop the final active reference (and thereby
-	 * remove the objects from the interval tree) before we do
-	 * the check for overlapping objects.
-	 */
-	i915_gem_retire_requests(dev);
-
-	spin_lock(&mn->lock);
-	it = interval_tree_iter_first(&mn->objects,
-				      mo->it.start, mo->it.last);
-	if (it) {
-		struct drm_i915_gem_object *obj;
-
-		/* We only need to check the first object in the range as it
-		 * either has cancelled gup work queued and we need to
-		 * return back to the user to give time for the gup-workers
-		 * to flush their object references upon which the object will
-		 * be removed from the interval-tree, or the the range is
-		 * still in use by another client and the overlap is invalid.
-		 *
-		 * If we do have an overlap, we cannot use the interval tree
-		 * for fast range invalidation.
-		 */
-
-		obj = container_of(it, struct i915_mmu_object, it)->obj;
-		if (!obj->userptr.workers)
-			mn->has_linear = mo->is_linear = true;
-		else
-			ret = -EAGAIN;
-	} else
-		interval_tree_insert(&mo->it, &mn->objects);
-
-	if (ret == 0)
-		list_add(&mo->link, &mn->linear);
-
-	spin_unlock(&mn->lock);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
-{
-	struct i915_mmu_object *mo;
-
-	list_for_each_entry(mo, &mn->linear, link)
-		if (mo->is_linear)
-			return true;
-
-	return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
-		      struct i915_mmu_object *mo)
-{
-	spin_lock(&mn->lock);
-	list_del(&mo->link);
-	if (mo->is_linear)
-		mn->has_linear = i915_mmu_notifier_has_linear(mn);
-	else
-		interval_tree_remove(&mo->it, &mn->objects);
-	spin_unlock(&mn->lock);
-}
-
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
@@ -265,7 +187,9 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 	if (mo == NULL)
 		return;

-	i915_mmu_notifier_del(mo->mn, mo);
+	spin_lock(&mo->mn->lock);
+	del_object(mo);
+	spin_unlock(&mo->mn->lock);
 	kfree(mo);

 	obj->userptr.mmu_object = NULL;
@@ -299,7 +223,6 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 {
 	struct i915_mmu_notifier *mn;
 	struct i915_mmu_object *mo;
-	int ret;

 	if (flags & I915_USERPTR_UNSYNCHRONIZED)
 		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
@@ -316,16 +239,10 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 		return -ENOMEM;

 	mo->mn = mn;
-	mo->it.start = obj->userptr.ptr;
-	mo->it.last = mo->it.start + obj->base.size - 1;
 	mo->obj = obj;
-	INIT_WORK(&mo->work, __cancel_userptr__worker);
-
-	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
-	if (ret) {
-		kfree(mo);
-		return ret;
-	}
+	mo->it.start = obj->userptr.ptr;
+	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
+	INIT_WORK(&mo->work, cancel_userptr);

 	obj->userptr.mmu_object = mo;
 	return 0;
@@ -552,8 +469,10 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
 		/* In order to serialise get_pages with an outstanding
 		 * cancel_userptr, we must drop the struct_mutex and try again.
 		 */
-		if (!value || !work_pending(&obj->userptr.mmu_object->work))
-			obj->userptr.mmu_object->active = value;
+		if (!value)
+			del_object(obj->userptr.mmu_object);
+		else if (!work_pending(&obj->userptr.mmu_object->work))
+			add_object(obj->userptr.mmu_object);
 		else
 			ret = -EAGAIN;
 		spin_unlock(&obj->userptr.mmu_object->mn->lock);
@@ -789,9 +708,10 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 }

 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
-	.dmabuf_export = i915_gem_userptr_dmabuf_export,
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 	.get_pages = i915_gem_userptr_get_pages,
 	.put_pages = i915_gem_userptr_put_pages,
+	.dmabuf_export = i915_gem_userptr_dmabuf_export,
 	.release = i915_gem_userptr_release,
 };

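The userptr rewrite drops the separate "linear" fallback list: objects are always tracked in the interval tree, attach/detach is idempotent, and the invalidate path collects hits on a local list before detaching them. A toy model of the attach/detach bookkeeping (no real interval tree, names illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	struct mmu_object { bool attached; };

	static int tree_size;

	static void add_object(struct mmu_object *mo)
	{
		if (mo->attached)
			return;		/* already in the tree, nothing to do */
		tree_size++;		/* interval_tree_insert() in the driver */
		mo->attached = true;
	}

	static void del_object(struct mmu_object *mo)
	{
		if (!mo->attached)
			return;
		tree_size--;		/* interval_tree_remove() in the driver */
		mo->attached = false;
	}

	int main(void)
	{
		struct mmu_object mo = { false };

		add_object(&mo);
		add_object(&mo);	/* idempotent: still one entry */
		printf("entries: %d\n", tree_size);
		del_object(&mo);
		del_object(&mo);
		printf("entries: %d\n", tree_size);
		return 0;
	}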
drivers/gpu/drm/i915/i915_gpu_error.c
@@ -365,6 +365,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
 	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+	err_printf(m, "PCI Subsystem: %04x:%04x\n",
+		   dev->pdev->subsystem_vendor,
+		   dev->pdev->subsystem_device);
 	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

 	if (HAS_CSR(dev)) {
@ -376,6 +376,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
|
||||
static void guc_init_ctx_desc(struct intel_guc *guc,
|
||||
struct i915_guc_client *client)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = guc_to_i915(guc);
|
||||
struct intel_engine_cs *ring;
|
||||
struct intel_context *ctx = client->owner;
|
||||
struct guc_context_desc desc;
|
||||
struct sg_table *sg;
|
||||
@ -388,10 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
|
||||
desc.priority = client->priority;
|
||||
desc.db_id = client->doorbell_id;
|
||||
|
||||
for (i = 0; i < I915_NUM_RINGS; i++) {
|
||||
struct guc_execlist_context *lrc = &desc.lrc[i];
|
||||
struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
|
||||
struct intel_engine_cs *ring;
|
||||
for_each_ring(ring, dev_priv, i) {
|
||||
struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
|
||||
struct drm_i915_gem_object *obj;
|
||||
uint64_t ctx_desc;
|
||||
|
||||
@ -406,7 +406,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
|
||||
if (!obj)
|
||||
break; /* XXX: continue? */
|
||||
|
||||
ring = ringbuf->ring;
|
||||
ctx_desc = intel_lr_context_descriptor(ctx, ring);
|
||||
lrc->context_desc = (u32)ctx_desc;
|
||||
|
||||
@ -414,16 +413,16 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
|
||||
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
|
||||
LRC_STATE_PN * PAGE_SIZE;
|
||||
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
|
||||
(ring->id << GUC_ELC_ENGINE_OFFSET);
|
||||
(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
|
||||
|
||||
obj = ringbuf->obj;
|
||||
obj = ctx->engine[i].ringbuf->obj;
|
||||
|
||||
lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
|
||||
lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
|
||||
lrc->ring_next_free_location = lrc->ring_begin;
|
||||
lrc->ring_current_tail_pointer_value = 0;
|
||||
|
||||
desc.engines_used |= (1 << ring->id);
|
||||
desc.engines_used |= (1 << ring->guc_id);
|
||||
}
|
||||
|
||||
WARN_ON(desc.engines_used == 0);
|
||||
@ -510,7 +509,6 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
|
||||
static int guc_add_workqueue_item(struct i915_guc_client *gc,
|
||||
struct drm_i915_gem_request *rq)
|
||||
{
|
||||
enum intel_ring_id ring_id = rq->ring->id;
|
||||
struct guc_wq_item *wqi;
|
||||
void *base;
|
||||
u32 tail, wq_len, wq_off, space;
|
||||
@ -544,7 +542,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
|
||||
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
|
||||
wqi->header = WQ_TYPE_INORDER |
|
||||
(wq_len << WQ_LEN_SHIFT) |
|
||||
(ring_id << WQ_TARGET_SHIFT) |
|
||||
(rq->ring->guc_id << WQ_TARGET_SHIFT) |
|
||||
WQ_NO_WCFLUSH_WAIT;
|
||||
|
||||
/* The GuC wants only the low-order word of the context descriptor */
|
||||
@ -560,29 +558,6 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define CTX_RING_BUFFER_START 0x08
|
||||
|
||||
/* Update the ringbuffer pointer in a saved context image */
|
||||
static void lr_context_update(struct drm_i915_gem_request *rq)
|
||||
{
|
||||
enum intel_ring_id ring_id = rq->ring->id;
|
||||
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
|
||||
struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
|
||||
struct page *page;
|
||||
uint32_t *reg_state;
|
||||
|
||||
BUG_ON(!ctx_obj);
|
||||
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
|
||||
WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
|
||||
|
||||
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
|
||||
reg_state = kmap_atomic(page);
|
||||
|
||||
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
|
||||
|
||||
kunmap_atomic(reg_state);
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_guc_submit() - Submit commands through GuC
|
||||
* @client: the guc client where commands will go through
|
||||
@ -594,18 +569,14 @@ int i915_guc_submit(struct i915_guc_client *client,
|
||||
struct drm_i915_gem_request *rq)
|
||||
{
|
||||
struct intel_guc *guc = client->guc;
|
||||
enum intel_ring_id ring_id = rq->ring->id;
|
||||
unsigned int engine_id = rq->ring->guc_id;
|
||||
int q_ret, b_ret;
|
||||
|
||||
/* Need this because of the deferred pin ctx and ring */
|
||||
/* Shall we move this right after ring is pinned? */
|
||||
lr_context_update(rq);
|
||||
|
||||
q_ret = guc_add_workqueue_item(client, rq);
|
||||
if (q_ret == 0)
|
||||
b_ret = guc_ring_doorbell(client);
|
||||
|
||||
client->submissions[ring_id] += 1;
|
||||
client->submissions[engine_id] += 1;
|
||||
if (q_ret) {
|
||||
client->q_fail += 1;
|
||||
client->retcode = q_ret;
|
||||
@ -615,8 +586,8 @@ int i915_guc_submit(struct i915_guc_client *client,
|
||||
} else {
|
||||
client->retcode = 0;
|
||||
}
|
||||
guc->submissions[ring_id] += 1;
|
||||
guc->last_seqno[ring_id] = rq->seqno;
|
||||
guc->submissions[engine_id] += 1;
|
||||
guc->last_seqno[engine_id] = rq->seqno;
|
||||
|
||||
return q_ret;
|
||||
}
|
||||
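The hunks above route every per-engine array through the new guc_id field instead of the driver-internal ring id, so the driver and the GuC firmware agree on which slot belongs to which engine. A minimal sketch of that indirection, with invented names rather than the i915 types:

#include <stdint.h>

enum fw_engine_id { FW_RENDER = 0, FW_VIDEO = 1, FW_BLITTER = 2, FW_MAX = 5 };

struct engine {
	int driver_id;            /* driver's own enumeration order */
	enum fw_engine_id guc_id; /* id the firmware expects */
};

struct submit_stats {
	uint64_t submissions[FW_MAX]; /* sized and indexed by firmware id */
};

static void count_submission(struct submit_stats *stats, const struct engine *e)
{
	/* Index by the firmware id so both sides read the same slot. */
	stats->submissions[e->guc_id]++;
}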
@ -848,7 +819,7 @@ static void init_guc_policies(struct guc_policies *policies)
policies->max_num_work_items = POLICY_MAX_NUM_WI;

for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
for (i = 0; i < I915_NUM_RINGS; i++) {
for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];

policy->execution_quantum = 1000000;
@ -900,7 +871,7 @@ static void guc_create_ads(struct intel_guc *guc)
ads->golden_context_lrca = ring->status_page.gfx_addr;

for_each_ring(ring, dev_priv, i)
ads->eng_state_size[i] = intel_lr_context_size(ring);
ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);

/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
@ -912,12 +883,12 @@ static void guc_create_ads(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);

for (i = 0; i < I915_NUM_RINGS; i++) {
reg_state->mmio_white_list[i].mmio_start =
dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START;
for_each_ring(ring, dev_priv, i) {
reg_state->mmio_white_list[ring->guc_id].mmio_start =
ring->mmio_base + GUC_MMIO_WHITE_LIST_START;

/* Nothing to be saved or restored for now. */
reg_state->mmio_white_list[i].count = 0;
reg_state->mmio_white_list[ring->guc_id].count = 0;
}

ads->reg_state_addr = ads->scheduler_policies +

@ -127,7 +127,8 @@ MODULE_PARM_DESC(enable_execlists,
"(-1=auto [default], 0=disabled, 1=enabled)");

module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled [default], 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode)");

module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
@ -610,16 +610,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
#define IOSF_SB_BUSY (1<<0)
#define IOSF_PORT_BUNIT 0x3
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_BUNIT 0x03
#define IOSF_PORT_PUNIT 0x04
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
#define IOSF_PORT_DPIO_2 0x1a
#define IOSF_PORT_GPIO_NC 0x13
#define IOSF_PORT_CCK 0x14
#define IOSF_PORT_CCU 0xA9
#define IOSF_PORT_GPS_CORE 0x48
#define IOSF_PORT_FLISDSI 0x1B
#define IOSF_PORT_DPIO_2 0x1a
#define IOSF_PORT_FLISDSI 0x1b
#define IOSF_PORT_GPIO_SC 0x48
#define IOSF_PORT_GPIO_SUS 0xa8
#define IOSF_PORT_CCU 0xa9
#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)

@ -1635,6 +1636,9 @@ enum skl_disp_power_wells {
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */

#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
#define RING_MAX_NONPRIV_SLOTS 12

#define GEN7_TLB_RD_ADDR _MMIO(0x4700)

#if 0
@ -5945,6 +5949,7 @@ enum skl_disp_power_wells {
#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
#define IVB_PIPE_C_DISABLE (1 << 28)
#define ILK_HDCP_DISABLE (1 << 25)
#define ILK_eDP_A_DISABLE (1 << 24)
#define HSW_CDCLK_LIMIT (1 << 24)
@ -5991,10 +5996,19 @@ enum skl_disp_power_wells {
#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)

#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)

#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)

#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)

/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
@ -6040,6 +6054,8 @@ enum skl_disp_power_wells {
#define HDC_FORCE_NON_COHERENT (1<<4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)

#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)

/* GEN9 chicken */
#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
@ -6770,6 +6786,16 @@ enum skl_disp_power_wells {

#define VLV_PMWGICZ _MMIO(0x1300a4)

#define RC6_LOCATION _MMIO(0xD40)
#define RC6_CTX_IN_DRAM (1 << 0)
#define RC6_CTX_BASE _MMIO(0xD48)
#define RC6_CTX_BASE_MASK 0xFFFFFFF0
#define PWRCTX_MAXCNT_RCSUNIT _MMIO(0x2054)
#define PWRCTX_MAXCNT_VCSUNIT0 _MMIO(0x12054)
#define PWRCTX_MAXCNT_BCSUNIT _MMIO(0x22054)
#define PWRCTX_MAXCNT_VECSUNIT _MMIO(0x1A054)
#define PWRCTX_MAXCNT_VCSUNIT1 _MMIO(0x1C054)
#define IDLE_TIME_MASK 0xFFFFF
#define FORCEWAKE _MMIO(0xA18C)
#define FORCEWAKE_VLV _MMIO(0x1300b0)
#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
@ -6908,6 +6934,7 @@ enum skl_disp_power_wells {
#define GEN6_RPDEUC _MMIO(0xA084)
#define GEN6_RPDEUCSW _MMIO(0xA088)
#define GEN6_RC_STATE _MMIO(0xA094)
#define RC6_STATE (1 << 18)
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
@ -7519,7 +7546,7 @@ enum skl_disp_power_wells {
#define DPLL_CFGCR2_PDIV_7 (4<<2)
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)

#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2)
#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)

/* BXT display engine PLL */
@ -8159,4 +8186,11 @@ enum skl_disp_power_wells {
#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */

/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */

#endif /* _I915_REG_H_ */
@ -49,7 +49,7 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
} else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
} else if (INTEL_INFO(dev)->gen <= 4) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
@ -84,7 +84,7 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
} else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
} else if (INTEL_INFO(dev)->gen <= 4) {
I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev)
}

/* only restore FBC info on the platform that supports FBC*/
intel_fbc_disable(dev_priv);
intel_fbc_global_disable(dev_priv);

/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
@ -216,6 +216,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
int max_dotclk = to_i915(dev)->max_dotclk_freq;

int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@ -231,6 +232,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;

if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;

/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
if (HAS_PCH_LPT(dev) &&
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
@ -179,7 +179,8 @@ static const struct stepping_info kbl_stepping_info[] = {
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
{'G', '0'}, {'H', '0'}, {'I', '0'}
{'G', '0'}, {'H', '0'}, {'I', '0'},
{'J', '0'}, {'K', '0'}
};

static const struct stepping_info bxt_stepping_info[] = {
@ -1531,7 +1531,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
intel_encoder->type == INTEL_OUTPUT_DP_MST) {
switch (crtc_state->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
@ -1545,8 +1546,10 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
}

cfgcr1 = cfgcr2 = 0;
} else /* eDP */
} else if (intel_encoder->type == INTEL_OUTPUT_EDP) {
return true;
} else
return false;

memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@ -3281,7 +3284,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
intel_dig_port->max_lanes = max_lanes;

/*
 * Bspec says that DDI_A_4_LANES is the only supported configuration
@ -3294,9 +3296,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
}

intel_dig_port->max_lanes = max_lanes;

intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
@ -2284,7 +2284,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
struct intel_rotation_info *info = &view->params.rotation_info;
struct intel_rotation_info *info = &view->params.rotated;
unsigned int tile_size, tile_width, tile_height, cpp;

*view = i915_ggtt_view_normal;
@ -2306,7 +2306,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
tile_size = intel_tile_size(dev_priv);

cpp = drm_format_plane_cpp(fb->pixel_format, 0);
tile_width = intel_tile_width(dev_priv, cpp, fb->modifier[0]);
tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
tile_height = tile_size / tile_width;

info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
@ -2448,11 +2448,11 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,

/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two. */
unsigned long intel_compute_tile_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
uint64_t fb_modifier,
unsigned int cpp,
unsigned int pitch)
u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
uint64_t fb_modifier,
unsigned int cpp,
unsigned int pitch)
{
if (fb_modifier != DRM_FORMAT_MOD_NONE) {
unsigned int tile_size, tile_width, tile_height;
@ -2706,14 +2706,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
int x = plane_state->src.x1 >> 16;
int y = plane_state->src.y1 >> 16;
u32 linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
int pixel_size;

pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int x = plane_state->src.x1 >> 16;
int y = plane_state->src.y1 >> 16;

dspcntr = DISPPLANE_GAMMA_ENABLE;

@ -2771,13 +2769,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
if (IS_G4X(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

linear_offset = y * fb->pitches[0] + x * pixel_size;
linear_offset = y * fb->pitches[0] + x * cpp;

if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0],
pixel_size,
fb->modifier[0], cpp,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
@ -2794,7 +2791,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
data and adding to linear_offset*/
linear_offset +=
(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
(crtc_state->pipe_src_w - 1) * pixel_size;
(crtc_state->pipe_src_w - 1) * cpp;
}

intel_crtc->adjusted_x = x;
@ -2839,10 +2836,10 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int x = plane_state->src.x1 >> 16;
int y = plane_state->src.y1 >> 16;

@ -2881,11 +2878,10 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

linear_offset = y * fb->pitches[0] + x * pixel_size;
linear_offset = y * fb->pitches[0] + x * cpp;
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0],
pixel_size,
fb->modifier[0], cpp,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
@ -2899,7 +2895,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
data and adding to linear_offset*/
linear_offset +=
(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
(crtc_state->pipe_src_w - 1) * pixel_size;
(crtc_state->pipe_src_w - 1) * cpp;
}
}

@ -2951,7 +2947,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
offset = vma->node.start;

if (plane == 1) {
offset += vma->ggtt_view.params.rotation_info.uv_start_page *
offset += vma->ggtt_view.params.rotated.uv_start_page *
PAGE_SIZE;
}

@ -3160,9 +3156,6 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = to_intel_crtc(crtc)->pipe;

if (dev_priv->fbc.deactivate)
dev_priv->fbc.deactivate(dev_priv);

I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
POSTING_READ(PLANE_SURF(pipe, 0));
@ -4803,7 +4796,7 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
intel_update_watermarks(&crtc->base);

if (atomic->update_fbc)
intel_fbc_update(crtc);
intel_fbc_post_update(crtc);

if (atomic->post_enable_primary)
intel_post_enable_primary(&crtc->base);
@ -4811,26 +4804,39 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
memset(atomic, 0, sizeof(*atomic));
}

static void intel_pre_plane_update(struct intel_crtc *crtc)
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
bool modeset = needs_modeset(&pipe_config->base);

if (atomic->disable_fbc)
intel_fbc_deactivate(crtc);
if (atomic->update_fbc)
intel_fbc_pre_update(crtc);

if (crtc->atomic.disable_ips)
hsw_disable_ips(crtc);
if (old_pri_state) {
struct intel_plane_state *primary_state =
to_intel_plane_state(primary->state);
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);

if (atomic->pre_disable_primary)
intel_pre_disable_primary(&crtc->base);
if (old_primary_state->visible &&
(modeset || !primary_state->visible))
intel_pre_disable_primary(&crtc->base);
}

if (pipe_config->disable_cxsr) {
crtc->wm.cxsr_allowed = false;
intel_set_memory_cxsr(dev_priv, false);

if (old_crtc_state->base.active)
intel_set_memory_cxsr(dev_priv, false);
}

if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
@ -4931,8 +4937,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
intel_wait_for_vblank(dev, pipe);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

intel_fbc_enable(intel_crtc);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
@ -5045,8 +5049,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_wait_for_vblank(dev, hsw_workaround_pipe);
intel_wait_for_vblank(dev, hsw_workaround_pipe);
}

intel_fbc_enable(intel_crtc);
}

static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
@ -5127,8 +5129,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}

intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

intel_fbc_disable_crtc(intel_crtc);
}

static void haswell_crtc_disable(struct drm_crtc *crtc)
@ -5179,8 +5179,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
}

intel_fbc_disable_crtc(intel_crtc);
}

static void i9xx_pfit_enable(struct intel_crtc *crtc)
@ -6291,8 +6289,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)

for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);

intel_fbc_enable(intel_crtc);
}

static void i9xx_pfit_disable(struct intel_crtc *crtc)
@ -6355,8 +6351,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)

if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

intel_fbc_disable_crtc(intel_crtc);
}

static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
@ -6380,6 +6374,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)

dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
intel_update_watermarks(crtc);
intel_disable_shared_dpll(intel_crtc);

@ -9853,8 +9848,13 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
if (!intel_ddi_pll_select(crtc, crtc_state))
return -EINVAL;
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);

if (intel_encoder->type != INTEL_OUTPUT_DSI) {
if (!intel_ddi_pll_select(crtc, crtc_state))
return -EINVAL;
}

crtc->lowfreq_avail = false;
@ -10914,6 +10914,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_unlock(&dev->struct_mutex);

intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
intel_fbc_post_update(crtc);
drm_framebuffer_unreference(work->old_fb);

BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
@ -11629,6 +11630,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

crtc->primary->fb = fb;
update_state_fb(crtc->primary);
intel_fbc_pre_update(intel_crtc);

work->pending_flip_obj = obj;

@ -11713,7 +11715,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);

intel_fbc_deactivate(intel_crtc);
intel_frontbuffer_flip_prepare(dev,
to_intel_plane(primary)->frontbuffer_bit);

@ -11724,7 +11725,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
if (request)
if (!IS_ERR_OR_NULL(request))
i915_gem_request_cancel(request);
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
@ -11835,7 +11836,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *plane = plane_state->plane;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->state);
int idx = intel_crtc->base.base.id, ret;
@ -11901,39 +11901,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,

switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
intel_crtc->atomic.pre_disable_primary = turn_off;
intel_crtc->atomic.post_enable_primary = turn_on;

if (turn_off) {
/*
 * FIXME: Actually if we will still have any other
 * plane enabled on the pipe we could let IPS enabled
 * still, but for now lets consider that when we make
 * primary invisible by setting DSPCNTR to 0 on
 * update_primary_plane function IPS needs to be
 * disable.
 */
intel_crtc->atomic.disable_ips = true;

intel_crtc->atomic.disable_fbc = true;
}

/*
 * FBC does not work on some platforms for rotated
 * planes, so disable it when rotation is not 0 and
 * update it when rotation is set back to 0.
 *
 * FIXME: This is redundant with the fbc update done in
 * the primary plane enable function except that that
 * one is done too late. We eventually need to unify
 * this.
 */

if (visible &&
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
dev_priv->fbc.crtc == intel_crtc &&
plane_state->rotation != BIT(DRM_ROTATE_0))
intel_crtc->atomic.disable_fbc = true;
intel_crtc->atomic.update_fbc = true;

/*
 * BDW signals flip done immediately if the plane
@ -11943,7 +11912,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
if (turn_on && IS_BROADWELL(dev))
intel_crtc->atomic.wait_vblank = true;

intel_crtc->atomic.update_fbc |= visible || mode_changed;
break;
case DRM_PLANE_TYPE_CURSOR:
break;
@ -13348,6 +13316,7 @@ static void calc_watermark_data(struct drm_atomic_state *state)
static int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@ -13390,7 +13359,7 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;

if (i915.fastboot &&
intel_pipe_config_compare(state->dev,
intel_pipe_config_compare(dev,
to_intel_crtc_state(crtc->state),
pipe_config, true)) {
crtc_state->mode_changed = false;
@ -13416,12 +13385,13 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
return ret;
} else
intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
intel_state->cdclk = dev_priv->cdclk_freq;

ret = drm_atomic_helper_check_planes(state->dev, state);
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;

intel_fbc_choose_crtc(dev_priv, state);
calc_watermark_data(state);

return 0;
@ -13542,12 +13512,13 @@ static int intel_atomic_commit(struct drm_device *dev,
if (!needs_modeset(crtc->state))
continue;

intel_pre_plane_update(intel_crtc);
intel_pre_plane_update(to_intel_crtc_state(crtc_state));

if (crtc_state->active) {
intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
intel_disable_shared_dpll(intel_crtc);

/*
@ -13597,7 +13568,10 @@ static int intel_atomic_commit(struct drm_device *dev,
}

if (!modeset)
intel_pre_plane_update(intel_crtc);
intel_pre_plane_update(to_intel_crtc_state(crtc_state));

if (crtc->state->active && intel_crtc->atomic.update_fbc)
intel_fbc_enable(intel_crtc);

if (crtc->state->active &&
(crtc->state->planes_changed || update_pipe))
@ -14682,10 +14656,12 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
u32 gen = INTEL_INFO(dev)->gen;

if (gen >= 9) {
int cpp = drm_format_plane_cpp(pixel_format, 0);

/* "The stride in bytes must not exceed the size of 8K
 * pixels and 32K bytes."
 */
return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
return min(8192 * cpp, 32768);
} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
return 32*1024;
} else if (gen >= 4) {
@ -15934,6 +15910,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
modeset_put_power_domains(dev_priv, put_domains);
}
intel_display_set_init_power(dev_priv, false);

intel_fbc_init_pipe_state(dev_priv);
}

void intel_display_resume(struct drm_device *dev)
@ -16063,7 +16041,7 @@ void intel_modeset_cleanup(struct drm_device *dev)

intel_unregister_dsm_handler();

intel_fbc_disable(dev_priv);
intel_fbc_global_disable(dev_priv);

/* flush any delayed tasks or pending work */
flush_scheduled_work();
@ -203,6 +203,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

if (is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
@ -220,7 +221,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);

if (mode_rate > max_rate)
if (mode_rate > max_rate || target_clock > max_dotclk)
return MODE_CLOCK_HIGH;

if (mode->clock < 10000)
@ -979,7 +980,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (WARN_ON(txsize > 20))
return -E2BIG;

memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
else
WARN_ON(msg->size);

ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
if (ret > 0) {
@ -1798,12 +1802,21 @@ static void wait_panel_off(struct intel_dp *intel_dp)

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
ktime_t panel_power_on_time;
s64 panel_power_off_duration;

DRM_DEBUG_KMS("Wait for panel power cycle\n");

/* take the difference of current time and panel power off time
 * and then make panel wait for t11_t12 if needed. */
panel_power_on_time = ktime_get_boottime();
panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

/* When we disable the VDD override bit last we have to do the manual
 * wait. */
wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
intel_dp->panel_power_cycle_delay);
if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
wait_remaining_ms_from_jiffies(jiffies,
intel_dp->panel_power_cycle_delay - panel_power_off_duration);

wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
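The hunk above replaces a jiffies-based "last power cycle" stamp with a boottime ktime stamp, so the driver only sleeps for whatever part of the t11_t12 power-cycle delay has not already elapsed. A minimal userspace analogue of the same idea, with illustrative names rather than the kernel helpers:

#include <stdint.h>
#include <time.h>
#include <unistd.h>

/* Milliseconds elapsed since an earlier CLOCK_BOOTTIME stamp. */
static int64_t ms_since(const struct timespec *then)
{
	struct timespec now;

	clock_gettime(CLOCK_BOOTTIME, &now);
	return (now.tv_sec - then->tv_sec) * 1000 +
	       (now.tv_nsec - then->tv_nsec) / 1000000;
}

/* Sleep only for the part of min_cycle_ms that has not yet passed. */
static void wait_power_cycle(const struct timespec *power_off_time,
			     int64_t min_cycle_ms)
{
	int64_t elapsed = ms_since(power_off_time);

	if (elapsed < min_cycle_ms)
		usleep((min_cycle_ms - elapsed) * 1000);
}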
@ -1955,7 +1968,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

if ((pp & POWER_TARGET_ON) == 0)
intel_dp->last_power_cycle = jiffies;
intel_dp->panel_power_off_time = ktime_get_boottime();

power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
@ -2104,7 +2117,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);

intel_dp->last_power_cycle = jiffies;
intel_dp->panel_power_off_time = ktime_get_boottime();
wait_panel_off(intel_dp);

/* We got a reference when we enabled the VDD. */
@ -3995,7 +4008,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
} while (--attempts && count);

if (attempts == 0) {
DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
ret = -ETIMEDOUT;
}

@ -5102,7 +5115,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect

static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
intel_dp->last_power_cycle = jiffies;
intel_dp->panel_power_off_time = ktime_get_boottime();
intel_dp->last_power_on = jiffies;
intel_dp->last_backlight_off = jiffies;
}
@ -215,27 +215,46 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
}
}

/*
 * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
 * or 1.2 devices that support it, Training Pattern 2 otherwise.
 */
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
{
u32 training_pattern = DP_TRAINING_PATTERN_2;
bool source_tps3, sink_tps3;

/*
 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
 * also mandatory for downstream devices that support HBR2. However, not
 * all sinks follow the spec.
 *
 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
 * supported in source but still not enabled.
 */
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);

if (source_tps3 && sink_tps3) {
training_pattern = DP_TRAINING_PATTERN_3;
} else if (intel_dp->link_rate == 540000) {
if (!source_tps3)
DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
if (!sink_tps3)
DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
}

return training_pattern;
}

static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
bool channel_eq = false;
int tries, cr_tries;
uint32_t training_pattern = DP_TRAINING_PATTERN_2;
u32 training_pattern;

/*
 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
 *
 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
 * also mandatory for downstream devices that support HBR2.
 *
 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
 * supported but still not enabled.
 */
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
training_pattern = DP_TRAINING_PATTERN_3;
else if (intel_dp->link_rate == 540000)
DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
training_pattern = intel_dp_training_pattern(intel_dp);

/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
@ -371,6 +371,8 @@ static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@ -378,6 +380,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;

if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;

return MODE_OK;
}
@ -492,6 +492,8 @@ struct intel_crtc_state {

bool ips_enabled;

bool enable_fbc;

bool double_wide;

bool dp_encoder_is_mst;
@ -542,16 +544,15 @@ struct intel_mmio_flip {
 */
struct intel_crtc_atomic_commit {
/* Sleepable operations to perform before commit */
bool disable_fbc;
bool disable_ips;
bool pre_disable_primary;

/* Sleepable operations to perform after commit */
unsigned fb_bits;
bool wait_vblank;
bool update_fbc;
bool post_enable_primary;
unsigned update_sprite_watermarks;

/* Sleepable operations to perform before and after commit */
bool update_fbc;
};

struct intel_crtc {
@ -575,7 +576,7 @@ struct intel_crtc {
/* Display surface base address adjustment for pageflips. Note that on
 * gen4+ this only adjusts up to a tile, offsets within a tile are
 * handled in the hw itself (with the TILEOFF register). */
unsigned long dspaddr_offset;
u32 dspaddr_offset;
int adjusted_x;
int adjusted_y;

@ -770,9 +771,9 @@ struct intel_dp {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
ktime_t panel_power_off_time;

struct notifier_block edp_notifier;

@ -1172,11 +1173,11 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
unsigned long intel_compute_tile_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
uint64_t fb_modifier,
unsigned int cpp,
unsigned int pitch);
u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
uint64_t fb_modifier,
unsigned int cpp,
unsigned int pitch);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@ -1327,13 +1328,16 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
#endif

/* intel_fbc.c */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
void intel_fbc_deactivate(struct intel_crtc *crtc);
void intel_fbc_update(struct intel_crtc *crtc);
void intel_fbc_pre_update(struct intel_crtc *crtc);
void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
void intel_fbc_enable(struct intel_crtc *crtc);
void intel_fbc_disable(struct drm_i915_private *dev_priv);
void intel_fbc_disable_crtc(struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
@ -1559,6 +1563,7 @@ void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);

/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev,
@ -478,8 +478,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)

DRM_DEBUG_KMS("\n");

intel_dsi_prepare(encoder);
intel_enable_dsi_pll(encoder);
intel_dsi_prepare(encoder);

/* Panel Enable over CRC PMIC */
if (intel_dsi->gpio_panel)
@ -204,10 +204,28 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;

if (dev_priv->vbt.dsi.seq_version >= 3)
data++;

gpio = *data++;

/* pull up/down */
action = *data++;
action = *data++ & 1;

if (gpio >= ARRAY_SIZE(gtable)) {
DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
goto out;
}

if (!IS_VALLEYVIEW(dev_priv)) {
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
goto out;
}

if (dev_priv->vbt.dsi.seq_version >= 3) {
DRM_DEBUG_KMS("GPIO element v3 not supported\n");
goto out;
}

function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
@ -216,16 +234,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
if (!gtable[gpio].init) {
/* program the function */
/* FIXME: remove constant below */
vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
0x2000CC00);
gtable[gpio].init = 1;
}

val = 0x4 | action;

/* pull up/down */
vlv_gpio_nc_write(dev_priv, pad, val);
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
mutex_unlock(&dev_priv->sb_lock);

out:
return data;
}
File diff suppressed because it is too large
@ -46,7 +46,7 @@ struct i915_guc_client {
uint32_t wq_head;

/* GuC submission statistics & status */
uint64_t submissions[I915_NUM_RINGS];
uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
@ -106,8 +106,8 @@ struct intel_guc {
uint32_t action_fail; /* Total number of failures */
int32_t action_err; /* Last error code */

uint64_t submissions[I915_NUM_RINGS];
uint32_t last_seqno[I915_NUM_RINGS];
uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
};

/* intel_guc_loader.c */
@ -44,6 +44,13 @@
#define GUC_MAX_GPU_CONTEXTS 1024
#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS

#define GUC_RENDER_ENGINE 0
#define GUC_VIDEO_ENGINE 1
#define GUC_BLITTER_ENGINE 2
#define GUC_VIDEOENHANCE_ENGINE 3
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)

/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
@ -285,7 +292,7 @@ struct guc_context_desc {
u64 db_trigger_phy;
u16 db_id;

struct guc_execlist_context lrc[I915_NUM_RINGS];
struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];

u8 attribute;

@ -344,7 +351,7 @@ struct guc_policy {
} __packed;

struct guc_policies {
struct guc_policy policy[GUC_CTX_PRIORITY_NUM][I915_NUM_RINGS];
struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];

/* In micro seconds. How much time to allow before DPC processing is
 * called back via interrupt (to prevent DPC queue drain starving).
@ -388,14 +395,14 @@ struct guc_mmio_regset {

struct guc_mmio_reg_state {
struct guc_mmio_regset global_reg;
struct guc_mmio_regset engine_reg[I915_NUM_RINGS];
struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];

/* MMIO registers that are set as non privileged */
struct __packed {
u32 mmio_start;
u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
u32 count;
} mmio_white_list[I915_NUM_RINGS];
} mmio_white_list[GUC_MAX_ENGINES_NUM];
} __packed;

/* GuC Additional Data Struct */
@ -406,7 +413,7 @@ struct guc_ads {
u32 golden_context_lrca;
u32 scheduler_policies;
u32 reserved0[3];
u32 eng_state_size[I915_NUM_RINGS];
u32 eng_state_size[GUC_MAX_ENGINES_NUM];
u32 reserved2[4];
} __packed;
drivers/gpu/drm/i915/intel_hdmi.c (Executable file → Normal file)
@ -1202,11 +1202,19 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
enum drm_mode_status status;
int clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;

clock = mode->clock;

if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;

if (clock > max_dotclk)
return MODE_CLOCK_HIGH;

if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
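The HDMI check follows the same shape as the CRT, DP and MST checks earlier in this series: compute the effective pixel clock, then reject the mode once it exceeds the platform's maximum dot clock. A standalone sketch with illustrative names and limits, not the drm API:

enum mode_status { STATUS_OK, STATUS_CLOCK_HIGH };

/* Illustrative only: validate a mode clock against a platform limit. */
static enum mode_status check_dotclock(int mode_clock_khz, int frame_packed_3d,
				       int max_dotclk_khz)
{
	int clock = mode_clock_khz;

	if (frame_packed_3d)	/* frame-packed 3D doubles the pixel rate */
		clock *= 2;

	return clock > max_dotclk_khz ? STATUS_CLOCK_HIGH : STATUS_OK;
}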
@ -683,7 +683,7 @@ int intel_setup_gmbus(struct drm_device *dev)
return 0;

err:
while (--pin) {
while (pin--) {
if (!intel_gmbus_is_valid_pin(dev_priv, pin))
continue;
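The one-character fix above matters: with `while (--pin)` the cleanup loop never visits pin 0, and if the failure happens at the very first pin the pre-decrement can run away past the start of the range. `while (pin--)` walks back over exactly the pins that were set up. A generic sketch of the unwind idiom, with hypothetical init_one()/teardown_one() helpers:

/* Hypothetical helpers standing in for the real setup/teardown calls. */
static int init_one(int i);
static void teardown_one(int i);

static int init_all(int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = init_one(i);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)		/* visits i-1 .. 0; "--i" alone would skip entry 0 */
		teardown_one(i);
	return err;
}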
@ -225,7 +225,8 @@ enum {
#define GEN8_CTX_ID_SHIFT 32
#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17

static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine);
static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
struct drm_i915_gem_object *default_ctx_obj);

@ -393,7 +394,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;

reg_state[CTX_RING_TAIL+1] = rq->tail;
reg_state[CTX_RING_BUFFER_START+1] = rq->ringbuf->vma->node.start;

if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* True 32b PPGTT with dynamic page allocation: update PDP
@ -599,7 +599,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
int num_elements = 0;

if (request->ctx != request->i915->kernel_context)
intel_lr_context_pin(request);
intel_lr_context_pin(request->ctx, ring);

i915_gem_request_reference(request);

@ -704,7 +704,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
}

if (request->ctx != request->i915->kernel_context)
ret = intel_lr_context_pin(request);
ret = intel_lr_context_pin(request->ctx, request->ring);

return ret;
}
@ -765,6 +765,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct drm_i915_private *dev_priv = request->i915;
struct intel_engine_cs *engine = request->ring;

intel_logical_ring_advance(ringbuf);
request->tail = ringbuf->tail;
@ -779,9 +780,20 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);

if (intel_ring_stopped(request->ring))
if (intel_ring_stopped(engine))
return 0;

if (engine->last_context != request->ctx) {
if (engine->last_context)
intel_lr_context_unpin(engine->last_context, engine);
if (request->ctx != request->i915->kernel_context) {
intel_lr_context_pin(request->ctx, engine);
engine->last_context = request->ctx;
} else {
engine->last_context = NULL;
}
}

if (dev_priv->guc.execbuf_client)
i915_guc_submit(dev_priv->guc.execbuf_client, request);
else
@ -1015,7 +1027,8 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
ctx->engine[ring->id].state;

if (ctx_obj && (ctx != req->i915->kernel_context))
intel_lr_context_unpin(req);
intel_lr_context_unpin(ctx, ring);

list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
@ -1059,14 +1072,15 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
return 0;
}

static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
struct intel_context *ctx)
static int intel_lr_context_do_pin(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct page *lrc_state_page;
uint32_t *lrc_reg_state;
int ret;

WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
@ -1088,7 +1102,9 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,

ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
intel_lr_context_descriptor_update(ctx, ring);
ctx->engine[ring->id].lrc_reg_state = kmap(lrc_state_page);
lrc_reg_state = kmap(lrc_state_page);
lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
ctx_obj->dirty = true;

/* Invalidate GuC TLB. */
@ -1103,41 +1119,44 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
return ret;
}

static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
int ret = 0;
struct intel_engine_cs *ring = rq->ring;

if (rq->ctx->engine[ring->id].pin_count++ == 0) {
ret = intel_lr_context_do_pin(ring, rq->ctx);
if (ctx->engine[engine->id].pin_count++ == 0) {
ret = intel_lr_context_do_pin(ctx, engine);
if (ret)
goto reset_pin_count;

i915_gem_context_reference(ctx);
}
return ret;

reset_pin_count:
rq->ctx->engine[ring->id].pin_count = 0;
ctx->engine[engine->id].pin_count = 0;
return ret;
}

void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
struct intel_engine_cs *ring = rq->ring;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;

WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));

if (!ctx_obj)
if (WARN_ON_ONCE(!ctx_obj))
return;

if (--rq->ctx->engine[ring->id].pin_count == 0) {
kunmap(kmap_to_page(rq->ctx->engine[ring->id].lrc_reg_state));
intel_unpin_ringbuffer_obj(ringbuf);
if (--ctx->engine[engine->id].pin_count == 0) {
kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
rq->ctx->engine[ring->id].lrc_vma = NULL;
rq->ctx->engine[ring->id].lrc_desc = 0;
rq->ctx->engine[ring->id].lrc_reg_state = NULL;
ctx->engine[engine->id].lrc_vma = NULL;
ctx->engine[engine->id].lrc_desc = 0;
ctx->engine[engine->id].lrc_reg_state = NULL;

i915_gem_context_unreference(ctx);
}
}
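The refactored pin/unpin above keys everything on the (context, engine) pair and takes a context reference on the first pin, dropped on the last unpin. A compact sketch of that last-reference pattern, using invented names and hypothetical map_state()/unmap_state() helpers:

struct ctx_engine {
	unsigned int pin_count;
	void *reg_state;	/* stands in for the kmap'ed context image */
};

/* Hypothetical helpers standing in for the real mapping calls. */
void *map_state(struct ctx_engine *ce);
void unmap_state(void *reg_state);

static int ctx_pin(struct ctx_engine *ce)
{
	if (ce->pin_count++ == 0) {	/* only the first pin does real work */
		ce->reg_state = map_state(ce);
		if (!ce->reg_state) {
			ce->pin_count = 0;
			return -1;
		}
	}
	return 0;
}

static void ctx_unpin(struct ctx_engine *ce)
{
	if (--ce->pin_count == 0) {	/* last unpin releases the mapping */
		unmap_state(ce->reg_state);
		ce->reg_state = NULL;
	}
}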
@ -2062,7 +2081,7 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
|
||||
goto error;
|
||||
|
||||
/* As this is the default context, always pin it */
|
||||
ret = intel_lr_context_do_pin(ring, dctx);
|
||||
ret = intel_lr_context_do_pin(dctx, ring);
|
||||
if (ret) {
|
||||
DRM_ERROR(
|
||||
"Failed to pin and map ringbuffer %s: %d\n",
|
||||
@ -2086,6 +2105,7 @@ static int logical_render_ring_init(struct drm_device *dev)
|
||||
ring->name = "render ring";
|
||||
ring->id = RCS;
|
||||
ring->exec_id = I915_EXEC_RENDER;
|
||||
ring->guc_id = GUC_RENDER_ENGINE;
|
||||
ring->mmio_base = RENDER_RING_BASE;
|
||||
|
||||
logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
|
||||
@ -2137,6 +2157,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
|
||||
ring->name = "bsd ring";
|
||||
ring->id = VCS;
|
||||
ring->exec_id = I915_EXEC_BSD;
|
||||
ring->guc_id = GUC_VIDEO_ENGINE;
|
||||
ring->mmio_base = GEN6_BSD_RING_BASE;
|
||||
|
||||
logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
|
||||
@ -2153,6 +2174,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
|
||||
ring->name = "bsd2 ring";
|
||||
ring->id = VCS2;
|
||||
ring->exec_id = I915_EXEC_BSD;
|
||||
ring->guc_id = GUC_VIDEO_ENGINE2;
|
||||
ring->mmio_base = GEN8_BSD2_RING_BASE;
|
||||
|
||||
logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
|
||||
@ -2169,6 +2191,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
|
||||
ring->name = "blitter ring";
|
||||
ring->id = BCS;
|
||||
ring->exec_id = I915_EXEC_BLT;
|
||||
ring->guc_id = GUC_BLITTER_ENGINE;
|
||||
ring->mmio_base = BLT_RING_BASE;
|
||||
|
||||
logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
|
||||
@ -2185,6 +2208,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
|
||||
ring->name = "video enhancement ring";
|
||||
ring->id = VECS;
|
||||
ring->exec_id = I915_EXEC_VEBOX;
|
||||
ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
|
||||
ring->mmio_base = VEBOX_RING_BASE;
|
||||
|
||||
logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
|
||||
|
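Editor's note: the unpin rework above hinges on a last-reference pattern: the
mappings are torn down only when the per-engine pin_count drops to zero. A
minimal standalone C sketch of that pattern (hypothetical types, not the
driver's actual structs):

	#include <assert.h>
	#include <stdio.h>

	struct engine_state {
		int pin_count;
		int mapped; /* stands in for the kmap/ringbuffer/GGTT state */
	};

	static void ctx_unpin(struct engine_state *es)
	{
		assert(es->pin_count > 0);
		if (--es->pin_count == 0) {
			/* only the final unpin releases the mappings */
			es->mapped = 0;
			printf("released\n");
		}
	}

	int main(void)
	{
		struct engine_state es = { .pin_count = 2, .mapped = 1 };
		ctx_unpin(&es); /* still pinned once, nothing released */
		ctx_unpin(&es); /* last reference: mappings dropped */
		return 0;
	}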
@@ -101,7 +101,8 @@ void intel_lr_context_free(struct intel_context *ctx);
 uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
				    struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct drm_i915_gem_request *req);
+void intel_lr_context_unpin(struct intel_context *ctx,
+			    struct intel_engine_cs *engine);
 void intel_lr_context_reset(struct drm_device *dev,
			struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
@@ -548,7 +548,7 @@ static const struct intel_watermark_params i845_wm_info = {
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
- * @pixel_size: display pixel size
+ * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
@@ -564,8 +564,7 @@ static const struct intel_watermark_params i845_wm_info = {
 */
 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
-					int fifo_size,
-					int pixel_size,
+					int fifo_size, int cpp,
					unsigned long latency_ns)
 {
	long entries_required, wm_size;
@@ -576,7 +575,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
-	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
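Editor's note: to make the renamed arithmetic concrete, here is the same
entries_required computation as a standalone C program, with assumed example
numbers (148.5 MHz pixel clock, XRGB8888, 5 us latency, 64-byte cachelines):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long clock_in_khz = 148500, cpp = 4, latency_ns = 5000;
		unsigned long cacheline_size = 64;

		/* bytes the FIFO must buffer to cover the memory latency */
		unsigned long bytes = ((clock_in_khz / 1000) * cpp * latency_ns) / 1000;
		unsigned long entries = DIV_ROUND_UP(bytes, cacheline_size);

		/* prints: 2960 bytes -> 47 cachelines */
		printf("%lu bytes -> %lu cachelines\n", bytes, entries);
		return 0;
	}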
@@ -640,13 +639,13 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
-		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
-					pixel_size, latency->display_sr);
+					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
@@ -656,7 +655,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
-					pixel_size, latency->cursor_sr);
+					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
@@ -665,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
-					pixel_size, latency->display_hpll_disable);
+					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
@@ -674,7 +673,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
-					pixel_size, latency->cursor_hpll_disable);
+					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
@@ -698,7 +697,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 {
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
-	int htotal, hdisplay, clock, pixel_size;
+	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

@@ -713,10 +712,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
-	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
@@ -728,7 +727,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
-	entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
+	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
@@ -784,7 +783,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 {
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
-	int hdisplay, htotal, pixel_size, clock;
+	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
@@ -800,21 +799,21 @@ static bool g4x_compute_srwm(struct drm_device *dev,
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
-	line_size = hdisplay * pixel_size;
+	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
-	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
-	entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
+	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
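Editor's note: g4x_compute_srwm picks the smaller of two FIFO estimates, a
rate-based one ("small") and a line-based one ("large"). A hedged standalone
sketch of that choice with made-up numbers, not real hardware values:

	#include <stdio.h>

	static int min_int(int a, int b) { return a < b ? a : b; }

	int main(void)
	{
		/* assumed example values */
		int clock = 148500, cpp = 4, latency_ns = 12000;
		int line_count = 3, hdisplay = 1920;

		int small = ((clock * cpp / 1000) * latency_ns) / 1000;
		int large = line_count * hdisplay * cpp;

		/* the watermark is sized from the cheaper of the two bounds */
		printf("small=%d large=%d -> use %d bytes\n",
		       small, large, min_int(small, large));
		return 0;
	}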
@@ -906,13 +905,13 @@ enum vlv_wm_level {
 static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
-				   unsigned int bytes_per_pixel,
+				   unsigned int cpp,
				   unsigned int latency)
 {
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
-	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
@@ -941,7 +940,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     int level)
 {
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	int clock, htotal, pixel_size, width, wm;
+	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;
@@ -949,7 +948,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
	if (!state->visible)
		return 0;

-	pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
@@ -965,7 +964,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
	 */
		wm = 63;
	} else {
-		wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

@@ -1439,7 +1438,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

@@ -1447,7 +1446,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * hdisplay;
+			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
@@ -1457,7 +1456,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
			entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * crtc->cursor->state->crtc_w;
+			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1518,7 +1517,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
-		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
+		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

@@ -1540,7 +1539,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
-		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
+		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

@@ -1586,7 +1585,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
-		int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
+		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

@@ -1594,7 +1593,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * hdisplay;
+			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
@@ -1685,15 +1684,14 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
 }

 /* latency must be in 0.1us units. */
-static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
-			       uint32_t latency)
+static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
 {
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

-	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
+	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
@@ -1701,7 +1699,7 @@ static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)

 /* latency must be in 0.1us units. */
 static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
-			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
 {
	uint32_t ret;
@@ -1712,13 +1710,13 @@ static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
-	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
 }

 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
-			   uint8_t bytes_per_pixel)
+			   uint8_t cpp)
 {
	/*
	 * Neither of these should be possible since this function shouldn't be
@@ -1726,12 +1724,12 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
-	if (WARN_ON(!bytes_per_pixel))
+	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

-	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
+	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
 }

 struct ilk_wm_maximums {
@@ -1750,13 +1748,14 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   uint32_t mem_value,
				   bool is_lp)
 {
-	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	int cpp = pstate->base.fb ?
+		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

-	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;
@@ -1764,8 +1763,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
-				 bpp,
-				 mem_value);
+				 cpp, mem_value);

	return min(method1, method2);
 }
@@ -1778,18 +1776,18 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
 {
-	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	int cpp = pstate->base.fb ?
+		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

-	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
-				 bpp,
-				 mem_value);
+				 cpp, mem_value);
	return min(method1, method2);
 }

@@ -1801,16 +1799,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
 {
-	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	/*
+	 * We treat the cursor plane as always-on for the purposes of watermark
+	 * calculation. Until we have two-stage watermark programming merged,
+	 * this is necessary to avoid flickering.
+	 */
+	int cpp = 4;
+	int width = pstate->visible ? pstate->base.crtc_w : 64;

-	if (!cstate->base.active || !pstate->visible)
+	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
-			      drm_rect_width(&pstate->dst),
-			      bpp,
-			      mem_value);
+			      width, cpp, mem_value);
 }

 /* Only for WM_LP. */
@@ -1818,12 +1820,13 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
 {
-	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	int cpp = pstate->base.fb ?
+		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

	if (!cstate->base.active || !pstate->visible)
		return 0;

-	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
+	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
 }

 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
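Editor's note: ilk_wm_method2 is small enough to run standalone. The sketch
below mirrors its shape with assumed example numbers (148.5 MHz pixel rate in
kHz, htotal 2200, 1920 active pixels, 4 bytes per pixel, 12 us latency
expressed in 0.1us units); it is an illustration, not the driver's code:

	#include <stdint.h>
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* latency must be in 0.1us units, matching the kernel helper */
	static uint32_t wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
				   uint32_t horiz_pixels, uint8_t cpp,
				   uint32_t latency)
	{
		uint32_t ret = (latency * pixel_rate) / (pipe_htotal * 10000);
		ret = (ret + 1) * horiz_pixels * cpp;
		return DIV_ROUND_UP(ret, 64) + 2;
	}

	int main(void)
	{
		/* prints 122: the watermark in 64-byte blocks */
		printf("%u blocks\n",
		       (unsigned)wm_method2(148500, 2200, 1920, 4, 120));
		return 0;
	}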
@@ -3042,26 +3045,25 @@ static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)

 /*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
- * for the read latency) and bytes_per_pixel should always be <= 8, so that
+ * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
-static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
-			       uint32_t latency)
+static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
 {
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

-	wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
+	wm_intermediate_val = latency * pixel_rate * cpp / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
 }

 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
-			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+			       uint32_t horiz_pixels, uint8_t cpp,
			       uint64_t tiling, uint32_t latency)
 {
	uint32_t ret;
@@ -3071,7 +3073,7 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
	if (latency == 0)
		return UINT_MAX;

-	plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+	plane_bytes_per_line = horiz_pixels * cpp;

	if (tiling == I915_FORMAT_MOD_Y_TILED ||
	    tiling == I915_FORMAT_MOD_Yf_TILED) {
@@ -3121,23 +3123,21 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t res_blocks, res_lines;
	uint32_t selected_result;
-	uint8_t bytes_per_pixel;
+	uint8_t cpp;

	if (latency == 0 || !cstate->base.active || !fb)
		return false;

-	bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
+	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
-				 bytes_per_pixel,
-				 latency);
+				 cpp, latency);
	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 cstate->pipe_src_w,
-				 bytes_per_pixel,
-				 fb->modifier[0],
+				 cpp, fb->modifier[0],
				 latency);

-	plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
+	plane_bytes_per_line = cstate->pipe_src_w * cpp;
	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);

	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -3145,11 +3145,11 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
		uint32_t min_scanlines = 4;
		uint32_t y_tile_minimum;
		if (intel_rotation_90_or_270(plane->state->rotation)) {
-			int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
+			int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
				drm_format_plane_cpp(fb->pixel_format, 1) :
				drm_format_plane_cpp(fb->pixel_format, 0);

-			switch (bpp) {
+			switch (cpp) {
			case 1:
				min_scanlines = 16;
				break;
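Editor's note: the skl method1 path is likewise easy to sanity-check offline.
A standalone sketch with assumed inputs (533.25 MHz pixel rate in kHz, 4 cpp,
5 us latency = 50 tenths); the comment in the diff above explains why the
32-bit intermediate cannot overflow for real inputs:

	#include <stdint.h>
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* mirrors skl_wm_method1; latency in 0.1us units */
	static uint32_t wm_method1(uint32_t pixel_rate, uint8_t cpp,
				   uint32_t latency)
	{
		if (latency == 0)
			return UINT32_MAX;
		return DIV_ROUND_UP(latency * pixel_rate * cpp / 512, 1000);
	}

	int main(void)
	{
		/* prints 209 */
		printf("%u\n", (unsigned)wm_method1(533250, 4, 50));
		return 0;
	}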
@@ -4562,12 +4562,62 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
		onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
 }

-static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable_rc6 = true;
+	unsigned long rc6_ctx_base;
+
+	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
+		DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+		enable_rc6 = false;
+	}
+
+	/*
+	 * The exact context size is not known for BXT, so assume a page size
+	 * for this check.
+	 */
+	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
+	if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
+	      (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
+					dev_priv->gtt.stolen_reserved_size))) {
+		DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+		enable_rc6 = false;
+	}
+
+	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
+	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
+	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
+	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
+		DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
+					    GEN6_RC_CTL_HW_ENABLE)) &&
+	    ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
+	     !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
+		DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+		enable_rc6 = false;
+	}
+
+	return enable_rc6;
+}
+
+int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 {
	/* No RC6 before Ironlake and code is gone for ilk. */
	if (INTEL_INFO(dev)->gen < 6)
		return 0;

+	if (!enable_rc6)
+		return 0;
+
+	if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
+		DRM_INFO("RC6 disabled by BIOS\n");
+		return 0;
+	}
+
	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;
@@ -6057,7 +6107,6 @@ void intel_init_gt_powersave(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;

-	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
@@ -7189,9 +7238,10 @@ static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

-	div = vlv_gpu_freq_div(czclk_freq) / 2;
+	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;
+	div /= 2;

	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
 }
@@ -7200,9 +7250,10 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);

-	mul = vlv_gpu_freq_div(czclk_freq) / 2;
+	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;
+	mul /= 2;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
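Editor's note: the chv_gpu_freq/chv_freq_opcode change above fixes an
error-check ordering bug: the old code halved the helper's return value before
testing it, so an error code of -1 truncated to 0 and the negative check never
fired (and 0 then reached a division). A tiny sketch of the corrected pattern
(hypothetical helper, not the driver's):

	#include <stdio.h>

	static int freq_div(int czclk_khz)
	{
		return czclk_khz > 0 ? czclk_khz / 100 : -1; /* -1 models an error */
	}

	static int gpu_freq(int czclk_khz, int val)
	{
		int div = freq_div(czclk_khz);
		if (div < 0)            /* check the raw return first... */
			return div;
		div /= 2;               /* ...then derive the halved divider */
		return (czclk_khz * val) / (2 * div) / 2;
	}

	int main(void)
	{
		printf("%d\n", gpu_freq(320000, 10)); /* normal path: 500 */
		printf("%d\n", gpu_freq(0, 10));      /* error propagates: -1 */
		return 0;
	}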
@@ -225,7 +225,12 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
	}

-	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
+	if (dev_priv->psr.link_standby)
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+	else
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+				   DP_PSR_ENABLE);
 }

 static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -280,6 +285,9 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
	if (IS_HASWELL(dev))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

+	if (dev_priv->psr.link_standby)
+		val |= EDP_PSR_LINK_STANDBY;
+
	I915_WRITE(EDP_PSR_CTL, val |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -304,8 +312,15 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)

	dev_priv->psr.source_ok = false;

-	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
-		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+	/*
+	 * HSW spec explicitly says PSR is tied to port A.
+	 * BDW+ platforms with DDI implementation of PSR have different
+	 * PSR registers per transcoder and we only implement transcoder EDP
+	 * ones. Since by Display design transcoder EDP is tied to port A
+	 * we can safely escape based on the port A.
+	 */
+	if (HAS_DDI(dev) && dig_port->port != PORT_A) {
+		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return false;
	}

@@ -314,6 +329,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
		return false;
	}

+	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+	    !dev_priv->psr.link_standby) {
+		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
+		return false;
+	}
+
	if (IS_HASWELL(dev) &&
	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
	    S3D_ENABLE) {
@@ -327,12 +348,6 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
		return false;
	}

-	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
-	    ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
-		DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
-		return false;
-	}
-
	dev_priv->psr.source_ok = true;
	return true;
 }
@@ -763,6 +778,27 @@ void intel_psr_init(struct drm_device *dev)
	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

+	/* Set link_standby x link_off defaults */
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		/* HSW and BDW require workarounds that we don't implement. */
+		dev_priv->psr.link_standby = false;
+	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+		/* On VLV and CHV only standby mode is supported. */
+		dev_priv->psr.link_standby = true;
+	else
+		/* For new platforms let's respect VBT back again */
+		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+
+	/* Override link_standby x link_off defaults */
+	if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
+		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
+		dev_priv->psr.link_standby = true;
+	}
+	if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
+		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
+		dev_priv->psr.link_standby = false;
+	}
+
	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
 }
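Editor's note: the intel_psr_init hunk above layers a module-parameter
override on top of per-platform defaults. A compact sketch of that precedence
(standalone C with a stand-in enum instead of the real platform checks):

	#include <stdbool.h>
	#include <stdio.h>

	enum platform { HSW_BDW, VLV_CHV, OTHER };

	static bool psr_link_standby(enum platform p, int enable_psr,
				     bool vbt_full_link)
	{
		bool standby;

		/* platform defaults, as in the diff */
		if (p == HSW_BDW)
			standby = false;         /* workarounds not implemented */
		else if (p == VLV_CHV)
			standby = true;          /* only standby is supported */
		else
			standby = vbt_full_link; /* respect the VBT */

		/* explicit parameter values win over the defaults */
		if (enable_psr == 2)
			standby = true;
		if (enable_psr == 3)
			standby = false;
		return standby;
	}

	int main(void)
	{
		printf("%d\n", psr_link_standby(VLV_CHV, 1, false)); /* 1 */
		printf("%d\n", psr_link_standby(VLV_CHV, 3, false)); /* 0 */
		return 0;
	}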
@@ -789,6 +789,22 @@ static int wa_add(struct drm_i915_private *dev_priv,

 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)

+static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct i915_workarounds *wa = &dev_priv->workarounds;
+	const uint32_t index = wa->hw_whitelist_count[ring->id];
+
+	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
+		return -EINVAL;
+
+	WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
+		 i915_mmio_reg_offset(reg));
+	wa->hw_whitelist_count[ring->id]++;
+
+	return 0;
+}
+
 static int gen8_init_workarounds(struct intel_engine_cs *ring)
 {
	struct drm_device *dev = ring->dev;
@@ -894,6 +910,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;
+	int ret;

	/* WaEnableLbsSlaRetryTimerDecrement:skl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
@@ -964,6 +981,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
	/* WaDisableSTUnitPowerOptimization:skl,bxt */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

+	/* WaOCLCoherentLineFlush:skl,bxt */
+	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
+				    GEN8_LQSC_FLUSH_COHERENT_LINES));
+
+	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+	ret = wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
+	if (ret)
+		return ret;
+
+	/* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+	ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
+	if (ret)
+		return ret;
+
	return 0;
 }

@@ -1019,6 +1050,16 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
	if (ret)
		return ret;

+	/*
+	 * Actual WA is to disable percontext preemption granularity control
+	 * until D0 which is the default case so this is equivalent to
+	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
+	 */
+	if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+		I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+			   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	}
+
	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
@@ -1071,6 +1112,11 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
			    GEN7_HALF_SLICE_CHICKEN1,
			    GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

+	/* WaDisableLSQCROPERFforOCL:skl */
+	ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+	if (ret)
+		return ret;
+
	return skl_tune_iz_hashing(ring);
 }

@@ -1106,6 +1152,20 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

+	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
+	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
+	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
+	/* WaDisableLSQCROPERFforOCL:bxt */
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+		ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
+		if (ret)
+			return ret;
+
+		ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+		if (ret)
+			return ret;
+	}
+
	return 0;
 }

@@ -1117,6 +1177,7 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
	WARN_ON(ring->id != RCS);

	dev_priv->workarounds.count = 0;
+	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;

	if (IS_BROADWELL(dev))
		return bdw_init_workarounds(ring);
@@ -2058,6 +2119,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
		return ret;
	}

+	/* Access through the GTT requires the device to be awake. */
+	assert_rpm_wakelock_held(dev_priv);
+
	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
					    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
	if (ringbuf->virtual_start == NULL) {
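Editor's note: wa_ring_whitelist_reg above fills a fixed number of hardware
"force-to-nonpriv" slots and must fail cleanly once they run out. A minimal
sketch of that bounded-slot registration (the slot count is an assumption for
the sketch, and the register write is stubbed out):

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_NONPRIV_SLOTS 12    /* assumed limit, for illustration */

	static uint32_t whitelist_count;

	static int whitelist_reg(uint32_t reg_offset)
	{
		if (whitelist_count >= MAX_NONPRIV_SLOTS)
			return -1;      /* stands in for -EINVAL */
		/* a real driver would program the slot register here */
		printf("slot %u <- %#x\n", whitelist_count, reg_offset);
		whitelist_count++;
		return 0;
	}

	int main(void)
	{
		for (int i = 0; i < 14; i++)
			if (whitelist_reg(0x2000 + 4 * i))
				printf("register %d rejected\n", i);
		return 0;
	}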
@@ -158,6 +158,7 @@ struct intel_engine_cs {
 #define I915_NUM_RINGS 5
 #define _VCS(n) (VCS + (n))
+	unsigned int exec_id;
	unsigned int guc_id;
	u32 mmio_base;
	struct drm_device *dev;
	struct intel_ringbuffer *buffer;
@@ -1527,6 +1527,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
		      struct drm_display_mode *mode)
 {
	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;
@@ -1537,6 +1538,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
	if (intel_sdvo->pixel_clock_max < mode->clock)
		return MODE_CLOCK_HIGH;

+	if (mode->clock > max_dotclk)
+		return MODE_CLOCK_HIGH;
+
	if (intel_sdvo->is_lvds) {
		if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
			return MODE_PANEL;
@@ -129,17 +129,18 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
	return val;
 }

-u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg)
 {
	u32 val = 0;
-	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
			SB_CRRDDA_NP, reg, &val);
	return val;
 }

-void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_iosf_sb_write(struct drm_i915_private *dev_priv,
+		       u8 port, u32 reg, u32 val)
 {
-	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
			SB_CRWRDA_NP, reg, &val);
 }

@@ -171,20 +172,6 @@ void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
			SB_CRWRDA_NP, reg, &val);
 }

-u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
-{
-	u32 val = 0;
-	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
-			SB_CRRDDA_NP, reg, &val);
-	return val;
-}
-
-void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
-{
-	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
-			SB_CRWRDA_NP, reg, &val);
-}
-
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
 {
	u32 val = 0;
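Editor's note: the sideband change above folds per-unit accessors (GPIO_NC,
GPS_CORE) into generic vlv_iosf_sb_read/write helpers that take the IOSF port
as a parameter, so callers name the unit at the call site. A standalone sketch
of that refactor shape (port values are made up for the illustration):

	#include <stdint.h>
	#include <stdio.h>

	enum iosf_port { PORT_GPIO_NC = 0x13, PORT_GPS_CORE = 0x48 };

	/* one parameterized accessor instead of a wrapper per unit */
	static uint32_t sb_read(enum iosf_port port, uint32_t reg)
	{
		/* a real implementation would issue the sideband transaction */
		return ((uint32_t)port << 16) | reg;
	}

	int main(void)
	{
		printf("%#x\n", sb_read(PORT_GPIO_NC, 0x40));
		printf("%#x\n", sb_read(PORT_GPS_CORE, 0x40));
		return 0;
	}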
@@ -350,8 +350,8 @@ vlv_update_plane(struct drm_plane *dplane,
	int pipe = intel_plane->pipe;
	int plane = intel_plane->plane;
	u32 sprctl;
-	unsigned long sprsurf_offset, linear_offset;
-	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	u32 sprsurf_offset, linear_offset;
+	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	int crtc_x = plane_state->dst.x1;
	int crtc_y = plane_state->dst.y1;
@@ -422,10 +422,9 @@ vlv_update_plane(struct drm_plane *dplane,
	crtc_w--;
	crtc_h--;

-	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	linear_offset = y * fb->pitches[0] + x * cpp;
	sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
-						   fb->modifier[0],
-						   pixel_size,
+						   fb->modifier[0], cpp,
						   fb->pitches[0]);
	linear_offset -= sprsurf_offset;

@@ -434,7 +433,7 @@ vlv_update_plane(struct drm_plane *dplane,

		x += src_w;
		y += src_h;
-		linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+		linear_offset += src_h * fb->pitches[0] + src_w * cpp;
	}

	if (key->flags) {
@@ -493,8 +492,8 @@ ivb_update_plane(struct drm_plane *plane,
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = intel_plane->pipe;
	u32 sprctl, sprscale = 0;
-	unsigned long sprsurf_offset, linear_offset;
-	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	u32 sprsurf_offset, linear_offset;
+	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	int crtc_x = plane_state->dst.x1;
	int crtc_y = plane_state->dst.y1;
@@ -556,10 +555,9 @@ ivb_update_plane(struct drm_plane *plane,
	if (crtc_w != src_w || crtc_h != src_h)
		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;

-	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	linear_offset = y * fb->pitches[0] + x * cpp;
	sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
-						   fb->modifier[0],
-						   pixel_size,
+						   fb->modifier[0], cpp,
						   fb->pitches[0]);
	linear_offset -= sprsurf_offset;

@@ -570,8 +568,7 @@ ivb_update_plane(struct drm_plane *plane,
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
		x += src_w;
		y += src_h;
-		linear_offset += src_h * fb->pitches[0] +
-			src_w * pixel_size;
+		linear_offset += src_h * fb->pitches[0] + src_w * cpp;
	}
	}

@@ -635,9 +632,9 @@ ilk_update_plane(struct drm_plane *plane,
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int pipe = intel_plane->pipe;
-	unsigned long dvssurf_offset, linear_offset;
	u32 dvscntr, dvsscale;
-	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	u32 dvssurf_offset, linear_offset;
+	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	int crtc_x = plane_state->dst.x1;
	int crtc_y = plane_state->dst.y1;
@@ -695,10 +692,9 @@ ilk_update_plane(struct drm_plane *plane,
	if (crtc_w != src_w || crtc_h != src_h)
		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;

-	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	linear_offset = y * fb->pitches[0] + x * cpp;
	dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
-						   fb->modifier[0],
-						   pixel_size,
+						   fb->modifier[0], cpp,
						   fb->pitches[0]);
	linear_offset -= dvssurf_offset;

@@ -707,7 +703,7 @@ ilk_update_plane(struct drm_plane *plane,

		x += src_w;
		y += src_h;
-		linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+		linear_offset += src_h * fb->pitches[0] + src_w * cpp;
	}

	if (key->flags) {
@@ -772,7 +768,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
	int hscale, vscale;
	int max_scale, min_scale;
	bool can_scale;
-	int pixel_size;

	if (!fb) {
		state->visible = false;
@@ -894,6 +889,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
	/* Check size restrictions when scaling */
	if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
		unsigned int width_bytes;
+		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		WARN_ON(!can_scale);

@@ -905,9 +901,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
		if (src_w < 3 || src_h < 3)
			state->visible = false;

-		pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-		width_bytes = ((src_x * pixel_size) & 63) +
-					src_w * pixel_size;
+		width_bytes = ((src_x * cpp) & 63) + src_w * cpp;

		if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
		    width_bytes > 4096 || fb->pitches[0] > 4096)) {
|
||||
{
|
||||
struct intel_tv *intel_tv = intel_attached_tv(connector);
|
||||
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
|
||||
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
|
||||
|
||||
if (mode->clock > max_dotclk)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
/* Ensure TV refresh is close to desired refresh */
|
||||
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
|
||||
@ -1420,6 +1424,7 @@ intel_tv_get_modes(struct drm_connector *connector)
|
||||
if (!mode_ptr)
|
||||
continue;
|
||||
strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
|
||||
mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0';
|
||||
|
||||
mode_ptr->hdisplay = hactive_s;
|
||||
mode_ptr->hsync_start = hactive_s + 1;
|
||||
|
@ -400,6 +400,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
|
||||
|
||||
void intel_uncore_sanitize(struct drm_device *dev)
|
||||
{
|
||||
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
|
||||
|
||||
/* BIOS often leaves RC6 enabled, but disable it for hw init */
|
||||
intel_disable_gt_powersave(dev);
|
||||
}
|
||||
|
@@ -277,7 +277,9 @@
	INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */

 #define INTEL_SKL_GT3_IDS(info) \
+	INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
	INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+	INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
	INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
	INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */

@@ -296,7 +298,9 @@
 #define INTEL_BXT_IDS(info) \
	INTEL_VGA_DEVICE(0x0A84, info), \
	INTEL_VGA_DEVICE(0x1A84, info), \
-	INTEL_VGA_DEVICE(0x5A84, info)
+	INTEL_VGA_DEVICE(0x1A85, info), \
+	INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
+	INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */

 #define INTEL_KBL_GT1_IDS(info) \
	INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
@@ -772,10 +772,12 @@ struct drm_i915_gem_execbuffer2 {
 #define I915_EXEC_HANDLE_LUT		(1<<12)

 /** Used for switching BSD rings on the platforms with two BSD rings */
-#define I915_EXEC_BSD_MASK		(3<<13)
-#define I915_EXEC_BSD_DEFAULT		(0<<13) /* default ping-pong mode */
-#define I915_EXEC_BSD_RING1		(1<<13)
-#define I915_EXEC_BSD_RING2		(2<<13)
+#define I915_EXEC_BSD_SHIFT	 (13)
+#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
+/* default ping-pong mode */
+#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)

 /** Tell the kernel that the batchbuffer is processed by
 * the resource streamer.
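Editor's note: the uapi hunk re-expresses the BSD ring-select flags in terms
of a single shift, so userspace can extract the field generically instead of
hard-coding bit positions. A small encode/decode sketch using the reworked
macros (the extra flag bits are assumed values for illustration):

	#include <stdio.h>

	#define I915_EXEC_BSD_SHIFT	(13)
	#define I915_EXEC_BSD_MASK	(3 << I915_EXEC_BSD_SHIFT)
	#define I915_EXEC_BSD_RING1	(1 << I915_EXEC_BSD_SHIFT)
	#define I915_EXEC_BSD_RING2	(2 << I915_EXEC_BSD_SHIFT)

	int main(void)
	{
		unsigned int flags = 0x5 | I915_EXEC_BSD_RING2;

		/* decoding no longer hard-codes the bit positions */
		unsigned int ring = (flags & I915_EXEC_BSD_MASK) >> I915_EXEC_BSD_SHIFT;
		printf("ring select = %u\n", ring); /* 2 */
		return 0;
	}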