2009-02-18 08:08:50 +07:00
|
|
|
/*
|
|
|
|
* Copyright © 2008 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Eric Anholt <eric@anholt.net>
|
|
|
|
* Keith Packard <keithp@keithp.com>
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2017-03-03 16:13:38 +07:00
|
|
|
#include <linux/sched/mm.h>
|
2019-04-05 18:00:08 +07:00
|
|
|
#include <linux/sort.h>
|
|
|
|
|
2019-01-18 04:03:34 +07:00
|
|
|
#include <drm/drm_debugfs.h>
|
2019-06-13 15:44:15 +07:00
|
|
|
|
2019-05-28 16:29:49 +07:00
|
|
|
#include "gem/i915_gem_context.h"
|
2019-08-09 03:27:58 +07:00
|
|
|
#include "gt/intel_gt_pm.h"
|
2019-10-04 20:40:06 +07:00
|
|
|
#include "gt/intel_gt_requests.h"
|
2019-04-25 00:48:39 +07:00
|
|
|
#include "gt/intel_reset.h"
|
2019-09-27 18:08:49 +07:00
|
|
|
#include "gt/intel_rc6.h"
|
2019-10-25 04:16:41 +07:00
|
|
|
#include "gt/intel_rps.h"
|
2019-07-13 17:00:11 +07:00
|
|
|
#include "gt/uc/intel_guc_submission.h"
|
2019-04-25 00:48:39 +07:00
|
|
|
|
2019-05-02 22:02:43 +07:00
|
|
|
#include "i915_debugfs.h"
|
2019-12-05 22:43:40 +07:00
|
|
|
#include "i915_debugfs_params.h"
|
2019-04-29 19:29:27 +07:00
|
|
|
#include "i915_irq.h"
|
2019-08-06 17:07:28 +07:00
|
|
|
#include "i915_trace.h"
|
2019-04-05 18:00:15 +07:00
|
|
|
#include "intel_pm.h"
|
2019-04-26 15:17:22 +07:00
|
|
|
#include "intel_sideband.h"
|
2019-01-16 22:33:04 +07:00
|
|
|
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
|
|
|
|
{
|
|
|
|
return to_i915(node->minor->dev);
|
|
|
|
}
|
|
|
|
|
2010-08-25 22:03:34 +07:00
|
|
|
static int i915_capabilities(struct seq_file *m, void *data)
|
|
|
|
{
|
2019-12-08 01:29:37 +07:00
|
|
|
struct drm_i915_private *i915 = node_to_i915(m->private);
|
2017-12-19 18:43:44 +07:00
|
|
|
struct drm_printer p = drm_seq_file_printer(m);
|
2010-08-25 22:03:34 +07:00
|
|
|
|
2019-12-08 01:29:37 +07:00
|
|
|
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
|
2019-09-11 18:46:55 +07:00
|
|
|
|
2019-12-08 01:29:37 +07:00
|
|
|
intel_device_info_print_static(INTEL_INFO(i915), &p);
|
|
|
|
intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
|
|
|
|
intel_driver_caps_print(&i915->caps, &p);
|
2010-08-25 22:03:34 +07:00
|
|
|
|
2017-02-07 04:36:08 +07:00
|
|
|
kernel_param_lock(THIS_MODULE);
|
2017-12-19 18:43:46 +07:00
|
|
|
i915_params_dump(&i915_modparams, &p);
|
2017-02-07 04:36:08 +07:00
|
|
|
kernel_param_unlock(THIS_MODULE);
|
|
|
|
|
2010-08-25 22:03:34 +07:00
|
|
|
return 0;
|
|
|
|
}
|
2009-02-18 08:08:50 +07:00
|
|
|
|
2016-05-12 20:18:52 +07:00
|
|
|
static char get_tiling_flag(struct drm_i915_gem_object *obj)
|
2009-02-11 21:26:38 +07:00
|
|
|
{
|
2016-08-05 16:14:23 +07:00
|
|
|
switch (i915_gem_object_get_tiling(obj)) {
|
2011-08-17 02:34:10 +07:00
|
|
|
default:
|
2016-04-15 17:34:52 +07:00
|
|
|
case I915_TILING_NONE: return ' ';
|
|
|
|
case I915_TILING_X: return 'X';
|
|
|
|
case I915_TILING_Y: return 'Y';
|
2011-08-17 02:34:10 +07:00
|
|
|
}
|
2009-02-11 21:26:38 +07:00
|
|
|
}
|
|
|
|
|
2016-05-12 20:18:52 +07:00
|
|
|
static char get_global_flag(struct drm_i915_gem_object *obj)
|
2016-04-15 17:34:52 +07:00
|
|
|
{
|
2019-08-22 13:09:13 +07:00
|
|
|
return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
|
2016-04-15 17:34:52 +07:00
|
|
|
}
|
|
|
|
|
2016-05-12 20:18:52 +07:00
|
|
|
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
|
2013-08-01 07:00:00 +07:00
|
|
|
{
|
2016-10-28 19:58:35 +07:00
|
|
|
return obj->mm.mapping ? 'M' : ' ';
|
2013-08-01 07:00:00 +07:00
|
|
|
}
|
|
|
|
|
2017-10-07 05:18:28 +07:00
|
|
|
static const char *
|
|
|
|
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
|
|
|
|
{
|
|
|
|
size_t x = 0;
|
|
|
|
|
|
|
|
switch (page_sizes) {
|
|
|
|
case 0:
|
|
|
|
return "";
|
|
|
|
case I915_GTT_PAGE_SIZE_4K:
|
|
|
|
return "4K";
|
|
|
|
case I915_GTT_PAGE_SIZE_64K:
|
|
|
|
return "64K";
|
|
|
|
case I915_GTT_PAGE_SIZE_2M:
|
|
|
|
return "2M";
|
|
|
|
default:
|
|
|
|
if (!buf)
|
|
|
|
return "M";
|
|
|
|
|
|
|
|
if (page_sizes & I915_GTT_PAGE_SIZE_2M)
|
|
|
|
x += snprintf(buf + x, len - x, "2M, ");
|
|
|
|
if (page_sizes & I915_GTT_PAGE_SIZE_64K)
|
|
|
|
x += snprintf(buf + x, len - x, "64K, ");
|
|
|
|
if (page_sizes & I915_GTT_PAGE_SIZE_4K)
|
|
|
|
x += snprintf(buf + x, len - x, "4K, ");
|
|
|
|
buf[x-2] = '\0';
|
|
|
|
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
/*
 * Print a one-line description of a GEM object: flags, size, cache domains,
 * every bound VMA (with GGTT view details), fence, pin count and the last
 * engine to write it.  Output goes to the debugfs seq_file @m.
 *
 * NOTE(review): the vma.lock is dropped while printing each vma and
 * re-taken afterwards, so the list can change underneath us; presumably
 * list_for_each_entry remains safe because the vma we hold survives the
 * unlock — confirm against the vma lifetime rules.
 */
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	/* Header: kernel pointer, flag chars, size in KiB, domains, cache. */
	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		/* seq_printf may sleep; drop the spinlock while printing. */
		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* Describe the special GGTT view, if any. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		/* Re-acquire before touching the list for the next entry. */
		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}
|
|
|
|
|
drm/i915: Track clients and print their object usage in debugfs
By stashing a pointer of who opened the device and keeping a list of
open fd, we can then walk each client and inspect how many objects they
have open. For example,
i915_gem_objects:
1102 objects, 613646336 bytes
663 [662] objects, 468783104 [468750336] bytes in gtt
37 [37] active objects, 46874624 [46874624] bytes
626 [625] inactive objects, 421908480 [421875712] bytes
282 unbound objects, 6512640 bytes
85 purgeable objects, 6787072 bytes
28 pinned mappable objects, 3686400 bytes
40 fault mappable objects, 27783168 bytes
2145386496 [536870912] gtt total
Xorg: 43 objects, 32243712 bytes (10223616 active, 16683008 inactive, 4096 unbound)
gnome-shell: 30 objects, 28381184 bytes (0 active, 28336128 inactive, 0 unbound)
xonotic-linux64: 1032 objects, 569933824 bytes (46874624 active, 383545344 inactive, 6508544 unbound)
v2: Use existing drm->filelist as pointed out by Ben.
v3: Not even stashing the task_struct is required as Ben pointed out
drm_file->pid.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-06-05 05:49:08 +07:00
|
|
|
/*
 * Accumulator for per-client (or kernel) GEM object statistics, filled in
 * by per_file_stats() and printed via print_file_stats().
 */
struct file_stats {
	/* When non-NULL, only count VMAs bound into this address space. */
	struct i915_address_space *vm;
	/* Number of objects visited. */
	unsigned long count;
	/* Total object bytes, and bytes of objects with no GPU binding. */
	u64 total, unbound;
	/* Bytes of bound VMAs, split by GPU-activity state. */
	u64 active, inactive;
	/* Bytes of bound VMAs whose vma has been closed. */
	u64 closed;
};
|
|
|
|
|
|
|
|
/*
 * idr_for_each() callback: accumulate one GEM object's sizes into the
 * struct file_stats passed via @data.
 *
 * With stats->vm == NULL every GGTT binding is counted; otherwise only
 * the binding (if any) in that specific address space, found via the
 * object's vma rb-tree.  Always returns 0 so iteration continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	/* Skip objects already on their way to destruction. */
	if (!kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;
	if (!atomic_read(&obj->bind_count))
		stats->unbound += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		/* Global view: account every allocated GGTT vma. */
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		/*
		 * Per-vm view: binary-search the object's vma tree for the
		 * (at most one) vma belonging to stats->vm.
		 */
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	/* Drop the reference taken above. */
	i915_gem_object_put(obj);

	return 0;
}
|
|
|
|
|
2015-04-07 22:20:40 +07:00
|
|
|
/*
 * Emit one summary line for a file_stats accumulator, tagged with @name.
 * A macro (not a function) so that @stats can be passed by value and the
 * whole statement vanishes when nothing was counted.  Note @stats is
 * evaluated multiple times — pass a plain variable, not an expression
 * with side effects.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
|
2014-12-12 03:13:08 +07:00
|
|
|
|
2019-01-07 18:54:25 +07:00
|
|
|
/*
 * Walk every GEM context on the device and print per-client object usage,
 * plus an aggregate "[k]contexts" line for kernel-owned context state
 * (ring and context images).
 *
 * The contexts.lock spinlock is dropped while inspecting each context
 * (the work below can sleep); a context reference plus
 * list_safe_reset_next() keep the list walk valid across the unlock.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		/* Skip contexts already being destroyed. */
		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		/* Account kernel-side state of each pinned engine context. */
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				rcu_read_lock();
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
				rcu_read_unlock();
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			/* Restrict counting to this context's own vm. */
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			/* Label the line with the owning task's comm. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}

		/* Re-lock and revalidate the next pointer before dropping ctx. */
		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}
|
|
|
|
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
static int i915_gem_object_info(struct seq_file *m, void *data)
|
2010-09-30 17:46:12 +07:00
|
|
|
{
|
2019-06-12 17:57:20 +07:00
|
|
|
struct drm_i915_private *i915 = node_to_i915(m->private);
|
2019-12-27 20:37:48 +07:00
|
|
|
struct intel_memory_region *mr;
|
|
|
|
enum intel_region_id id;
|
2010-09-30 17:46:12 +07:00
|
|
|
|
2019-08-03 04:21:36 +07:00
|
|
|
seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
|
2019-06-12 17:57:20 +07:00
|
|
|
i915->mm.shrink_count,
|
2019-08-03 04:21:36 +07:00
|
|
|
atomic_read(&i915->mm.free_count),
|
2019-06-12 17:57:20 +07:00
|
|
|
i915->mm.shrink_memory);
|
2019-12-27 20:37:48 +07:00
|
|
|
for_each_memory_region(mr, i915, id)
|
|
|
|
seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
|
|
|
|
mr->name, &mr->total, &mr->avail);
|
2014-12-12 03:13:08 +07:00
|
|
|
seq_putc(m, '\n');
|
2016-04-27 00:29:41 +07:00
|
|
|
|
2019-06-12 17:57:20 +07:00
|
|
|
print_context_stats(m, i915);
|
2010-09-30 17:46:12 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-02-20 22:37:53 +07:00
|
|
|
/*
 * Dump the gen8+ display-engine interrupt registers: the per-pipe
 * IMR/IIR/IER triplets (skipping pipes whose power well is off), then the
 * port, misc and PCU interrupt registers.
 *
 * Each pipe's power domain is taken with a wakeref for the duration of
 * the register reads — reading a powered-down pipe would fault.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		/* Only read the registers if the pipe is powered up. */
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
|
|
|
|
|
2009-02-18 08:08:50 +07:00
|
|
|
/*
 * i915_interrupt_info - debugfs dump of the interrupt register state.
 *
 * Selects the register layout by platform/gen (Cherryview, gen11+, gen8+,
 * Valleyview, pre-PCH-split, Ironlake-style PCH split) and prints the
 * relevant master/display/GT interrupt enable, identity and mask registers.
 * The whole dump is bracketed by a runtime-pm wakeref so the device is
 * awake for the MMIO reads; individual display registers additionally take
 * their pipe power domain where required.
 *
 * Returns 0.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	/* Keep the device awake for the duration of the register dump */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			/* Skip pipes whose power well is down */
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		/* Hotplug registers need the display powered up */
		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			/* Skip pipes whose power well is down */
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		/* Hotplug registers need the display powered up */
		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		/* Gen2-4: single set of interrupt registers */
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		/* Ironlake-style split between north/south display and GT */
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	/* Engine interrupt masks, layout again depends on gen */
	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
|
|
|
|
|
2009-02-11 21:26:38 +07:00
|
|
|
/*
 * i915_gem_fence_regs_info - debugfs listing of the GGTT fence registers.
 *
 * Prints the total fence count, then one line per fence register with its
 * pin count and a description of the object currently bound through it
 * (or "unused" when no vma is attached). The iteration is performed under
 * rcu_read_lock(); the vma pointers are read without further locking.
 *
 * Returns 0.
 */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	unsigned int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	rcu_read_lock();
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		/* pin_count is modified concurrently, hence atomic_read */
		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			i915_debugfs_describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
|
|
|
|
|
2016-10-12 16:05:18 +07:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
|
2017-02-14 23:46:11 +07:00
|
|
|
/*
 * gpu_state_read - read() for the error-state / gpu-info debugfs files.
 *
 * Copies up to @count bytes of the textual GPU coredump, starting at *@pos,
 * into the user buffer and advances *@pos on success. Returns the number of
 * bytes copied, 0 at EOF (or when no error state is attached), or a
 * negative errno.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	/* Only advance the file position by what actually reached userspace */
	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}
|
2013-05-23 17:55:35 +07:00
|
|
|
|
2017-02-14 23:46:11 +07:00
|
|
|
static int gpu_state_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
2020-01-10 19:30:56 +07:00
|
|
|
i915_gpu_coredump_put(file->private_data);
|
2013-05-23 17:55:35 +07:00
|
|
|
return 0;
|
2012-04-27 20:17:40 +07:00
|
|
|
}
|
|
|
|
|
2017-02-14 23:46:11 +07:00
|
|
|
/*
 * i915_gpu_info_open - open() for the i915_gpu_info debugfs file.
 *
 * Captures a fresh GPU coredump (holding a runtime-pm wakeref for the
 * duration of the capture) and stashes it in file->private_data for
 * gpu_state_read()/gpu_state_release(). Returns 0 or a negative errno
 * from the capture.
 */
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	/* Device must be awake while snapshotting its state */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}
|
|
|
|
|
2017-02-14 23:46:11 +07:00
|
|
|
/* File operations for the read-only i915_gpu_info debugfs entry. */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
|
|
|
|
|
|
|
|
/*
 * i915_error_state_write - write() for the i915_error_state debugfs file.
 *
 * Any write clears the first recorded error state; the payload itself is
 * ignored. Returns @cnt so the write appears fully consumed, or 0 if no
 * error state was attached to the file.
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}
|
2013-05-23 17:55:35 +07:00
|
|
|
|
2017-02-14 23:46:11 +07:00
|
|
|
static int i915_error_state_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2020-01-10 19:30:56 +07:00
|
|
|
struct i915_gpu_coredump *error;
|
2018-12-07 18:05:54 +07:00
|
|
|
|
|
|
|
error = i915_first_error_state(inode->i_private);
|
|
|
|
if (IS_ERR(error))
|
|
|
|
return PTR_ERR(error);
|
|
|
|
|
|
|
|
file->private_data = error;
|
2017-02-14 23:46:11 +07:00
|
|
|
return 0;
|
2012-04-27 20:17:40 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * File operations for the i915_error_state debugfs entry; writable so
 * that userspace can clear the recorded error state.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
|
2016-10-12 16:05:18 +07:00
|
|
|
#endif
|
|
|
|
|
2014-03-31 13:00:02 +07:00
|
|
|
static int i915_frequency_info(struct seq_file *m, void *unused)
|
2010-01-30 02:27:07 +07:00
|
|
|
{
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
2019-06-11 17:45:48 +07:00
|
|
|
struct intel_uncore *uncore = &dev_priv->uncore;
|
2019-10-25 04:16:41 +07:00
|
|
|
struct intel_rps *rps = &dev_priv->gt.rps;
|
2019-01-14 21:21:14 +07:00
|
|
|
intel_wakeref_t wakeref;
|
2013-11-28 03:21:54 +07:00
|
|
|
int ret = 0;
|
|
|
|
|
2019-06-14 06:21:54 +07:00
|
|
|
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
|
2010-12-18 05:19:02 +07:00
|
|
|
|
drm/i915: replace IS_GEN<N> with IS_GEN(..., N)
Define IS_GEN() similarly to our IS_GEN_RANGE(). but use gen instead of
gen_mask to do the comparison. Now callers can pass then gen as a parameter,
so we don't require one macro for each gen.
The following spatch was used to convert the users of these macros:
@@
expression e;
@@
(
- IS_GEN2(e)
+ IS_GEN(e, 2)
|
- IS_GEN3(e)
+ IS_GEN(e, 3)
|
- IS_GEN4(e)
+ IS_GEN(e, 4)
|
- IS_GEN5(e)
+ IS_GEN(e, 5)
|
- IS_GEN6(e)
+ IS_GEN(e, 6)
|
- IS_GEN7(e)
+ IS_GEN(e, 7)
|
- IS_GEN8(e)
+ IS_GEN(e, 8)
|
- IS_GEN9(e)
+ IS_GEN(e, 9)
|
- IS_GEN10(e)
+ IS_GEN(e, 10)
|
- IS_GEN11(e)
+ IS_GEN(e, 11)
)
v2: use IS_GEN rather than GT_GEN and compare to info.gen rather than
using the bitmask
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181212181044.15886-2-lucas.demarchi@intel.com
2018-12-13 01:10:43 +07:00
|
|
|
if (IS_GEN(dev_priv, 5)) {
|
2019-06-11 17:45:48 +07:00
|
|
|
u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
|
|
|
|
u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
|
2010-12-18 05:19:02 +07:00
|
|
|
|
|
|
|
seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
|
|
|
|
seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
|
|
|
|
seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
|
|
|
|
MEMSTAT_VID_SHIFT);
|
|
|
|
seq_printf(m, "Current P-state: %d\n",
|
|
|
|
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
|
2017-10-11 04:30:02 +07:00
|
|
|
u32 rpmodectl, freq_sts;
|
2015-12-10 03:29:35 +07:00
|
|
|
|
2017-10-11 04:30:02 +07:00
|
|
|
rpmodectl = I915_READ(GEN6_RP_CONTROL);
|
|
|
|
seq_printf(m, "Video Turbo Mode: %s\n",
|
|
|
|
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
|
|
|
|
seq_printf(m, "HW control enabled: %s\n",
|
|
|
|
yesno(rpmodectl & GEN6_RP_ENABLE));
|
|
|
|
seq_printf(m, "SW control enabled: %s\n",
|
|
|
|
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
|
|
|
|
GEN6_RP_MEDIA_SW_MODE));
|
|
|
|
|
2019-04-26 15:17:20 +07:00
|
|
|
vlv_punit_get(dev_priv);
|
2015-12-10 03:29:35 +07:00
|
|
|
freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
|
2019-04-26 15:17:20 +07:00
|
|
|
vlv_punit_put(dev_priv);
|
|
|
|
|
2015-12-10 03:29:35 +07:00
|
|
|
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
|
|
|
|
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
|
|
|
|
|
|
|
|
seq_printf(m, "actual GPU freq: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
|
2015-12-10 03:29:35 +07:00
|
|
|
|
|
|
|
seq_printf(m, "current GPU freq: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->cur_freq));
|
2015-12-10 03:29:35 +07:00
|
|
|
|
|
|
|
seq_printf(m, "max GPU freq: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->max_freq));
|
2015-12-10 03:29:35 +07:00
|
|
|
|
|
|
|
seq_printf(m, "min GPU freq: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->min_freq));
|
2015-12-10 03:29:35 +07:00
|
|
|
|
|
|
|
seq_printf(m, "idle GPU freq: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->idle_freq));
|
2015-12-10 03:29:35 +07:00
|
|
|
|
|
|
|
seq_printf(m,
|
|
|
|
"efficient (RPe) frequency: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->efficient_freq));
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
} else if (INTEL_GEN(dev_priv) >= 6) {
|
2015-06-26 04:54:07 +07:00
|
|
|
u32 rp_state_limits;
|
|
|
|
u32 gt_perf_status;
|
|
|
|
u32 rp_state_cap;
|
2014-03-27 16:06:14 +07:00
|
|
|
u32 rpmodectl, rpinclimit, rpdeclimit;
|
2013-08-27 05:51:01 +07:00
|
|
|
u32 rpstat, cagf, reqf;
|
2011-01-19 06:49:25 +07:00
|
|
|
u32 rpupei, rpcurup, rpprevup;
|
|
|
|
u32 rpdownei, rpcurdown, rpprevdown;
|
2014-08-02 04:14:48 +07:00
|
|
|
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
|
2010-12-18 05:19:02 +07:00
|
|
|
int max_freq;
|
|
|
|
|
2015-06-26 04:54:07 +07:00
|
|
|
rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
|
2016-12-02 15:23:49 +07:00
|
|
|
if (IS_GEN9_LP(dev_priv)) {
|
2015-06-26 04:54:07 +07:00
|
|
|
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
|
|
|
|
gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
|
|
|
|
} else {
|
|
|
|
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
|
|
|
|
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
|
|
|
|
}
|
|
|
|
|
2010-12-18 05:19:02 +07:00
|
|
|
/* RPSTAT1 is in the GT power well */
|
2019-03-20 01:35:36 +07:00
|
|
|
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
|
2010-12-18 05:19:02 +07:00
|
|
|
|
2013-08-27 05:51:01 +07:00
|
|
|
reqf = I915_READ(GEN6_RPNSWREQ);
|
2017-07-07 03:41:13 +07:00
|
|
|
if (INTEL_GEN(dev_priv) >= 9)
|
2015-03-06 12:37:21 +07:00
|
|
|
reqf >>= 23;
|
|
|
|
else {
|
|
|
|
reqf &= ~GEN6_TURBO_DISABLE;
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
2015-03-06 12:37:21 +07:00
|
|
|
reqf >>= 24;
|
|
|
|
else
|
|
|
|
reqf >>= 25;
|
|
|
|
}
|
2019-10-25 04:16:41 +07:00
|
|
|
reqf = intel_gpu_freq(rps, reqf);
|
2013-08-27 05:51:01 +07:00
|
|
|
|
2014-03-27 16:06:14 +07:00
|
|
|
rpmodectl = I915_READ(GEN6_RP_CONTROL);
|
|
|
|
rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
|
|
|
|
rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
|
|
|
|
|
2011-01-19 06:49:25 +07:00
|
|
|
rpstat = I915_READ(GEN6_RPSTAT1);
|
2016-04-23 01:35:46 +07:00
|
|
|
rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
|
|
|
|
rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
|
|
|
|
rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
|
|
|
|
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
|
|
|
|
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
|
|
|
|
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
|
2019-12-14 01:37:35 +07:00
|
|
|
cagf = intel_rps_read_actual_frequency(rps);
|
2011-01-19 06:49:25 +07:00
|
|
|
|
2019-03-20 01:35:36 +07:00
|
|
|
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
|
2011-04-26 02:11:50 +07:00
|
|
|
|
2018-05-11 04:59:55 +07:00
|
|
|
if (INTEL_GEN(dev_priv) >= 11) {
|
|
|
|
pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
|
|
|
|
pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
|
|
|
|
/*
|
|
|
|
* The equivalent to the PM ISR & IIR cannot be read
|
|
|
|
* without affecting the current state of the system
|
|
|
|
*/
|
|
|
|
pm_isr = 0;
|
|
|
|
pm_iir = 0;
|
|
|
|
} else if (INTEL_GEN(dev_priv) >= 8) {
|
2014-08-02 04:14:48 +07:00
|
|
|
pm_ier = I915_READ(GEN8_GT_IER(2));
|
|
|
|
pm_imr = I915_READ(GEN8_GT_IMR(2));
|
|
|
|
pm_isr = I915_READ(GEN8_GT_ISR(2));
|
|
|
|
pm_iir = I915_READ(GEN8_GT_IIR(2));
|
2018-05-11 04:59:55 +07:00
|
|
|
} else {
|
|
|
|
pm_ier = I915_READ(GEN6_PMIER);
|
|
|
|
pm_imr = I915_READ(GEN6_PMIMR);
|
|
|
|
pm_isr = I915_READ(GEN6_PMISR);
|
|
|
|
pm_iir = I915_READ(GEN6_PMIIR);
|
2014-08-02 04:14:48 +07:00
|
|
|
}
|
2018-05-11 04:59:55 +07:00
|
|
|
pm_mask = I915_READ(GEN6_PMINTRMSK);
|
|
|
|
|
2017-10-11 04:29:59 +07:00
|
|
|
seq_printf(m, "Video Turbo Mode: %s\n",
|
|
|
|
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
|
|
|
|
seq_printf(m, "HW control enabled: %s\n",
|
|
|
|
yesno(rpmodectl & GEN6_RP_ENABLE));
|
|
|
|
seq_printf(m, "SW control enabled: %s\n",
|
|
|
|
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
|
|
|
|
GEN6_RP_MEDIA_SW_MODE));
|
2018-05-11 04:59:55 +07:00
|
|
|
|
|
|
|
seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
|
|
|
|
pm_ier, pm_imr, pm_mask);
|
|
|
|
if (INTEL_GEN(dev_priv) <= 10)
|
|
|
|
seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
|
|
|
|
pm_isr, pm_iir);
|
2017-03-11 09:37:00 +07:00
|
|
|
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
|
2017-10-11 04:30:06 +07:00
|
|
|
rps->pm_intrmsk_mbz);
|
2010-12-18 05:19:02 +07:00
|
|
|
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
|
|
|
|
seq_printf(m, "Render p-state ratio: %d\n",
|
2017-07-07 03:41:13 +07:00
|
|
|
(gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
|
2010-12-18 05:19:02 +07:00
|
|
|
seq_printf(m, "Render p-state VID: %d\n",
|
|
|
|
gt_perf_status & 0xff);
|
|
|
|
seq_printf(m, "Render p-state limit: %d\n",
|
|
|
|
rp_state_limits & 0xff);
|
2014-03-27 16:06:14 +07:00
|
|
|
seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
|
|
|
|
seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
|
|
|
|
seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
|
|
|
|
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
|
2013-08-27 05:51:01 +07:00
|
|
|
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
|
2013-01-30 03:00:15 +07:00
|
|
|
seq_printf(m, "CAGF: %dMHz\n", cagf);
|
2016-04-23 01:35:46 +07:00
|
|
|
seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
|
|
|
|
rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
|
|
|
|
seq_printf(m, "RP CUR UP: %d (%dus)\n",
|
|
|
|
rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
|
|
|
|
seq_printf(m, "RP PREV UP: %d (%dus)\n",
|
|
|
|
rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
|
drm/i915: Interactive RPS mode
RPS provides a feedback loop where we use the load during the previous
evaluation interval to decide whether to up or down clock the GPU
frequency. Our responsiveness is split into 3 regimes, a high and low
plateau with the intent to keep the gpu clocked high to cover occasional
stalls under high load, and low despite occasional glitches under steady
low load, and inbetween. However, we run into situations like kodi where
we want to stay at low power (video decoding is done efficiently
inside the fixed function HW and doesn't need high clocks even for high
bitrate streams), but just occasionally the pipeline is more complex
than a video decode and we need a smidgen of extra GPU power to present
on time. In the high power regime, we sample at sub frame intervals with
a bias to upclocking, and conversely at low power we sample over a few
frames worth to provide what we consider to be the right levels of
responsiveness respectively. At low power, we more or less expect to be
kicked out to high power at the start of a busy sequence by waitboosting.
Prior to commit e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active
request") whenever we missed the frame or stalled, we would immediate go
full throttle and upclock the GPU to max. But in commit e9af4ea2b9e7, we
relaxed the waitboosting to only apply if the pipeline was deep to avoid
over-committing resources for a near miss. Sadly though, a near miss is
still a miss, and perceptible as jitter in the frame delivery.
To try and prevent the near miss before having to resort to boosting
after the fact, we use the pageflip queue as an indication that we are
in an "interactive" regime and so should sample the load more frequently
to provide power before the frame misses it vblank. This will make us
more favorable to providing a small power increase (one or two bins) as
required rather than going all the way to maximum and then having to
work back down again. (We still keep the waitboosting mechanism around
just in case a dramatic change in system load requires urgent uplocking,
faster than we can provide in a few evaluation intervals.)
v2: Reduce rps_set_interactive to a boolean parameter to avoid the
confusion of what if they wanted a new power mode after pinning to a
different mode (which to choose?)
v3: Only reprogram RPS while the GT is awake, it will be set when we
wake the GT, and while off warns about being used outside of rpm.
v4: Fix deferred application of interactive mode
v5: s/state/interactive/
v6: Group the mutex with its principle in a substruct
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107111
Fixes: e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active request")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180731132629.3381-1-chris@chris-wilson.co.uk
2018-07-31 20:26:29 +07:00
|
|
|
seq_printf(m, "Up threshold: %d%%\n",
|
|
|
|
rps->power.up_threshold);
|
2015-04-27 19:41:19 +07:00
|
|
|
|
2016-04-23 01:35:46 +07:00
|
|
|
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
|
|
|
|
rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
|
|
|
|
seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
|
|
|
|
rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
|
|
|
|
seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
|
|
|
|
rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
|
drm/i915: Interactive RPS mode
RPS provides a feedback loop where we use the load during the previous
evaluation interval to decide whether to up or down clock the GPU
frequency. Our responsiveness is split into 3 regimes, a high and low
plateau with the intent to keep the gpu clocked high to cover occasional
stalls under high load, and low despite occasional glitches under steady
low load, and inbetween. However, we run into situations like kodi where
we want to stay at low power (video decoding is done efficiently
inside the fixed function HW and doesn't need high clocks even for high
bitrate streams), but just occasionally the pipeline is more complex
than a video decode and we need a smidgen of extra GPU power to present
on time. In the high power regime, we sample at sub frame intervals with
a bias to upclocking, and conversely at low power we sample over a few
frames worth to provide what we consider to be the right levels of
responsiveness respectively. At low power, we more or less expect to be
kicked out to high power at the start of a busy sequence by waitboosting.
Prior to commit e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active
request") whenever we missed the frame or stalled, we would immediate go
full throttle and upclock the GPU to max. But in commit e9af4ea2b9e7, we
relaxed the waitboosting to only apply if the pipeline was deep to avoid
over-committing resources for a near miss. Sadly though, a near miss is
still a miss, and perceptible as jitter in the frame delivery.
To try and prevent the near miss before having to resort to boosting
after the fact, we use the pageflip queue as an indication that we are
in an "interactive" regime and so should sample the load more frequently
to provide power before the frame misses it vblank. This will make us
more favorable to providing a small power increase (one or two bins) as
required rather than going all the way to maximum and then having to
work back down again. (We still keep the waitboosting mechanism around
just in case a dramatic change in system load requires urgent uplocking,
faster than we can provide in a few evaluation intervals.)
v2: Reduce rps_set_interactive to a boolean parameter to avoid the
confusion of what if they wanted a new power mode after pinning to a
different mode (which to choose?)
v3: Only reprogram RPS while the GT is awake, it will be set when we
wake the GT, and while off warns about being used outside of rpm.
v4: Fix deferred application of interactive mode
v5: s/state/interactive/
v6: Group the mutex with its principle in a substruct
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107111
Fixes: e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active request")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180731132629.3381-1-chris@chris-wilson.co.uk
2018-07-31 20:26:29 +07:00
|
|
|
seq_printf(m, "Down threshold: %d%%\n",
|
|
|
|
rps->power.down_threshold);
|
2010-12-18 05:19:02 +07:00
|
|
|
|
2016-12-02 15:23:49 +07:00
|
|
|
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
|
2015-06-26 04:54:07 +07:00
|
|
|
rp_state_cap >> 16) & 0xff;
|
2017-07-07 03:41:13 +07:00
|
|
|
max_freq *= (IS_GEN9_BC(dev_priv) ||
|
2018-04-05 21:00:52 +07:00
|
|
|
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
|
2010-12-18 05:19:02 +07:00
|
|
|
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, max_freq));
|
2010-12-18 05:19:02 +07:00
|
|
|
|
|
|
|
max_freq = (rp_state_cap & 0xff00) >> 8;
|
2017-07-07 03:41:13 +07:00
|
|
|
max_freq *= (IS_GEN9_BC(dev_priv) ||
|
2018-04-05 21:00:52 +07:00
|
|
|
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
|
2010-12-18 05:19:02 +07:00
|
|
|
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, max_freq));
|
2010-12-18 05:19:02 +07:00
|
|
|
|
2016-12-02 15:23:49 +07:00
|
|
|
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
|
2015-06-26 04:54:07 +07:00
|
|
|
rp_state_cap >> 0) & 0xff;
|
2017-07-07 03:41:13 +07:00
|
|
|
max_freq *= (IS_GEN9_BC(dev_priv) ||
|
2018-04-05 21:00:52 +07:00
|
|
|
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
|
2010-12-18 05:19:02 +07:00
|
|
|
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, max_freq));
|
2013-04-06 04:29:22 +07:00
|
|
|
seq_printf(m, "Max overclocked frequency: %dMHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->max_freq));
|
2015-03-18 16:48:21 +07:00
|
|
|
|
2015-04-27 19:41:19 +07:00
|
|
|
seq_printf(m, "Current freq: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->cur_freq));
|
2015-04-27 19:41:19 +07:00
|
|
|
seq_printf(m, "Actual freq: %d MHz\n", cagf);
|
2015-03-18 16:48:21 +07:00
|
|
|
seq_printf(m, "Idle freq: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->idle_freq));
|
2015-04-27 19:41:19 +07:00
|
|
|
seq_printf(m, "Min freq: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->min_freq));
|
2016-07-13 15:10:35 +07:00
|
|
|
seq_printf(m, "Boost freq: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->boost_freq));
|
2015-04-27 19:41:19 +07:00
|
|
|
seq_printf(m, "Max freq: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->max_freq));
|
2015-04-27 19:41:19 +07:00
|
|
|
seq_printf(m,
|
|
|
|
"efficient (RPe) frequency: %d MHz\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps, rps->efficient_freq));
|
2010-12-18 05:19:02 +07:00
|
|
|
} else {
|
2013-06-25 04:59:48 +07:00
|
|
|
seq_puts(m, "no P-state info available\n");
|
2010-12-18 05:19:02 +07:00
|
|
|
}
|
2010-01-30 02:27:07 +07:00
|
|
|
|
2017-02-08 01:33:45 +07:00
|
|
|
seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
|
2015-09-25 18:00:32 +07:00
|
|
|
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
|
|
|
|
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
|
|
|
|
|
2019-06-14 06:21:54 +07:00
|
|
|
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
|
2013-11-28 03:21:54 +07:00
|
|
|
return ret;
|
2010-01-30 02:27:07 +07:00
|
|
|
}
|
|
|
|
|
2011-06-29 03:04:16 +07:00
|
|
|
static int i915_ring_freq_table(struct seq_file *m, void *unused)
|
|
|
|
{
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
2019-10-25 04:16:41 +07:00
|
|
|
struct intel_rps *rps = &dev_priv->gt.rps;
|
2015-06-29 16:20:22 +07:00
|
|
|
unsigned int max_gpu_freq, min_gpu_freq;
|
2019-01-14 21:21:14 +07:00
|
|
|
intel_wakeref_t wakeref;
|
2018-03-08 21:26:48 +07:00
|
|
|
int gpu_freq, ia_freq;
|
2011-06-29 03:04:16 +07:00
|
|
|
|
2017-12-15 21:36:35 +07:00
|
|
|
if (!HAS_LLC(dev_priv))
|
|
|
|
return -ENODEV;
|
2011-06-29 03:04:16 +07:00
|
|
|
|
2018-03-08 21:26:48 +07:00
|
|
|
min_gpu_freq = rps->min_freq;
|
|
|
|
max_gpu_freq = rps->max_freq;
|
2018-04-05 21:00:52 +07:00
|
|
|
if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
|
2015-06-29 16:20:22 +07:00
|
|
|
/* Convert GT frequency to 50 HZ units */
|
2018-03-08 21:26:48 +07:00
|
|
|
min_gpu_freq /= GEN9_FREQ_SCALER;
|
|
|
|
max_gpu_freq /= GEN9_FREQ_SCALER;
|
2015-06-29 16:20:22 +07:00
|
|
|
}
|
|
|
|
|
2013-06-25 04:59:48 +07:00
|
|
|
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
|
2011-06-29 03:04:16 +07:00
|
|
|
|
2019-06-14 06:21:54 +07:00
|
|
|
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
|
2015-06-29 16:20:22 +07:00
|
|
|
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
|
2012-09-27 00:34:00 +07:00
|
|
|
ia_freq = gpu_freq;
|
|
|
|
sandybridge_pcode_read(dev_priv,
|
|
|
|
GEN6_PCODE_READ_MIN_FREQ_TABLE,
|
2019-05-21 23:40:24 +07:00
|
|
|
&ia_freq, NULL);
|
2013-04-13 01:10:13 +07:00
|
|
|
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
|
2019-10-25 04:16:41 +07:00
|
|
|
intel_gpu_freq(rps,
|
|
|
|
(gpu_freq *
|
|
|
|
(IS_GEN9_BC(dev_priv) ||
|
|
|
|
INTEL_GEN(dev_priv) >= 10 ?
|
|
|
|
GEN9_FREQ_SCALER : 1))),
|
2013-04-13 01:10:13 +07:00
|
|
|
((ia_freq >> 0) & 0xff) * 100,
|
|
|
|
((ia_freq >> 8) & 0xff) * 100);
|
2011-06-29 03:04:16 +07:00
|
|
|
}
|
2019-06-14 06:21:54 +07:00
|
|
|
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
|
2019-04-26 15:17:21 +07:00
|
|
|
|
|
|
|
return 0;
|
2011-06-29 03:04:16 +07:00
|
|
|
}
|
|
|
|
|
2016-08-03 04:50:21 +07:00
|
|
|
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
|
2014-07-24 23:04:46 +07:00
|
|
|
{
|
2018-03-07 20:42:24 +07:00
|
|
|
seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
|
|
|
|
ring->space, ring->head, ring->tail, ring->emit);
|
2014-07-24 23:04:46 +07:00
|
|
|
}
|
|
|
|
|
2011-03-20 08:14:29 +07:00
|
|
|
static int i915_context_status(struct seq_file *m, void *unused)
|
|
|
|
{
|
2019-10-04 20:40:09 +07:00
|
|
|
struct drm_i915_private *i915 = node_to_i915(m->private);
|
|
|
|
struct i915_gem_context *ctx, *cn;
|
2011-03-20 08:14:29 +07:00
|
|
|
|
2019-10-04 20:40:09 +07:00
|
|
|
spin_lock(&i915->gem.contexts.lock);
|
|
|
|
list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
|
2019-04-26 23:33:35 +07:00
|
|
|
struct i915_gem_engines_iter it;
|
2019-03-08 20:25:16 +07:00
|
|
|
struct intel_context *ce;
|
|
|
|
|
2019-10-04 20:40:09 +07:00
|
|
|
if (!kref_get_unless_zero(&ctx->ref))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
spin_unlock(&i915->gem.contexts.lock);
|
|
|
|
|
2018-09-04 22:31:17 +07:00
|
|
|
seq_puts(m, "HW context ");
|
2016-08-15 16:49:08 +07:00
|
|
|
if (ctx->pid) {
|
2016-05-24 20:53:39 +07:00
|
|
|
struct task_struct *task;
|
|
|
|
|
2016-08-15 16:49:08 +07:00
|
|
|
task = get_pid_task(ctx->pid, PIDTYPE_PID);
|
2016-05-24 20:53:39 +07:00
|
|
|
if (task) {
|
|
|
|
seq_printf(m, "(%s [%d]) ",
|
|
|
|
task->comm, task->pid);
|
|
|
|
put_task_struct(task);
|
|
|
|
}
|
2016-08-15 16:49:08 +07:00
|
|
|
} else if (IS_ERR(ctx->file_priv)) {
|
|
|
|
seq_puts(m, "(deleted) ");
|
2016-05-24 20:53:39 +07:00
|
|
|
} else {
|
|
|
|
seq_puts(m, "(kernel) ");
|
|
|
|
}
|
|
|
|
|
2016-05-24 20:53:41 +07:00
|
|
|
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
|
|
|
|
seq_putc(m, '\n');
|
2014-07-24 23:04:46 +07:00
|
|
|
|
2019-04-26 23:33:35 +07:00
|
|
|
for_each_gem_engine(ce,
|
|
|
|
i915_gem_context_lock_engines(ctx), it) {
|
2020-01-09 15:51:42 +07:00
|
|
|
if (intel_context_pin_if_active(ce)) {
|
2019-08-10 01:25:17 +07:00
|
|
|
seq_printf(m, "%s: ", ce->engine->name);
|
|
|
|
if (ce->state)
|
2020-02-11 23:14:51 +07:00
|
|
|
i915_debugfs_describe_obj(m, ce->state->obj);
|
2016-08-03 04:50:21 +07:00
|
|
|
describe_ctx_ring(m, ce->ring);
|
2019-08-10 01:25:17 +07:00
|
|
|
seq_putc(m, '\n');
|
2020-01-09 15:51:42 +07:00
|
|
|
intel_context_unpin(ce);
|
2019-08-10 01:25:17 +07:00
|
|
|
}
|
2014-07-24 23:04:46 +07:00
|
|
|
}
|
2019-04-26 23:33:35 +07:00
|
|
|
i915_gem_context_unlock_engines(ctx);
|
2013-09-18 11:12:45 +07:00
|
|
|
|
|
|
|
seq_putc(m, '\n');
|
2013-02-15 06:05:12 +07:00
|
|
|
|
2019-10-04 20:40:09 +07:00
|
|
|
spin_lock(&i915->gem.contexts.lock);
|
|
|
|
list_safe_reset_next(ctx, cn, link);
|
|
|
|
i915_gem_context_put(ctx);
|
|
|
|
}
|
|
|
|
spin_unlock(&i915->gem.contexts.lock);
|
2011-03-20 08:14:29 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-12-14 19:57:16 +07:00
|
|
|
static const char *swizzle_string(unsigned swizzle)
|
|
|
|
{
|
2013-06-25 04:59:49 +07:00
|
|
|
switch (swizzle) {
|
2011-12-14 19:57:16 +07:00
|
|
|
case I915_BIT_6_SWIZZLE_NONE:
|
|
|
|
return "none";
|
|
|
|
case I915_BIT_6_SWIZZLE_9:
|
|
|
|
return "bit9";
|
|
|
|
case I915_BIT_6_SWIZZLE_9_10:
|
|
|
|
return "bit9/bit10";
|
|
|
|
case I915_BIT_6_SWIZZLE_9_11:
|
|
|
|
return "bit9/bit11";
|
|
|
|
case I915_BIT_6_SWIZZLE_9_10_11:
|
|
|
|
return "bit9/bit10/bit11";
|
|
|
|
case I915_BIT_6_SWIZZLE_9_17:
|
|
|
|
return "bit9/bit17";
|
|
|
|
case I915_BIT_6_SWIZZLE_9_10_17:
|
|
|
|
return "bit9/bit10/bit17";
|
|
|
|
case I915_BIT_6_SWIZZLE_UNKNOWN:
|
2012-12-29 00:00:09 +07:00
|
|
|
return "unknown";
|
2011-12-14 19:57:16 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
return "bug";
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * i915_swizzle_info - debugfs dump of the bit6 swizzle configuration.
 *
 * Prints the swizzle pattern the driver recorded for X- and Y-tiling in
 * the global GTT, followed by the raw hardware registers that determine
 * swizzling on the given generation.  A runtime-pm wakeref is held across
 * all register reads.
 *
 * Returns 0 (seq_file show callbacks report output, not errors, here).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	/* Keep the device awake for the duration of the register dump. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		/* gen3/4: dump the DRAM-controller registers. */
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		/* C0DRB3/C1DRB3 are 16-bit registers, hence read16. */
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: per-channel DIMM configuration and tiling control. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		/* The arbiter mode register moved on gen8+. */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
|
|
|
|
|
2016-08-15 15:49:33 +07:00
|
|
|
static const char *rps_power_to_str(unsigned int power)
|
|
|
|
{
|
|
|
|
static const char * const strings[] = {
|
|
|
|
[LOW_POWER] = "low power",
|
|
|
|
[BETWEEN] = "mixed",
|
|
|
|
[HIGH_POWER] = "high power",
|
|
|
|
};
|
|
|
|
|
|
|
|
if (power >= ARRAY_SIZE(strings) || !strings[power])
|
|
|
|
return "unknown";
|
|
|
|
|
|
|
|
return strings[power];
|
|
|
|
}
|
|
|
|
|
2015-04-07 22:20:32 +07:00
|
|
|
/*
 * i915_rps_boost_info - debugfs summary of the RPS (GPU frequency) state.
 *
 * Reports whether RPS is enabled and the GT awake, the outstanding
 * waitboost count, the interactive flag, the requested vs. actual
 * frequency, and the soft/hard frequency limits.  When the GT is awake
 * on gen6+, also samples the up/down evaluation-interval counters under
 * forcewake to show the autotuning averages for the current power window.
 *
 * Returns 0.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/*
		 * Sample all four counters inside a single forcewake
		 * bracket; _FW reads skip the per-access forcewake dance.
		 */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		/* Guard against division by zero if a counter reads 0. */
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
|
|
|
|
|
2013-07-05 01:02:07 +07:00
|
|
|
static int i915_llc(struct seq_file *m, void *data)
|
|
|
|
{
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
2016-04-13 21:26:43 +07:00
|
|
|
const bool edram = INTEL_GEN(dev_priv) > 8;
|
2013-07-05 01:02:07 +07:00
|
|
|
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
|
2019-03-29 00:45:32 +07:00
|
|
|
seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
|
|
|
|
dev_priv->edram_size_mb);
|
2013-07-05 01:02:07 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-01-18 23:05:56 +07:00
|
|
|
/*
 * i915_huc_load_status_info - debugfs dump of the HuC firmware state.
 *
 * Prints the HuC firmware descriptor followed by the raw HUC_STATUS2
 * register, read under a runtime-pm wakeref.
 *
 * Returns 0, or -ENODEV on platforms without a GT microcontroller.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);

	/* Only the register read needs the device awake. */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
|
|
|
|
|
2015-08-12 21:43:37 +07:00
|
|
|
static int i915_guc_load_status_info(struct seq_file *m, void *data)
|
|
|
|
{
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
2019-01-14 21:21:14 +07:00
|
|
|
intel_wakeref_t wakeref;
|
2017-10-17 16:44:49 +07:00
|
|
|
struct drm_printer p;
|
2015-08-12 21:43:37 +07:00
|
|
|
|
2019-07-25 07:18:06 +07:00
|
|
|
if (!HAS_GT_UC(dev_priv))
|
2017-12-15 21:36:35 +07:00
|
|
|
return -ENODEV;
|
2015-08-12 21:43:37 +07:00
|
|
|
|
2017-10-17 16:44:49 +07:00
|
|
|
p = drm_seq_file_printer(m);
|
2019-07-13 17:00:12 +07:00
|
|
|
intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
|
2015-08-12 21:43:37 +07:00
|
|
|
|
2019-06-14 06:21:55 +07:00
|
|
|
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
|
2019-01-14 21:21:23 +07:00
|
|
|
u32 tmp = I915_READ(GUC_STATUS);
|
|
|
|
u32 i;
|
|
|
|
|
|
|
|
seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
|
|
|
|
seq_printf(m, "\tBootrom status = 0x%x\n",
|
|
|
|
(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
|
|
|
|
seq_printf(m, "\tuKernel status = 0x%x\n",
|
|
|
|
(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
|
|
|
|
seq_printf(m, "\tMIA Core status = 0x%x\n",
|
|
|
|
(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
|
|
|
|
seq_puts(m, "\nScratch registers:\n");
|
|
|
|
for (i = 0; i < 16; i++) {
|
|
|
|
seq_printf(m, "\t%2d: \t0x%x\n",
|
|
|
|
i, I915_READ(SOFT_SCRATCH(i)));
|
|
|
|
}
|
|
|
|
}
|
2017-02-03 15:28:33 +07:00
|
|
|
|
2015-08-12 21:43:37 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-03-19 16:53:44 +07:00
|
|
|
static const char *
|
|
|
|
stringify_guc_log_type(enum guc_log_buffer_type type)
|
|
|
|
{
|
|
|
|
switch (type) {
|
|
|
|
case GUC_ISR_LOG_BUFFER:
|
|
|
|
return "ISR";
|
|
|
|
case GUC_DPC_LOG_BUFFER:
|
|
|
|
return "DPC";
|
|
|
|
case GUC_CRASH_DUMP_LOG_BUFFER:
|
|
|
|
return "CRASH";
|
|
|
|
default:
|
|
|
|
MISSING_CASE(type);
|
|
|
|
}
|
|
|
|
|
|
|
|
return "";
|
|
|
|
}
|
|
|
|
|
2020-02-19 05:33:18 +07:00
|
|
|
/* Print GuC log-relay statistics into the debugfs seq_file. */
static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
{
	enum guc_log_buffer_type type;

	/* Stats are only meaningful once the relay has been created. */
	if (!intel_guc_log_relay_created(log)) {
		seq_puts(m, "GuC log relay not created\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	/* One line of flush/overflow counters per log buffer type. */
	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
|
|
|
|
|
2017-05-10 22:04:51 +07:00
|
|
|
static int i915_guc_info(struct seq_file *m, void *data)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
2020-02-19 05:33:19 +07:00
|
|
|
struct intel_uc *uc = &dev_priv->gt.uc;
|
2017-05-10 22:04:51 +07:00
|
|
|
|
2020-02-19 05:33:19 +07:00
|
|
|
if (!intel_uc_uses_guc(uc))
|
2017-12-15 21:36:35 +07:00
|
|
|
return -ENODEV;
|
|
|
|
|
2020-02-19 05:33:19 +07:00
|
|
|
i915_guc_log_info(m, &uc->guc.log);
|
2018-03-19 16:53:43 +07:00
|
|
|
|
2015-08-12 21:43:44 +07:00
|
|
|
/* Add more as required ... */
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-05-10 22:04:51 +07:00
|
|
|
/*
 * debugfs: dump every active GuC stage (context) descriptor from the
 * descriptor pool shared with the GuC firmware, including the per-engine
 * execlist context state embedded in each descriptor.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc *uc = &dev_priv->gt.uc;
	struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
	int index;

	if (!intel_uc_uses_guc_submission(uc))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip pool slots not currently handed to the firmware. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Per-engine execlist context state within this descriptor. */
		for_each_uabi_engine(engine, dev_priv) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
|
|
|
|
|
2015-08-12 21:43:40 +07:00
|
|
|
/*
 * debugfs: hex-dump the GuC log buffer (or, when the node's data field
 * is set, the captured load-error log), four 32bit words per line.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	/* Select the backing object: load-error log or the live log vma. */
	if (dump_load_err)
		obj = dev_priv->gt.uc.load_err_log;
	else if (dev_priv->gt.uc.guc.log.vma)
		obj = dev_priv->gt.uc.guc.log.vma->obj;

	/* No log captured/allocated: nothing to print, not an error. */
	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/* Four dwords per output line. */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
|
|
|
|
|
2018-03-19 16:53:40 +07:00
|
|
|
/* debugfs get: report the current GuC log verbosity level. */
static int i915_guc_log_level_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	struct intel_uc *uc = &i915->gt.uc;

	if (!intel_uc_uses_guc(uc))
		return -ENODEV;

	*val = intel_guc_log_get_level(&uc->guc.log);

	return 0;
}
|
|
|
|
|
2018-03-19 16:53:40 +07:00
|
|
|
/* debugfs set: change the GuC log verbosity level. */
static int i915_guc_log_level_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_uc *uc = &i915->gt.uc;

	if (!intel_uc_uses_guc(uc))
		return -ENODEV;

	return intel_guc_log_set_level(&uc->guc.log, val);
}
|
|
|
|
|
2018-03-19 16:53:40 +07:00
|
|
|
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
|
|
|
|
i915_guc_log_level_get, i915_guc_log_level_set,
|
2016-10-12 23:24:41 +07:00
|
|
|
"%lld\n");
|
|
|
|
|
2018-03-19 16:53:40 +07:00
|
|
|
static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2019-08-18 16:52:02 +07:00
|
|
|
struct drm_i915_private *i915 = inode->i_private;
|
|
|
|
struct intel_guc *guc = &i915->gt.uc.guc;
|
|
|
|
struct intel_guc_log *log = &guc->log;
|
2018-03-19 16:53:40 +07:00
|
|
|
|
2020-01-31 22:37:06 +07:00
|
|
|
if (!intel_guc_is_ready(guc))
|
2018-03-19 16:53:40 +07:00
|
|
|
return -ENODEV;
|
|
|
|
|
2019-08-18 16:52:02 +07:00
|
|
|
file->private_data = log;
|
2018-03-19 16:53:40 +07:00
|
|
|
|
2019-08-18 16:52:02 +07:00
|
|
|
return intel_guc_log_relay_open(log);
|
2018-03-19 16:53:40 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * debugfs write handler for the GuC log relay: writing the integer 1
 * starts the relay, any other integer flushes it.
 */
static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;
	int val;
	int ret;

	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
	if (ret < 0)
		return ret;

	/*
	 * Enable and start the guc log relay on value of 1.
	 * Flush log relay for any other value.
	 */
	if (val == 1)
		ret = intel_guc_log_relay_start(log);
	else
		intel_guc_log_relay_flush(log);

	/* ret is 0 here unless relay_start failed; report bytes consumed. */
	return ret ?: cnt;
}
|
|
|
|
|
|
|
|
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
2019-08-18 16:52:02 +07:00
|
|
|
struct drm_i915_private *i915 = inode->i_private;
|
|
|
|
struct intel_guc *guc = &i915->gt.uc.guc;
|
2018-03-19 16:53:40 +07:00
|
|
|
|
2019-08-18 16:52:02 +07:00
|
|
|
intel_guc_log_relay_close(&guc->log);
|
2018-03-19 16:53:40 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* File operations backing the guc_log_relay debugfs entry. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
|
|
|
|
|
2015-06-05 00:23:57 +07:00
|
|
|
/*
 * debugfs: report the device's runtime power management state — GPU
 * wakerefs, IRQ state, PCI power state and, when the debug config is
 * enabled, the tracked wakeref information.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Informational only; the remaining fields are still printed. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		/* Dump tracked wakerefs when runtime-pm debugging is on. */
		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}
|
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
/*
 * debugfs: dump the state of every uabi engine, holding a runtime-pm
 * wakeref across the dump so hardware registers can be sampled.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "GT awake? %s [%d]\n",
		   yesno(dev_priv->gt.awake),
		   atomic_read(&dev_priv->gt.wakeref.count));
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, dev_priv)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
|
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
static int i915_rcs_topology(struct seq_file *m, void *unused)
|
2015-10-27 19:47:01 +07:00
|
|
|
{
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
2020-02-11 23:14:51 +07:00
|
|
|
struct drm_printer p = drm_seq_file_printer(m);
|
2015-10-30 22:54:47 +07:00
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
|
2015-10-30 22:53:32 +07:00
|
|
|
|
2015-10-27 19:47:01 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
static int i915_shrinker_info(struct seq_file *m, void *unused)
|
2014-02-08 03:48:15 +07:00
|
|
|
{
|
2020-02-11 23:14:51 +07:00
|
|
|
struct drm_i915_private *i915 = node_to_i915(m->private);
|
2014-02-08 03:48:15 +07:00
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
|
|
|
|
seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
|
2014-02-08 03:48:15 +07:00
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
return 0;
|
2014-02-08 03:48:15 +07:00
|
|
|
}
|
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
static int i915_wa_registers(struct seq_file *m, void *unused)
|
2014-02-08 03:48:15 +07:00
|
|
|
{
|
2020-02-11 23:14:51 +07:00
|
|
|
struct drm_i915_private *i915 = node_to_i915(m->private);
|
|
|
|
struct intel_engine_cs *engine;
|
2019-11-30 01:54:27 +07:00
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
for_each_uabi_engine(engine, i915) {
|
|
|
|
const struct i915_wa_list *wal = &engine->ctx_wa_list;
|
|
|
|
const struct i915_wa *wa;
|
|
|
|
unsigned int count;
|
2019-11-30 01:54:34 +07:00
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
count = wal->count;
|
|
|
|
if (!count)
|
|
|
|
continue;
|
2014-11-05 00:06:50 +07:00
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
seq_printf(m, "%s: Workarounds applied: %u\n",
|
|
|
|
engine->name, count);
|
2014-01-22 19:36:08 +07:00
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
for (wa = wal->list; count--; wa++)
|
|
|
|
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
|
|
|
|
i915_mmio_reg_offset(wa->reg),
|
|
|
|
wa->set, wa->clr);
|
2014-11-05 00:06:50 +07:00
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
seq_printf(m, "\n");
|
|
|
|
}
|
2014-01-22 19:36:08 +07:00
|
|
|
|
2020-02-11 23:14:51 +07:00
|
|
|
return 0;
|
2014-01-22 19:36:08 +07:00
|
|
|
}
|
|
|
|
|
2013-03-11 04:10:06 +07:00
|
|
|
static int
|
|
|
|
i915_wedged_get(void *data, u64 *val)
|
2009-10-14 04:20:20 +07:00
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct drm_i915_private *i915 = data;
|
|
|
|
int ret = intel_gt_terminally_wedged(&i915->gt);
|
2009-10-14 04:20:20 +07:00
|
|
|
|
2019-02-20 21:56:37 +07:00
|
|
|
switch (ret) {
|
|
|
|
case -EIO:
|
|
|
|
*val = 1;
|
|
|
|
return 0;
|
|
|
|
case 0:
|
|
|
|
*val = 0;
|
|
|
|
return 0;
|
|
|
|
default:
|
|
|
|
return ret;
|
|
|
|
}
|
2009-10-14 04:20:20 +07:00
|
|
|
}
|
|
|
|
|
2013-03-11 04:10:06 +07:00
|
|
|
/*
 * debugfs set: manually trigger a GPU reset/wedge for the engine mask
 * given in @val, capturing error state.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}
|
|
|
|
|
2013-03-11 04:10:06 +07:00
|
|
|
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
|
|
|
|
i915_wedged_get, i915_wedged_set,
|
2013-04-12 16:10:05 +07:00
|
|
|
"%llu\n");
|
2009-10-14 04:20:20 +07:00
|
|
|
|
2019-10-12 14:23:07 +07:00
|
|
|
/*
 * debugfs set: program the i915-perf NOA delay, rejecting values that
 * would overflow a 32bit CS timestamp difference.
 */
static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32bits.
	 */
	if (val > mul_u32_u32(U32_MAX, clk))
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}
|
|
|
|
|
|
|
|
/* debugfs get: report the current i915-perf NOA programming delay. */
static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}
|
|
|
|
|
|
|
|
DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
|
|
|
|
i915_perf_noa_delay_get,
|
|
|
|
i915_perf_noa_delay_set,
|
|
|
|
"%llu\n");
|
|
|
|
|
2017-10-18 19:16:21 +07:00
|
|
|
#define DROP_UNBOUND BIT(0)
|
|
|
|
#define DROP_BOUND BIT(1)
|
|
|
|
#define DROP_RETIRE BIT(2)
|
|
|
|
#define DROP_ACTIVE BIT(3)
|
|
|
|
#define DROP_FREED BIT(4)
|
|
|
|
#define DROP_SHRINK_ALL BIT(5)
|
|
|
|
#define DROP_IDLE BIT(6)
|
2018-09-03 15:33:37 +07:00
|
|
|
#define DROP_RESET_ACTIVE BIT(7)
|
|
|
|
#define DROP_RESET_SEQNO BIT(8)
|
2019-10-12 00:38:23 +07:00
|
|
|
#define DROP_RCU BIT(9)
|
2016-10-28 19:58:42 +07:00
|
|
|
#define DROP_ALL (DROP_UNBOUND | \
|
|
|
|
DROP_BOUND | \
|
|
|
|
DROP_RETIRE | \
|
|
|
|
DROP_ACTIVE | \
|
2017-03-08 21:46:22 +07:00
|
|
|
DROP_FREED | \
|
2017-10-18 19:16:21 +07:00
|
|
|
DROP_SHRINK_ALL |\
|
2018-09-03 15:33:37 +07:00
|
|
|
DROP_IDLE | \
|
|
|
|
DROP_RESET_ACTIVE | \
|
2019-10-12 00:38:23 +07:00
|
|
|
DROP_RESET_SEQNO | \
|
|
|
|
DROP_RCU)
|
2013-03-11 04:10:06 +07:00
|
|
|
/* debugfs get: report the full mask of supported DROP_* flags. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
|
2013-03-11 04:10:06 +07:00
|
|
|
/*
 * Apply the GT-level portion of the drop-caches flags: optionally wedge
 * engines stuck busy, retire requests, wait for idle and recover a
 * wedged GT with a full reset.
 */
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	/* If the engines refuse to idle in time, declare the GT wedged. */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	/* DROP_IDLE additionally waits for the GT powerwell to drop. */
	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	/* Recover a previously wedged GT via a full reset. */
	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * debugfs set: drop the caches selected by the DROP_* mask in @val —
 * GT state first, then shrinker targets under a fake reclaim context,
 * RCU grace periods and finally freed-object reaping.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(&i915->gt, val);
	if (ret)
		return ret;

	/* Enter a reclaim context so the shrinker paths are exercised. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}
|
|
|
|
|
2013-03-11 04:10:06 +07:00
|
|
|
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
|
|
|
|
i915_drop_caches_get, i915_drop_caches_set,
|
|
|
|
"0x%08llx\n");
|
2013-01-15 19:39:35 +07:00
|
|
|
|
2013-03-11 04:10:06 +07:00
|
|
|
/*
 * Read the current MBC uncore snoop/cache-sharing policy (gen6/gen7 only).
 *
 * @data: struct drm_i915_private of the device
 * @val:  out-parameter receiving the SNPCR field value (0-3)
 *
 * Returns 0 on success, -ENODEV outside gen6/gen7.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Register access requires the device to be awake */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
|
|
|
|
|
2013-03-11 04:10:06 +07:00
|
|
|
/*
 * Set the MBC uncore snoop/cache-sharing policy (gen6/gen7 only).
 *
 * @data: struct drm_i915_private of the device
 * @val:  new SNPCR field value; must be in the range 0-3
 *
 * Returns 0 on success, -ENODEV outside gen6/gen7, -EINVAL for
 * out-of-range values.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	drm_dbg(&dev_priv->drm,
		"Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}
|
|
|
|
|
2019-08-23 23:03:06 +07:00
|
|
|
static void
|
|
|
|
intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
|
|
|
|
u8 *to_mask)
|
|
|
|
{
|
|
|
|
int offset = slice * sseu->ss_stride;
|
|
|
|
|
|
|
|
memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
|
|
|
|
}
|
|
|
|
|
2013-03-11 04:10:06 +07:00
|
|
|
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
|
|
|
|
i915_cache_sharing_get, i915_cache_sharing_set,
|
|
|
|
"%llu\n");
|
2011-08-04 01:28:44 +07:00
|
|
|
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
/*
 * Decode the CHV power-gating signal registers into @sseu: which
 * subslices are powered up and how many EUs are enabled in each.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* CHV has a single slice; any live subslice implies slice 0 */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* each un-gated EU group contributes 2 EUs */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
|
|
|
|
|
2017-10-26 07:15:46 +07:00
|
|
|
/*
 * Decode the gen10 slice/subslice power-gating ACK registers into @sseu:
 * active slice mask, subslice masks and enabled-EU counts.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bit groups for the even (SSA) and odd (SSB) subslices */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (info->sseu.has_subslice_pg &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* each ACK bit represents 2 EUs */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
|
|
|
|
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 17:59:31 +07:00
|
|
|
/*
 * Decode the gen9 slice/subslice power-gating ACK registers into @sseu.
 * On gen9 LP (Broxton-class) the live subslice mask is read back bit by
 * bit from the ACK registers; on gen9 BC the full per-slice mask is
 * copied from the static device info instead.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bit groups for the even (SSA) and odd (SSB) subslices */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;
			/* byte index of this subslice in the packed mask */
			u8 ss_idx = s * info->sseu.ss_stride +
				    ss / BITS_PER_BYTE;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[ss_idx] |=
					BIT(ss % BITS_PER_BYTE);
			}

			/* each ACK bit represents 2 EUs */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
|
|
|
|
|
2019-12-24 15:40:10 +07:00
|
|
|
/*
 * Fill @sseu for Broadwell from GEN8_GT_SLICE_INFO plus the static
 * device info, then subtract the EUs fused off in the 7-EU subslices.
 */
static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
				   struct sseu_dev_info *sseu)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++)
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu = info->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
|
|
|
|
|
2016-08-31 23:13:03 +07:00
|
|
|
/*
 * Print one SSEU summary to the seq_file.
 *
 * @m:                 seq_file sink
 * @is_available_info: true when printing the static device capabilities
 *                     (adds the power-gating/pooled-EU capability lines),
 *                     false when printing the live runtime status
 * @sseu:              the SSEU data to print
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	/* capability lines only make sense for the static device info */
	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
|
|
|
|
|
2015-02-13 23:27:54 +07:00
|
|
|
/*
 * debugfs show handler for i915_sseu_status: prints the static SSEU
 * capabilities, then probes the hardware (per-platform helper) and
 * prints the live SSEU status. Gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &info->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* size the status struct like the device's static topology */
	intel_sseu_set_info(&sseu, info->sseu.max_slices,
			    info->sseu.max_subslices,
			    info->sseu.max_eus_per_subslice);

	/* keep the device awake while poking the status registers */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			bdw_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
|
|
|
|
|
2011-04-26 01:25:56 +07:00
|
|
|
/*
 * Opening i915_forcewake_user pins the GT awake and (gen6+) grabs a
 * user forcewake reference, holding the hardware out of sleep until
 * the file is closed again (see i915_forcewake_release).
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	/* track user-held wakerefs separately for debug accounting */
	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}
|
|
|
|
|
2012-04-17 04:07:40 +07:00
|
|
|
static int i915_forcewake_release(struct inode *inode, struct file *file)
|
2011-04-26 01:25:56 +07:00
|
|
|
{
|
2017-09-07 20:44:41 +07:00
|
|
|
struct drm_i915_private *i915 = inode->i_private;
|
2019-09-12 19:48:13 +07:00
|
|
|
struct intel_gt *gt = &i915->gt;
|
2011-04-26 01:25:56 +07:00
|
|
|
|
2019-09-12 19:48:13 +07:00
|
|
|
if (INTEL_GEN(i915) >= 6)
|
|
|
|
intel_uncore_forcewake_user_put(&i915->uncore);
|
|
|
|
intel_gt_pm_put(gt);
|
|
|
|
atomic_dec(>->user_wakeref);
|
2011-04-26 01:25:56 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* fops for i915_forcewake_user: hold forcewake while the file is open */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
|
|
|
|
|
2013-10-18 01:09:56 +07:00
|
|
|
/* Read-only debugfs entries, registered via drm_debugfs_create_files() */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	/* same handler; the (void *)1 data selects the load-error log */
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
|
2009-02-18 08:08:50 +07:00
|
|
|
|
2013-10-18 01:09:56 +07:00
|
|
|
/* Writable debugfs entries, each backed by its own file_operations */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
};
|
|
|
|
|
2016-06-24 20:00:17 +07:00
|
|
|
/*
 * Register all i915 debugfs entries under the primary DRM minor:
 * module params, the forcewake-user file, the writable files, and the
 * read-only info list.
 *
 * Returns the result of drm_debugfs_create_files() (0 on success).
 */
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	i915_debugfs_params(dev_priv);

	/* owner-only: holding this file open keeps the HW awake */
	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
|