Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-28 11:18:45 +07:00.
commit 07fe0b1280

As alluded to in several patches, and as will be reiterated later: a VMA
is an abstraction for a GEM BO bound into an address space. It therefore
stands to reason that the existing bind and unbind operations are the
ones most impacted. This patch implements this and updates all callers
which weren't already updated in the series (because it was too messy).

This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things at Daniel's request. The history is preserved for
posterity with the email convention of ">".

One big change from the original patch, aside from a bunch of cropping,
is that I've created an i915_vma_unbind() function, since we always have
the VMA anyway and this avoids an extra lookup. There is a caveat: we
retain an i915_gem_object_ggtt_unbind for the global cases which might
not talk in VMAs.

> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need, them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superfluous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)

Fixed up the inactive shrinker. As Daniel noticed, the code could
potentially count the same object multiple times. While that's not
possible in the current case, since one object can only ever be bound
into one address space thus far, we may as well try to get something
more future-proof in place now. With a prep patch before this one to
switch over to using the bound list + inactive check, we're now able to
carry that forward for every address space an object is bound into.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
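
To make the shape of the new interface concrete, here is a minimal
caller-side sketch of the per-VMA unbind path described above. Only
i915_vma_unbind() and i915_gem_object_ggtt_unbind come from the commit
message; the lookup helper i915_gem_obj_to_vma() and the exact error
conventions are assumptions about the surrounding series, not part of
this file.

/*
 * Sketch only: i915_gem_obj_to_vma() is an assumed lookup helper;
 * i915_vma_unbind() is the function this commit introduces.
 */
static int example_unbind_from_vm(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm)
{
	/* A VMA is this object as bound into one address space. */
	struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);

	if (!vma)
		return 0;	/* not bound into this VM; nothing to unbind */

	/* Per-VMA unbind; global-GTT callers that do not talk in VMAs
	 * would use i915_gem_object_ggtt_unbind(obj) instead. */
	return i915_vma_unbind(vma);
}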
463 lines
11 KiB
C
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_ringbuffer.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE i915_trace

/* object tracking */

TRACE_EVENT(i915_gem_object_create,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, size)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->size = obj->base.size;
	),

	TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);
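
/*
 * VMA bind/unbind: a VMA is a GEM BO bound into one address space, so
 * these events record the object, the address space (vm) and the
 * offset/size of the drm_mm node backing the binding. TRACE_EVENT()
 * generates a corresponding trace_i915_vma_bind()/trace_i915_vma_unbind()
 * call for the driver to emit.
 */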

TRACE_EVENT(i915_vma_bind,
	TP_PROTO(struct i915_vma *vma, bool mappable),
	TP_ARGS(vma, mappable),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(struct i915_address_space *, vm)
		__field(u32, offset)
		__field(u32, size)
		__field(bool, mappable)
	),

	TP_fast_assign(
		__entry->obj = vma->obj;
		__entry->vm = vma->vm;
		__entry->offset = vma->node.start;
		__entry->size = vma->node.size;
		__entry->mappable = mappable;
	),

	TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
		  __entry->obj, __entry->offset, __entry->size,
		  __entry->mappable ? ", mappable" : "",
		  __entry->vm)
);

TRACE_EVENT(i915_vma_unbind,
	TP_PROTO(struct i915_vma *vma),
	TP_ARGS(vma),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(struct i915_address_space *, vm)
		__field(u32, offset)
		__field(u32, size)
	),

	TP_fast_assign(
		__entry->obj = vma->obj;
		__entry->vm = vma->vm;
		__entry->offset = vma->node.start;
		__entry->size = vma->node.size;
	),

	TP_printk("obj=%p, offset=%08x size=%x vm=%p",
		  __entry->obj, __entry->offset, __entry->size, __entry->vm)
);
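
/*
 * Domain transitions pack old and new state into one u32: the previous
 * domains sit in the high 16 bits, the current ones in the low 16, as
 * decoded by the "%02x=>%02x" printk below.
 */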

TRACE_EVENT(i915_gem_object_change_domain,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
	TP_ARGS(obj, old_read, old_write),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, read_domains)
		__field(u32, write_domain)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->read_domains = obj->base.read_domains | (old_read << 16);
		__entry->write_domain = obj->base.write_domain | (old_write << 16);
	),

	TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
		  __entry->obj,
		  __entry->read_domains >> 16,
		  __entry->read_domains & 0xffff,
		  __entry->write_domain >> 16,
		  __entry->write_domain & 0xffff)
);
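
/* CPU access: byte ranges written/read through the pwrite/pread paths. */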

TRACE_EVENT(i915_gem_object_pwrite,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	TP_ARGS(obj, offset, len),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, offset)
		__field(u32, len)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->offset = offset;
		__entry->len = len;
	),

	TP_printk("obj=%p, offset=%u, len=%u",
		  __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_pread,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	TP_ARGS(obj, offset, len),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, offset)
		__field(u32, len)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->offset = offset;
		__entry->len = len;
	),

	TP_printk("obj=%p, offset=%u, len=%u",
		  __entry->obj, __entry->offset, __entry->len)
);
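
/*
 * Page fault: records the faulting page index, whether the fault came
 * through the GTT or the CPU mapping, and whether it was a write.
 */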

TRACE_EVENT(i915_gem_object_fault,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
	TP_ARGS(obj, index, gtt, write),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, index)
		__field(bool, gtt)
		__field(bool, write)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->index = index;
		__entry->gtt = gtt;
		__entry->write = write;
	),

	TP_printk("obj=%p, %s index=%u %s",
		  __entry->obj,
		  __entry->gtt ? "GTT" : "CPU",
		  __entry->index,
		  __entry->write ? ", writable" : "")
);
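
/* Shared class for object-lifecycle events that log only the object pointer. */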

DECLARE_EVENT_CLASS(i915_gem_object,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
	),

	TP_fast_assign(
		__entry->obj = obj;
	),

	TP_printk("obj=%p", __entry->obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj)
);
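
/*
 * Eviction: emitted when space of the given size/alignment has to be
 * reclaimed (optionally from the mappable region), or when everything
 * is evicted at once.
 */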

TRACE_EVENT(i915_gem_evict,
	TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
	TP_ARGS(dev, size, align, mappable),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, size)
		__field(u32, align)
		__field(bool, mappable)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
		__entry->size = size;
		__entry->align = align;
		__entry->mappable = mappable;
	),

	TP_printk("dev=%d, size=%d, align=%d %s",
		  __entry->dev, __entry->size, __entry->align,
		  __entry->mappable ? ", mappable" : "")
);

TRACE_EVENT(i915_gem_evict_everything,
	TP_PROTO(struct drm_device *dev),
	TP_ARGS(dev),

	TP_STRUCT__entry(
		__field(u32, dev)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
	),

	TP_printk("dev=%d", __entry->dev)
);
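
/*
 * Ring execution: batch dispatch (with seqno and flags) and explicit
 * invalidate/flush cycles. Dispatch additionally takes an IRQ reference
 * via i915_trace_irq_get() from within TP_fast_assign.
 */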

TRACE_EVENT(i915_gem_ring_dispatch,
	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
	TP_ARGS(ring, seqno, flags),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, seqno)
		__field(u32, flags)
	),

	TP_fast_assign(
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->seqno = seqno;
		__entry->flags = flags;
		i915_trace_irq_get(ring, seqno);
	),

	TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
		  __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);

TRACE_EVENT(i915_gem_ring_flush,
	TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
	TP_ARGS(ring, invalidate, flush),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, invalidate)
		__field(u32, flush)
	),

	TP_fast_assign(
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->invalidate = invalidate;
		__entry->flush = flush;
	),

	TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
		  __entry->dev, __entry->ring,
		  __entry->invalidate, __entry->flush)
);
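
/* Request lifecycle: add, complete, retire, plus wait begin/end. */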

DECLARE_EVENT_CLASS(i915_gem_request,
	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	TP_ARGS(ring, seqno),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, seqno)
	),

	TP_fast_assign(
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->seqno = seqno;
	),

	TP_printk("dev=%u, ring=%u, seqno=%u",
		  __entry->dev, __entry->ring, __entry->seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	TP_ARGS(ring, seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	TP_ARGS(ring, seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	TP_ARGS(ring, seqno)
);

TRACE_EVENT(i915_gem_request_wait_begin,
	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	TP_ARGS(ring, seqno),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, seqno)
		__field(bool, blocking)
	),

	/* NB: the blocking information is racy since mutex_is_locked
	 * doesn't check that the current thread holds the lock. The only
	 * other option would be to pass the boolean information of whether
	 * or not the caller was blocking down through the stack, which is
	 * less desirable.
	 */
	TP_fast_assign(
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->seqno = seqno;
		__entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
	),

	TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
		  __entry->dev, __entry->ring, __entry->seqno,
		  __entry->blocking ? "yes (NB)" : "no")
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	TP_ARGS(ring, seqno)
);
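
/* Ring waits: begin/end of stalls waiting for space on a ring. */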

DECLARE_EVENT_CLASS(i915_ring,
	TP_PROTO(struct intel_ring_buffer *ring),
	TP_ARGS(ring),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
	),

	TP_fast_assign(
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
	),

	TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);

DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
	TP_PROTO(struct intel_ring_buffer *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(i915_ring, i915_ring_wait_end,
	TP_PROTO(struct intel_ring_buffer *ring),
	TP_ARGS(ring)
);
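
/* Display: page-flip request and completion, keyed by plane and object. */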

TRACE_EVENT(i915_flip_request,
	TP_PROTO(int plane, struct drm_i915_gem_object *obj),
	TP_ARGS(plane, obj),

	TP_STRUCT__entry(
		__field(int, plane)
		__field(struct drm_i915_gem_object *, obj)
	),

	TP_fast_assign(
		__entry->plane = plane;
		__entry->obj = obj;
	),

	TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT(i915_flip_complete,
	TP_PROTO(int plane, struct drm_i915_gem_object *obj),
	TP_ARGS(plane, obj),

	TP_STRUCT__entry(
		__field(int, plane)
		__field(struct drm_i915_gem_object *, obj)
	),

	TP_fast_assign(
		__entry->plane = plane;
		__entry->obj = obj;
	),

	TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
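
/*
 * MMIO access: emitted only when the caller passes trace=true
 * (TP_CONDITION). The 64-bit value is printed as two 32-bit halves,
 * low word first.
 */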

TRACE_EVENT_CONDITION(i915_reg_rw,
	TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
	TP_ARGS(write, reg, val, len, trace),

	TP_CONDITION(trace),

	TP_STRUCT__entry(
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
	),

	TP_fast_assign(
		__entry->val = (u64)val;
		__entry->reg = reg;
		__entry->write = write;
		__entry->len = len;
	),

	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		  __entry->write ? "write" : "read",
		  __entry->reg, __entry->len,
		  (u32)(__entry->val & 0xffffffff),
		  (u32)(__entry->val >> 32))
);
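
/* GPU frequency (RPS) change notification. */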

TRACE_EVENT(intel_gpu_freq_change,
	TP_PROTO(u32 freq),
	TP_ARGS(freq),

	TP_STRUCT__entry(
		__field(u32, freq)
	),

	TP_fast_assign(
		__entry->freq = freq;
	),

	TP_printk("new_freq=%u", __entry->freq)
);

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>