linux_dsm_epyc7002/drivers/gpu/drm/i915/i915_trace.h

#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_
#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_INCLUDE_FILE i915_trace
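/*
 * Usage sketch (not part of the header itself): everything defined below is
 * grouped under the "i915" system in tracefs, so individual events or the
 * whole group can be enabled at runtime through the standard ftrace
 * interface. Paths assume the usual debugfs mount:
 *
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/enable
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/i915_vma_bind/enable
 *   cat /sys/kernel/debug/tracing/trace_pipe
 */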
/* pipe updates */
TRACE_EVENT(i915_pipe_update_start,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
__field(u32, min)
__field(u32, max)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
crtc->pipe);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->min = crtc->debug.min_vbl;
__entry->max = crtc->debug.max_vbl;
),
TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
pipe_name(__entry->pipe), __entry->frame,
__entry->scanline, __entry->min, __entry->max)
);
TRACE_EVENT(i915_pipe_update_vblank_evaded,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
__field(u32, min)
__field(u32, max)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = crtc->debug.start_vbl_count;
__entry->scanline = crtc->debug.scanline_start;
__entry->min = crtc->debug.min_vbl;
__entry->max = crtc->debug.max_vbl;
),
TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
pipe_name(__entry->pipe), __entry->frame,
__entry->scanline, __entry->min, __entry->max)
);
TRACE_EVENT(i915_pipe_update_end,
TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
TP_ARGS(crtc, frame, scanline_end),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = frame;
__entry->scanline = scanline_end;
),
TP_printk("pipe %c, frame=%u, scanline=%u",
pipe_name(__entry->pipe), __entry->frame,
__entry->scanline)
);
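/*
 * For every TRACE_EVENT(name, ...) in this file the tracing core generates
 * an inline trace_<name>() helper taking the TP_PROTO arguments. A hedged
 * sketch of how the three pipe update events pair up in a caller (the real
 * call sites live in the vblank-evasion code; the frame/scanline variables
 * here are illustrative):
 *
 *   trace_i915_pipe_update_start(crtc);
 *   ... critical section that must complete before the vblank ...
 *   trace_i915_pipe_update_vblank_evaded(crtc);
 *   trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
 */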
/* object tracking */
TRACE_EVENT(i915_gem_object_create,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
__entry->size = obj->base.size;
),
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);
TRACE_EVENT(i915_gem_shrink,
TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
TP_ARGS(i915, target, flags),
TP_STRUCT__entry(
__field(int, dev)
__field(unsigned long, target)
__field(unsigned, flags)
),
TP_fast_assign(
__entry->dev = i915->dev->primary->index;
__entry->target = target;
__entry->flags = flags;
),
TP_printk("dev=%d, target=%lu, flags=%x",
__entry->dev, __entry->target, __entry->flags)
);
TRACE_EVENT(i915_vma_bind,
TP_PROTO(struct i915_vma *vma, unsigned flags),
TP_ARGS(vma, flags),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u64, offset)
__field(u32, size)
__field(unsigned, flags)
),
TP_fast_assign(
__entry->obj = vma->obj;
__entry->vm = vma->vm;
__entry->offset = vma->node.start;
__entry->size = vma->node.size;
__entry->flags = flags;
),
TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "",
__entry->vm)
);
TRACE_EVENT(i915_vma_unbind,
TP_PROTO(struct i915_vma *vma),
TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u64, offset)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = vma->obj;
__entry->vm = vma->vm;
__entry->offset = vma->node.start;
__entry->size = vma->node.size;
),
TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
TRACE_EVENT(i915_va_alloc,
TP_PROTO(struct i915_vma *vma),
TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u64, start)
__field(u64, end)
),
TP_fast_assign(
__entry->vm = vma->vm;
__entry->start = vma->node.start;
__entry->end = vma->node.start + vma->node.size - 1;
),
TP_printk("vm=%p (%c), 0x%llx-0x%llx",
__entry->vm, i915_is_ggtt(__entry->vm) ? 'G' : 'P', __entry->start, __entry->end)
);
DECLARE_EVENT_CLASS(i915_px_entry,
TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift),
TP_ARGS(vm, px, start, px_shift),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u32, px)
__field(u64, start)
__field(u64, end)
),
TP_fast_assign(
__entry->vm = vm;
__entry->px = px;
__entry->start = start;
__entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1;
),
TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
__entry->vm, __entry->px, __entry->start, __entry->end)
);
DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc,
TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
TP_ARGS(vm, pde, start, pde_shift)
);
DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc,
TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
TP_ARGS(vm, pdpe, start, pdpe_shift),
TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
__entry->vm, __entry->px, __entry->start, __entry->end)
);
DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc,
TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
TP_ARGS(vm, pml4e, start, pml4e_shift),
TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
__entry->vm, __entry->px, __entry->start, __entry->end)
);
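/*
 * Note on the i915_px_entry class above: the generic "px" field carries a
 * pde, pdpe or pml4e index depending on the event. DEFINE_EVENT reuses the
 * class TP_printk as-is, while DEFINE_EVENT_PRINT keeps the class fields
 * and TP_fast_assign but overrides only the format string, which is why the
 * pdpe/pml4e variants can relabel __entry->px without declaring a new class.
 */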
/* Avoid extra math because we only support two sizes. The format matches the
 * "%*pb" bitmap output below: each 32 bits prints as 8 hex digits followed by
 * a comma. */
#define TRACE_PT_SIZE(bits) \
((((bits) == 1024) ? 288 : 144) + 1)
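/*
 * Worked example of the sizing above: a 1024-bit bitmap prints as
 * 1024 / 32 = 32 words of "%08x," (9 characters each) for 288 bytes, and a
 * 512-bit bitmap as 16 * 9 = 144. The trailing "+ 1" reserves room for the
 * terminating NUL written by scnprintf().
 */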
DECLARE_EVENT_CLASS(i915_page_table_entry_update,
TP_PROTO(struct i915_address_space *vm, u32 pde,
struct i915_page_table *pt, u32 first, u32 count, u32 bits),
TP_ARGS(vm, pde, pt, first, count, bits),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u32, pde)
__field(u32, first)
__field(u32, last)
__dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
),
TP_fast_assign(
__entry->vm = vm;
__entry->pde = pde;
__entry->first = first;
__entry->last = first + count - 1;
scnprintf(__get_str(cur_ptes),
TRACE_PT_SIZE(bits),
"%*pb",
bits,
pt->used_ptes);
),
TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
__entry->vm, __entry->pde, __entry->last, __entry->first,
__get_str(cur_ptes))
);
DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
TP_PROTO(struct i915_address_space *vm, u32 pde,
struct i915_page_table *pt, u32 first, u32 count, u32 bits),
TP_ARGS(vm, pde, pt, first, count, bits)
);
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
TP_ARGS(obj, old_read, old_write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, read_domains)
__field(u32, write_domain)
),
TP_fast_assign(
__entry->obj = obj;
__entry->read_domains = obj->base.read_domains | (old_read << 16);
__entry->write_domain = obj->base.write_domain | (old_write << 16);
),
TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
__entry->obj,
__entry->read_domains >> 16,
__entry->read_domains & 0xffff,
__entry->write_domain >> 16,
__entry->write_domain & 0xffff)
);
TRACE_EVENT(i915_gem_object_pwrite,
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, offset)
__field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = offset;
__entry->len = len;
),
TP_printk("obj=%p, offset=%u, len=%u",
__entry->obj, __entry->offset, __entry->len)
);
TRACE_EVENT(i915_gem_object_pread,
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, offset)
__field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = offset;
__entry->len = len;
),
TP_printk("obj=%p, offset=%u, len=%u",
__entry->obj, __entry->offset, __entry->len)
);
TRACE_EVENT(i915_gem_object_fault,
TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
TP_ARGS(obj, index, gtt, write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, index)
__field(bool, gtt)
__field(bool, write)
),
TP_fast_assign(
__entry->obj = obj;
__entry->index = index;
__entry->gtt = gtt;
__entry->write = write;
),
TP_printk("obj=%p, %s index=%u %s",
__entry->obj,
__entry->gtt ? "GTT" : "CPU",
__entry->index,
__entry->write ? ", writable" : "")
);
DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
__entry->obj = obj;
),
TP_printk("obj=%p", __entry->obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
TRACE_EVENT(i915_gem_evict,
TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
TP_ARGS(dev, size, align, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, size)
__field(u32, align)
__field(unsigned, flags)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->size = size;
__entry->align = align;
__entry->flags = flags;
),
TP_printk("dev=%d, size=%d, align=%d %s",
__entry->dev, __entry->size, __entry->align,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);
TRACE_EVENT(i915_gem_evict_everything,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
TP_STRUCT__entry(
__field(u32, dev)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
),
TP_printk("dev=%d", __entry->dev)
);
TRACE_EVENT(i915_gem_evict_vm,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm),
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->dev = vm->dev->primary->index;
__entry->vm = vm;
),
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct drm_i915_gem_request *to_req,
struct intel_engine_cs *from,
struct drm_i915_gem_request *req),
TP_ARGS(to_req, from, req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, sync_from)
__field(u32, sync_to)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to_req->ring->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
__entry->dev,
__entry->sync_from, __entry->sync_to,
__entry->seqno)
);
TRACE_EVENT(i915_gem_ring_dispatch,
TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
TP_ARGS(req, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
__field(u32, flags)
),
TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags;
i915_trace_irq_get(ring, req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
__entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);
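/*
 * Note: unusually for a tracepoint, the TP_fast_assign above has a side
 * effect: i915_trace_irq_get() arms the ring's user interrupt so that the
 * completion of the dispatched request can be traced too. Like the rest of
 * TP_fast_assign, it only runs while the event is actually enabled.
 */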
TRACE_EVENT(i915_gem_ring_flush,
TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
TP_ARGS(req, invalidate, flush),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, invalidate)
__field(u32, flush)
),
TP_fast_assign(
__entry->dev = req->ring->dev->primary->index;
__entry->ring = req->ring->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
__entry->dev, __entry->ring,
__entry->invalidate, __entry->flush)
);
DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_notify,
TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = ring->get_seqno(ring, false);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_wait_begin,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
__field(bool, blocking)
),
/* NB: the blocking information is racy since mutex_is_locked
* doesn't check that the current thread holds the lock. The only
* other option would be to pass the boolean information of whether
* or not the class was blocking down through the stack which is
* less desirable.
*/
TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->blocking =
mutex_is_locked(&ring->dev->struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
__entry->dev, __entry->ring,
__entry->seqno, __entry->blocking ? "yes (NB)" : "no")
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
TRACE_EVENT(i915_flip_request,
TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
__field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
__entry->plane = plane;
__entry->obj = obj;
),
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
TRACE_EVENT(i915_flip_complete,
TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
__field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
__entry->plane = plane;
__entry->obj = obj;
),
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
TRACE_EVENT_CONDITION(i915_reg_rw,
TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
TP_ARGS(write, reg, val, len, trace),
TP_CONDITION(trace),
TP_STRUCT__entry(
__field(u64, val)
__field(u32, reg)
__field(u16, write)
__field(u16, len)
),
TP_fast_assign(
__entry->val = (u64)val;
__entry->reg = i915_mmio_reg_offset(reg);
__entry->write = write;
__entry->len = len;
),
TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
__entry->write ? "write" : "read",
__entry->reg, __entry->len,
(u32)(__entry->val & 0xffffffff),
(u32)(__entry->val >> 32))
);
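/*
 * TRACE_EVENT_CONDITION above emits the event only when TP_CONDITION
 * evaluates true, letting the register accessors pass their per-access
 * "trace" decision straight through. A hedged sketch of a read-side call
 * (argument names are illustrative):
 *
 *   trace_i915_reg_rw(false, reg, (u64)val, sizeof(val), trace);
 */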
TRACE_EVENT(intel_gpu_freq_change,
TP_PROTO(u32 freq),
TP_ARGS(freq),
TP_STRUCT__entry(
__field(u32, freq)
),
TP_fast_assign(
__entry->freq = freq;
),
TP_printk("new_freq=%u", __entry->freq)
);
/**
* DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
*
* With full ppgtt enabled each process using drm will allocate at least one
* translation table. With these traces it is possible to keep track of the
* allocation and of the lifetime of the tables; this can be used during
* testing/debug to verify that we are not leaking ppgtts.
* These traces identify the ppgtt through the vm pointer, which is also printed
* by the i915_vma_bind and i915_vma_unbind tracepoints.
*/
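/*
 * A sketch of what the two events look like in the trace buffer, derived
 * from the TP_printk format below (the pointer value is illustrative):
 *
 *   i915_ppgtt_create:  dev=0, vm=ffff88017b009000
 *   i915_ppgtt_release: dev=0, vm=ffff88017b009000
 */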
DECLARE_EVENT_CLASS(i915_ppgtt,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u32, dev)
),
TP_fast_assign(
__entry->vm = vm;
__entry->dev = vm->dev->primary->index;
),
TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
)
DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm)
);
DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm)
);
/**
* DOC: i915_context_create and i915_context_free tracepoints
*
* These tracepoints are used to track creation and deletion of contexts.
* If full ppgtt is enabled, they also print the address of the vm assigned to
* the context.
*/
DECLARE_EVENT_CLASS(i915_context,
TP_PROTO(struct intel_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field(u32, dev)
__field(struct intel_context *, ctx)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
__entry->dev = ctx->i915->dev->primary->index;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
__entry->dev, __entry->ctx, __entry->vm)
)
DEFINE_EVENT(i915_context, i915_context_create,
TP_PROTO(struct intel_context *ctx),
TP_ARGS(ctx)
);
DEFINE_EVENT(i915_context, i915_context_free,
TP_PROTO(struct intel_context *ctx),
TP_ARGS(ctx)
);
/**
* DOC: switch_mm tracepoint
*
* This tracepoint allows tracking of the mm switch, which is an important point
* in the lifetime of the vm in the legacy submission path. This tracepoint is
* called only if full ppgtt is enabled.
*/
TRACE_EVENT(switch_mm,
TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
TP_ARGS(ring, to),
TP_STRUCT__entry(
__field(u32, ring)
__field(struct intel_context *, to)
__field(struct i915_address_space *, vm)
__field(u32, dev)
),
TP_fast_assign(
__entry->ring = ring->id;
__entry->to = to;
__entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
__entry->dev = ring->dev->primary->index;
),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
__entry->dev, __entry->ring, __entry->to, __entry->vm)
);
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
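/*
 * A minimal sketch of how this header is consumed: exactly one .c file in
 * the driver defines CREATE_TRACE_POINTS before including it, which makes
 * the #include of <trace/define_trace.h> above expand the events into real
 * definitions instead of mere declarations (in i915 this is
 * i915_trace_points.c):
 *
 *   #define CREATE_TRACE_POINTS
 *   #include "i915_trace.h"
 */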