2009-08-25 17:15:50 +07:00
|
|
|
/*
 * i915 tracepoint definitions.
 *
 * Multi-read guard: trace headers are included several times by the
 * tracepoint machinery, so the guard is bypassed when
 * TRACE_HEADER_MULTI_READ is defined.
 */
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_ringbuffer.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE i915_trace

/* object tracking */
TRACE_EVENT(i915_gem_object_create,
|
2010-11-09 02:18:58 +07:00
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj),
|
2009-08-25 17:15:50 +07:00
|
|
|
TP_ARGS(obj),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2010-11-09 02:18:58 +07:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
2009-08-25 17:15:50 +07:00
|
|
|
__field(u32, size)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
2010-11-09 02:18:58 +07:00
|
|
|
__entry->size = obj->base.size;
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
|
|
|
|
);
|
|
|
|
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 07:00:10 +07:00
|
|
|
TRACE_EVENT(i915_vma_bind,
|
|
|
|
TP_PROTO(struct i915_vma *vma, bool mappable),
|
|
|
|
TP_ARGS(vma, mappable),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2010-11-09 02:18:58 +07:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 07:00:10 +07:00
|
|
|
__field(struct i915_address_space *, vm)
|
2011-02-03 18:57:46 +07:00
|
|
|
__field(u32, offset)
|
|
|
|
__field(u32, size)
|
2010-10-01 04:42:15 +07:00
|
|
|
__field(bool, mappable)
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 07:00:10 +07:00
|
|
|
__entry->obj = vma->obj;
|
|
|
|
__entry->vm = vma->vm;
|
|
|
|
__entry->offset = vma->node.start;
|
|
|
|
__entry->size = vma->node.size;
|
2010-10-01 04:42:15 +07:00
|
|
|
__entry->mappable = mappable;
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 07:00:10 +07:00
|
|
|
TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
|
2011-02-03 18:57:46 +07:00
|
|
|
__entry->obj, __entry->offset, __entry->size,
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 07:00:10 +07:00
|
|
|
__entry->mappable ? ", mappable" : "",
|
|
|
|
__entry->vm)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 07:00:10 +07:00
|
|
|
TRACE_EVENT(i915_vma_unbind,
|
|
|
|
TP_PROTO(struct i915_vma *vma),
|
|
|
|
TP_ARGS(vma),
|
2011-02-03 18:57:46 +07:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 07:00:10 +07:00
|
|
|
__field(struct i915_address_space *, vm)
|
2011-02-03 18:57:46 +07:00
|
|
|
__field(u32, offset)
|
|
|
|
__field(u32, size)
|
|
|
|
),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_fast_assign(
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 07:00:10 +07:00
|
|
|
__entry->obj = vma->obj;
|
|
|
|
__entry->vm = vma->vm;
|
|
|
|
__entry->offset = vma->node.start;
|
|
|
|
__entry->size = vma->node.size;
|
2011-02-03 18:57:46 +07:00
|
|
|
),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 07:00:10 +07:00
|
|
|
TP_printk("obj=%p, offset=%08x size=%x vm=%p",
|
|
|
|
__entry->obj, __entry->offset, __entry->size, __entry->vm)
|
2011-02-03 18:57:46 +07:00
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(i915_gem_object_change_domain,
|
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
|
|
|
|
TP_ARGS(obj, old_read, old_write),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2010-11-09 02:18:58 +07:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
2009-08-25 17:15:50 +07:00
|
|
|
__field(u32, read_domains)
|
|
|
|
__field(u32, write_domain)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
2011-02-03 18:57:46 +07:00
|
|
|
__entry->read_domains = obj->base.read_domains | (old_read << 16);
|
|
|
|
__entry->write_domain = obj->base.write_domain | (old_write << 16);
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
|
2009-08-25 17:15:50 +07:00
|
|
|
__entry->obj,
|
2011-02-03 18:57:46 +07:00
|
|
|
__entry->read_domains >> 16,
|
|
|
|
__entry->read_domains & 0xffff,
|
|
|
|
__entry->write_domain >> 16,
|
|
|
|
__entry->write_domain & 0xffff)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TRACE_EVENT(i915_gem_object_pwrite,
|
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
|
|
|
|
TP_ARGS(obj, offset, len),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
|
|
|
__field(u32, offset)
|
|
|
|
__field(u32, len)
|
|
|
|
),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
|
|
|
__entry->offset = offset;
|
|
|
|
__entry->len = len;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("obj=%p, offset=%u, len=%u",
|
|
|
|
__entry->obj, __entry->offset, __entry->len)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(i915_gem_object_pread,
|
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
|
|
|
|
TP_ARGS(obj, offset, len),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2010-11-09 02:18:58 +07:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
2011-02-03 18:57:46 +07:00
|
|
|
__field(u32, offset)
|
|
|
|
__field(u32, len)
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
2011-02-03 18:57:46 +07:00
|
|
|
__entry->offset = offset;
|
|
|
|
__entry->len = len;
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_printk("obj=%p, offset=%u, len=%u",
|
|
|
|
__entry->obj, __entry->offset, __entry->len)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TRACE_EVENT(i915_gem_object_fault,
|
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
|
|
|
|
TP_ARGS(obj, index, gtt, write),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
|
|
|
__field(u32, index)
|
|
|
|
__field(bool, gtt)
|
|
|
|
__field(bool, write)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
|
|
|
__entry->index = index;
|
|
|
|
__entry->gtt = gtt;
|
|
|
|
__entry->write = write;
|
|
|
|
),
|
2010-05-24 15:25:44 +07:00
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_printk("obj=%p, %s index=%u %s",
|
|
|
|
__entry->obj,
|
|
|
|
__entry->gtt ? "GTT" : "CPU",
|
|
|
|
__entry->index,
|
|
|
|
__entry->write ? ", writable" : "")
|
|
|
|
);
|
|
|
|
|
|
|
|
DECLARE_EVENT_CLASS(i915_gem_object,
|
2010-11-09 02:18:58 +07:00
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj),
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_ARGS(obj),
|
2010-05-24 15:25:44 +07:00
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("obj=%p", __entry->obj)
|
2010-05-24 15:25:44 +07:00
|
|
|
);
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
|
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj),
|
|
|
|
TP_ARGS(obj)
|
|
|
|
);
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
|
2010-11-09 02:18:58 +07:00
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj),
|
2010-03-11 15:41:45 +07:00
|
|
|
TP_ARGS(obj)
|
|
|
|
);
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TRACE_EVENT(i915_gem_evict,
|
|
|
|
TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
|
|
|
|
TP_ARGS(dev, size, align, mappable),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
|
|
|
__field(u32, size)
|
|
|
|
__field(u32, align)
|
|
|
|
__field(bool, mappable)
|
|
|
|
),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_fast_assign(
|
|
|
|
__entry->dev = dev->primary->index;
|
|
|
|
__entry->size = size;
|
|
|
|
__entry->align = align;
|
|
|
|
__entry->mappable = mappable;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%d, size=%d, align=%d %s",
|
|
|
|
__entry->dev, __entry->size, __entry->align,
|
|
|
|
__entry->mappable ? ", mappable" : "")
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TRACE_EVENT(i915_gem_evict_everything,
|
drm/i915: Track unbound pages
When dealing with a working set larger than the GATT, or even the
mappable aperture when touching through the GTT, we end up with evicting
objects only to rebind them at a new offset again later. Moving an
object into and out of the GTT requires clflushing the pages, thus
causing a double-clflush penalty for rebinding.
To avoid having to clflush on rebinding, we can track the pages as they
are evicted from the GTT and only relinquish those pages on memory
pressure.
As usual, if it were not for the handling of out-of-memory condition and
having to manually shrink our own bo caches, it would be a net reduction
of code. Alas.
Note: The patch also contains a few changes to the last-hope
evict_everything logic in i916_gem_execbuffer.c - we no longer try to
only evict the purgeable stuff in a first try (since that's superflous
and only helps in OOM corner-cases, not fragmented-gtt trashing
situations).
Also, the extraction of the get_pages retry loop from bind_to_gtt (and
other callsites) to get_pages should imo have been a separate patch.
v2: Ditch the newly added put_pages (for unbound objects only) in
i915_gem_reset. A quick irc discussion hasn't revealed any important
reason for this, so if we need this, I'd like to have a git blame'able
explanation for it.
v3: Undo the s/drm_malloc_ab/kmalloc/ in get_pages that Chris noticed.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Split out code movements and rant a bit in the commit message
with a few Notes. Done v2]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2012-08-20 16:40:46 +07:00
|
|
|
TP_PROTO(struct drm_device *dev),
|
|
|
|
TP_ARGS(dev),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->dev = dev->primary->index;
|
|
|
|
),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
drm/i915: Track unbound pages
When dealing with a working set larger than the GATT, or even the
mappable aperture when touching through the GTT, we end up with evicting
objects only to rebind them at a new offset again later. Moving an
object into and out of the GTT requires clflushing the pages, thus
causing a double-clflush penalty for rebinding.
To avoid having to clflush on rebinding, we can track the pages as they
are evicted from the GTT and only relinquish those pages on memory
pressure.
As usual, if it were not for the handling of out-of-memory condition and
having to manually shrink our own bo caches, it would be a net reduction
of code. Alas.
Note: The patch also contains a few changes to the last-hope
evict_everything logic in i916_gem_execbuffer.c - we no longer try to
only evict the purgeable stuff in a first try (since that's superflous
and only helps in OOM corner-cases, not fragmented-gtt trashing
situations).
Also, the extraction of the get_pages retry loop from bind_to_gtt (and
other callsites) to get_pages should imo have been a separate patch.
v2: Ditch the newly added put_pages (for unbound objects only) in
i915_gem_reset. A quick irc discussion hasn't revealed any important
reason for this, so if we need this, I'd like to have a git blame'able
explanation for it.
v3: Undo the s/drm_malloc_ab/kmalloc/ in get_pages that Chris noticed.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Split out code movements and rant a bit in the commit message
with a few Notes. Done v2]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2012-08-20 16:40:46 +07:00
|
|
|
TP_printk("dev=%d", __entry->dev)
|
2011-02-03 18:57:46 +07:00
|
|
|
);
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2013-09-24 23:57:56 +07:00
|
|
|
TRACE_EVENT(i915_gem_evict_vm,
|
|
|
|
TP_PROTO(struct i915_address_space *vm),
|
|
|
|
TP_ARGS(vm),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct i915_address_space *, vm)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->vm = vm;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
|
|
|
|
);
|
|
|
|
|
2013-09-25 17:43:28 +07:00
|
|
|
TRACE_EVENT(i915_gem_ring_sync_to,
|
|
|
|
TP_PROTO(struct intel_ring_buffer *from,
|
|
|
|
struct intel_ring_buffer *to,
|
|
|
|
u32 seqno),
|
|
|
|
TP_ARGS(from, to, seqno),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
|
|
|
__field(u32, sync_from)
|
|
|
|
__field(u32, sync_to)
|
|
|
|
__field(u32, seqno)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->dev = from->dev->primary->index;
|
|
|
|
__entry->sync_from = from->id;
|
|
|
|
__entry->sync_to = to->id;
|
|
|
|
__entry->seqno = seqno;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
|
|
|
|
__entry->dev,
|
|
|
|
__entry->sync_from, __entry->sync_to,
|
|
|
|
__entry->seqno)
|
|
|
|
);
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TRACE_EVENT(i915_gem_ring_dispatch,
|
2012-10-17 18:09:54 +07:00
|
|
|
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
|
|
|
|
TP_ARGS(ring, seqno, flags),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2009-09-24 06:23:33 +07:00
|
|
|
__field(u32, dev)
|
2011-02-03 18:57:46 +07:00
|
|
|
__field(u32, ring)
|
2009-08-25 17:15:50 +07:00
|
|
|
__field(u32, seqno)
|
2012-10-17 18:09:54 +07:00
|
|
|
__field(u32, flags)
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2011-02-03 18:57:46 +07:00
|
|
|
__entry->dev = ring->dev->primary->index;
|
|
|
|
__entry->ring = ring->id;
|
2009-08-25 17:15:50 +07:00
|
|
|
__entry->seqno = seqno;
|
2012-10-17 18:09:54 +07:00
|
|
|
__entry->flags = flags;
|
2011-02-03 18:57:46 +07:00
|
|
|
i915_trace_irq_get(ring, seqno);
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
2012-10-17 18:09:54 +07:00
|
|
|
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
|
|
|
|
__entry->dev, __entry->ring, __entry->seqno, __entry->flags)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TRACE_EVENT(i915_gem_ring_flush,
|
|
|
|
TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
|
|
|
|
TP_ARGS(ring, invalidate, flush),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2009-09-24 06:23:33 +07:00
|
|
|
__field(u32, dev)
|
2011-02-03 18:57:46 +07:00
|
|
|
__field(u32, ring)
|
|
|
|
__field(u32, invalidate)
|
|
|
|
__field(u32, flush)
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2011-02-03 18:57:46 +07:00
|
|
|
__entry->dev = ring->dev->primary->index;
|
|
|
|
__entry->ring = ring->id;
|
|
|
|
__entry->invalidate = invalidate;
|
|
|
|
__entry->flush = flush;
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
|
|
|
|
__entry->dev, __entry->ring,
|
|
|
|
__entry->invalidate, __entry->flush)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2010-03-11 15:41:45 +07:00
|
|
|
DECLARE_EVENT_CLASS(i915_gem_request,
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
|
|
|
|
TP_ARGS(ring, seqno),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2009-09-24 06:23:33 +07:00
|
|
|
__field(u32, dev)
|
2011-02-03 18:57:46 +07:00
|
|
|
__field(u32, ring)
|
2009-08-25 17:15:50 +07:00
|
|
|
__field(u32, seqno)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2011-02-03 18:57:46 +07:00
|
|
|
__entry->dev = ring->dev->primary->index;
|
|
|
|
__entry->ring = ring->id;
|
2009-08-25 17:15:50 +07:00
|
|
|
__entry->seqno = seqno;
|
|
|
|
),
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_printk("dev=%u, ring=%u, seqno=%u",
|
|
|
|
__entry->dev, __entry->ring, __entry->seqno)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
|
|
|
|
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
|
|
|
|
TP_ARGS(ring, seqno)
|
|
|
|
);
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2013-09-24 03:33:19 +07:00
|
|
|
TRACE_EVENT(i915_gem_request_complete,
|
|
|
|
TP_PROTO(struct intel_ring_buffer *ring),
|
|
|
|
TP_ARGS(ring),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
|
|
|
__field(u32, ring)
|
|
|
|
__field(u32, seqno)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->dev = ring->dev->primary->index;
|
|
|
|
__entry->ring = ring->id;
|
|
|
|
__entry->seqno = ring->get_seqno(ring, false);
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%u, ring=%u, seqno=%u",
|
|
|
|
__entry->dev, __entry->ring, __entry->seqno)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2010-03-11 15:41:45 +07:00
|
|
|
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
|
|
|
|
TP_ARGS(ring, seqno)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2012-05-25 05:03:09 +07:00
|
|
|
TRACE_EVENT(i915_gem_request_wait_begin,
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
|
2012-05-25 05:03:09 +07:00
|
|
|
TP_ARGS(ring, seqno),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
|
|
|
__field(u32, ring)
|
|
|
|
__field(u32, seqno)
|
|
|
|
__field(bool, blocking)
|
|
|
|
),
|
|
|
|
|
|
|
|
/* NB: the blocking information is racy since mutex_is_locked
|
|
|
|
* doesn't check that the current thread holds the lock. The only
|
|
|
|
* other option would be to pass the boolean information of whether
|
|
|
|
* or not the class was blocking down through the stack which is
|
|
|
|
* less desirable.
|
|
|
|
*/
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->dev = ring->dev->primary->index;
|
|
|
|
__entry->ring = ring->id;
|
|
|
|
__entry->seqno = seqno;
|
|
|
|
__entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
|
|
|
|
__entry->dev, __entry->ring, __entry->seqno,
|
|
|
|
__entry->blocking ? "yes (NB)" : "no")
|
2010-03-11 15:41:45 +07:00
|
|
|
);
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2010-03-11 15:41:45 +07:00
|
|
|
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
|
|
|
|
TP_ARGS(ring, seqno)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2010-03-11 15:41:45 +07:00
|
|
|
DECLARE_EVENT_CLASS(i915_ring,
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_PROTO(struct intel_ring_buffer *ring),
|
|
|
|
TP_ARGS(ring),
|
2009-08-25 17:15:50 +07:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2009-09-24 06:23:33 +07:00
|
|
|
__field(u32, dev)
|
2011-02-03 18:57:46 +07:00
|
|
|
__field(u32, ring)
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2011-02-03 18:57:46 +07:00
|
|
|
__entry->dev = ring->dev->primary->index;
|
|
|
|
__entry->ring = ring->id;
|
2009-08-25 17:15:50 +07:00
|
|
|
),
|
|
|
|
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2010-03-11 15:41:45 +07:00
|
|
|
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_PROTO(struct intel_ring_buffer *ring),
|
|
|
|
TP_ARGS(ring)
|
2010-03-11 15:41:45 +07:00
|
|
|
);
|
2009-08-25 17:15:50 +07:00
|
|
|
|
2010-03-11 15:41:45 +07:00
|
|
|
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
|
2011-02-03 18:57:46 +07:00
|
|
|
TP_PROTO(struct intel_ring_buffer *ring),
|
|
|
|
TP_ARGS(ring)
|
2009-08-25 17:15:50 +07:00
|
|
|
);
|
|
|
|
|
2010-07-02 06:48:37 +07:00
|
|
|
TRACE_EVENT(i915_flip_request,
|
2010-11-09 02:18:58 +07:00
|
|
|
TP_PROTO(int plane, struct drm_i915_gem_object *obj),
|
2010-07-02 06:48:37 +07:00
|
|
|
|
|
|
|
TP_ARGS(plane, obj),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(int, plane)
|
2010-11-09 02:18:58 +07:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
2010-07-02 06:48:37 +07:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->plane = plane;
|
|
|
|
__entry->obj = obj;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(i915_flip_complete,
|
2010-11-09 02:18:58 +07:00
|
|
|
TP_PROTO(int plane, struct drm_i915_gem_object *obj),
|
2010-07-02 06:48:37 +07:00
|
|
|
|
|
|
|
TP_ARGS(plane, obj),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(int, plane)
|
2010-11-09 02:18:58 +07:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
2010-07-02 06:48:37 +07:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->plane = plane;
|
|
|
|
__entry->obj = obj;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
|
|
|
|
);
|
|
|
|
|
2013-07-20 02:36:56 +07:00
|
|
|
TRACE_EVENT_CONDITION(i915_reg_rw,
|
|
|
|
TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
|
2011-08-17 02:34:10 +07:00
|
|
|
|
2013-07-20 02:36:56 +07:00
|
|
|
TP_ARGS(write, reg, val, len, trace),
|
|
|
|
|
|
|
|
TP_CONDITION(trace),
|
2011-08-17 02:34:10 +07:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u64, val)
|
|
|
|
__field(u32, reg)
|
|
|
|
__field(u16, write)
|
|
|
|
__field(u16, len)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->val = (u64)val;
|
|
|
|
__entry->reg = reg;
|
|
|
|
__entry->write = write;
|
|
|
|
__entry->len = len;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
|
|
|
|
__entry->write ? "write" : "read",
|
|
|
|
__entry->reg, __entry->len,
|
|
|
|
(u32)(__entry->val & 0xffffffff),
|
|
|
|
(u32)(__entry->val >> 32))
|
2010-11-08 16:09:41 +07:00
|
|
|
);
|
|
|
|
|
2012-08-30 18:26:48 +07:00
|
|
|
TRACE_EVENT(intel_gpu_freq_change,
|
|
|
|
TP_PROTO(u32 freq),
|
|
|
|
TP_ARGS(freq),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, freq)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->freq = freq;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("new_freq=%u", __entry->freq)
|
|
|
|
);
|
|
|
|
|
2009-08-25 17:15:50 +07:00
|
|
|
#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>