/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#ifndef I915_RESET_H
#define I915_RESET_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/srcu.h>

#include "intel_engine_types.h"
#include "intel_reset_types.h"

struct i915_request;
struct intel_engine_cs;
struct intel_gt;
struct intel_guc;

void intel_gt_init_reset(struct intel_gt *gt);
void intel_gt_fini_reset(struct intel_gt *gt);

__printf(4, 5)
void intel_gt_handle_error(struct intel_gt *gt,
                           intel_engine_mask_t engine_mask,
                           unsigned long flags,
                           const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
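/*
 * Illustrative sketch (not part of the original header): a caller reporting
 * an engine hang and requesting error-state capture might look roughly like
 *
 *	intel_gt_handle_error(gt, engine->mask, I915_ERROR_CAPTURE,
 *	                      "%s hung", engine->name);
 *
 * where "engine" is assumed to be the struct intel_engine_cs involved and
 * the message string is purely an example.
 */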
void intel_gt_reset(struct intel_gt *gt,
                    intel_engine_mask_t stalled_mask,
                    const char *reason);
int intel_engine_reset(struct intel_engine_cs *engine,
                       const char *reason);

void __i915_request_reset(struct i915_request *rq, bool guilty);

int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu);
void intel_gt_reset_unlock(struct intel_gt *gt, int tag);
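/*
 * Illustrative sketch (not part of the original header), assuming the usual
 * 0-on-success / negative-errno convention: the SRCU tag returned through
 * @srcu by intel_gt_reset_trylock() must be handed back to
 * intel_gt_reset_unlock(), e.g.
 *
 *	int tag, err;
 *
 *	err = intel_gt_reset_trylock(gt, &tag);
 *	if (err)
 *		return err;
 *	... work that must not overlap a reset ...
 *	intel_gt_reset_unlock(gt, tag);
 */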
void intel_gt_set_wedged(struct intel_gt *gt);
bool intel_gt_unset_wedged(struct intel_gt *gt);
int intel_gt_terminally_wedged(struct intel_gt *gt);

/*
 * There's no unset_wedged_on_init paired with this one.
 * Once we're wedged on init, there's no going back.
 */
void intel_gt_set_wedged_on_init(struct intel_gt *gt);

int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);

int intel_reset_guc(struct intel_gt *gt);

struct intel_wedge_me {
        struct delayed_work work;
        struct intel_gt *gt;
        const char *name;
};

void __intel_init_wedge(struct intel_wedge_me *w,
                        struct intel_gt *gt,
                        long timeout,
                        const char *name);
void __intel_fini_wedge(struct intel_wedge_me *w);

#define intel_wedge_on_timeout(W, GT, TIMEOUT) \
        for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \
             (W)->gt; \
             __intel_fini_wedge((W)))
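/*
 * Illustrative sketch (not part of the original header): the macro above
 * expands to a for-loop whose body runs once between __intel_init_wedge()
 * and __intel_fini_wedge(), so a caller guarding a wait that might never
 * complete could write something like
 *
 *	struct intel_wedge_me w;
 *
 *	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
 *		wait_for_idle(gt);
 *	}
 *
 * where wait_for_idle() stands in for any blocking operation and is purely
 * hypothetical here; presumably the delayed work declares the device wedged
 * if the body does not finish before the timeout expires.
 */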
static inline bool __intel_reset_failed(const struct intel_reset *reset)
{
        /* Being wedged on init implies being wedged: the init flag never appears alone. */
        GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ?
                   !test_bit(I915_WEDGED, &reset->flags) : false);

        return unlikely(test_bit(I915_WEDGED, &reset->flags));
}
bool intel_has_gpu_reset(const struct intel_gt *gt);
bool intel_has_reset_engine(const struct intel_gt *gt);

#endif /* I915_RESET_H */