e1fee72c2e
In the current Execlists feeding mechanism, full preemption is not supported yet:
only lite restores are allowed (that is, the GPU simply samples a new tail pointer
for the context currently in execution). But we have identified a scenario in which
a full preemption occurs:

1) We submit two contexts for execution (A & B).
2) The GPU finishes with the first one (A), switches to the second one (B) and
   informs us.
3) We submit B again (hoping to cause a lite restore) together with C, but in the
   time we spend writing to the ELSP, the GPU finishes B.
4) The GPU starts executing B again (since we told it so).
5) We receive a B-finished interrupt and, mistakenly, we submit C (again) and D,
   causing a full preemption of B.

The race is avoided by keeping track of how many times a context has been submitted
to the hardware and by better discriminating the received context switch interrupts:
in the example, when we have submitted B twice, we won't submit C and D as soon as
we receive the notification that B is completed, because we were expecting a
LITE_RESTORE and we didn't get one, so we know a second completion will be received
shortly.

Without this explicit checking, somehow, the batch buffer execution order gets
messed up. This can be verified with the IGT test I sent together with the series.
I don't know the exact mechanism by which the preemption messes with the execution
order but, since other people are working on Scheduler + Preemption on Execlists,
I didn't try to fix it. In this series, only lite restores are supported (other
kinds of preemption WARN).

v2: elsp_submitted belongs in the new intel_ctx_submit_request. Several rebase
    changes.

v3: Clarify how the race is avoided, as requested by Daniel.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
[danvet: Align function parameters ...]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
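A minimal sketch of the bookkeeping described above (simplified, with made-up helper
names and types; the real driver tracks this in the elsp_submitted field of struct
intel_ctx_submit_request declared in the header below): every ELSP write increments
the count for the request at the head of the execlist queue, and a context-switch
event only retires that request once the count drops back to zero, so the completion
belonging to a lite restore is not mistaken for the final one.

#include <stdbool.h>

/* Simplified illustration only -- not the actual i915 code. */
struct sketch_submit_request {
        int elsp_submitted;     /* outstanding ELSP submissions of this context */
        /* ... context, engine, tail, list linkage ... */
};

/* Called for every write of this context to the ELSP; a lite restore
 * resubmits the request that is already executing, so the count can
 * legitimately reach two. */
static void sketch_elsp_write(struct sketch_submit_request *head)
{
        head->elsp_submitted++;
}

/* Called from the context-switch interrupt handler. Returns true only
 * when every submission of the head request has completed, i.e. when it
 * is safe to hand the next pair of contexts to the hardware. */
static bool sketch_ctx_switch_event(struct sketch_submit_request *head)
{
        if (--head->elsp_submitted > 0)
                return false;   /* a second completion is still on its way */

        return true;            /* really done: unqueue the next contexts */
}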
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_

/* Logical Rings */
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);

int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
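/**
 * intel_logical_ring_advance() - advance the software tail of the ringbuffer
 * @ringbuf: Logical ringbuffer whose tail is to be advanced.
 *
 * The tail is only updated (and wrapped) in the logical ringbuffer struct;
 * nothing is submitted to the hardware here.
 */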
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
        ringbuf->tail &= ringbuf->size - 1;
}

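/**
 * intel_logical_ring_emit() - write a DWORD into the ringbuffer
 * @ringbuf: Logical ringbuffer to write to.
 * @data: DWORD to write.
 *
 * Writes @data at the current tail offset and moves the tail one DWORD
 * (four bytes) forward.
 */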
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
                                           u32 data)
{
        iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
        ringbuf->tail += 4;
}
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);

/* Logical Ring Contexts */
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
                                     struct intel_engine_cs *ring);

/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
                               struct intel_engine_cs *ring,
                               struct intel_context *ctx,
                               struct drm_i915_gem_execbuffer2 *args,
                               struct list_head *vmas,
                               struct drm_i915_gem_object *batch_obj,
                               u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);

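/**
 * struct intel_ctx_submit_request - a context submission queued for the ELSP
 * @ctx: Context to be executed.
 * @ring: Engine the context is queued on.
 * @tail: Ringbuffer tail the hardware should sample when executing @ctx.
 * @execlist_link: Link into the engine's execlist queue.
 * @work: Work item associated with this request.
 * @elsp_submitted: Number of times this request has been written to the ELSP
 *                  and not yet completed; used to tell the completion of a
 *                  lite restore apart from an unwanted full preemption.
 */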
struct intel_ctx_submit_request {
        struct intel_context *ctx;
        struct intel_engine_cs *ring;
        u32 tail;

        struct list_head execlist_link;
        struct work_struct work;

        int elsp_submitted;
};

void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);

#endif /* _INTEL_LRC_H_ */
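For reference, the emit helpers declared above are used in a begin/emit/advance
sequence. The following is a hedged sketch: the function name and the choice of
MI_NOOP filler are illustrative and not taken from the driver.

/* Sketch only: reserve space for two DWORDs and emit two no-ops. */
static int sketch_emit_two_noops(struct intel_ringbuffer *ringbuf)
{
        int ret;

        ret = intel_logical_ring_begin(ringbuf, 2);     /* reserve 2 DWORDs */
        if (ret)
                return ret;

        intel_logical_ring_emit(ringbuf, 0);    /* MI_NOOP encodes as 0 */
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_advance(ringbuf);    /* wrap the software tail */

        return 0;
}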