Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-05 04:56:46 +07:00.
drm/i915: Convert flip_work to a list.

This will be required to allow more than 1 update in the future.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1463490484-19540-10-git-send-email-maarten.lankhorst@linux.intel.com
Reviewed-by: Patrik Jakobsson <patrik.jakobsson@linux.intel.com>
commit 6885843ae1 (parent aa420ddd8e)
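Before the hunks, a quick illustration of the pattern being applied may help. The sketch below is a self-contained userspace program, not driver code: it mimics how a single pending-work pointer becomes an intrusive list, so that flips can be queued with list_add_tail() and completed from the front, allowing more than one update to be tracked per CRTC. The fake_crtc and fake_flip_work types are hypothetical stand-ins, and the list helpers are simplified re-implementations of the <linux/list.h> ones used throughout the diff.

/* flip_list_sketch.c - illustrative only; none of these names exist in i915. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel's struct list_head helpers. */
struct list_head {
        struct list_head *prev, *next;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
        head->prev = head->next = head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

static void list_del_init(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);
}

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for intel_crtc / intel_flip_work. */
struct fake_flip_work {
        struct list_head head;          /* the new list linkage added by the patch */
        unsigned int frame;
};

struct fake_crtc {
        struct list_head flip_work;     /* was: struct fake_flip_work *flip_work */
};

/* Queueing appends to the list instead of overwriting a single pending pointer. */
static void queue_flip(struct fake_crtc *crtc, unsigned int frame)
{
        struct fake_flip_work *work = calloc(1, sizeof(*work));

        if (!work)
                return;
        work->frame = frame;
        list_add_tail(&work->head, &crtc->flip_work);
}

/* Completion pops the oldest entry, as list_first_entry() does in the diff. */
static void complete_first_flip(struct fake_crtc *crtc)
{
        struct fake_flip_work *work;

        if (list_empty(&crtc->flip_work))
                return;

        work = container_of(crtc->flip_work.next, struct fake_flip_work, head);
        list_del_init(&work->head);
        printf("completed flip queued at frame %u\n", work->frame);
        free(work);
}

int main(void)
{
        struct fake_crtc crtc;

        INIT_LIST_HEAD(&crtc.flip_work);        /* mirrors the new INIT_LIST_HEAD() in intel_crtc_init() */
        queue_flip(&crtc, 100);
        queue_flip(&crtc, 101);                 /* a second update can now be queued */
        complete_first_flip(&crtc);
        complete_first_flip(&crtc);
        return 0;
}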
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -592,6 +592,53 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
         return 0;
 }
 
+static void i915_dump_pageflip(struct seq_file *m,
+                               struct drm_i915_private *dev_priv,
+                               struct intel_crtc *crtc,
+                               struct intel_flip_work *work)
+{
+        const char pipe = pipe_name(crtc->pipe);
+        const char plane = plane_name(crtc->plane);
+        u32 pending;
+        u32 addr;
+
+        pending = atomic_read(&work->pending);
+        if (pending) {
+                seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
+                           pipe, plane);
+        } else {
+                seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
+                           pipe, plane);
+        }
+        if (work->flip_queued_req) {
+                struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
+
+                seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
+                           engine->name,
+                           i915_gem_request_get_seqno(work->flip_queued_req),
+                           dev_priv->next_seqno,
+                           engine->get_seqno(engine),
+                           i915_gem_request_completed(work->flip_queued_req, true));
+        } else
+                seq_printf(m, "Flip not associated with any ring\n");
+        seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
+                   work->flip_queued_vblank,
+                   work->flip_ready_vblank,
+                   intel_crtc_get_vblank_counter(crtc));
+        seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
+
+        if (INTEL_INFO(dev_priv)->gen >= 4)
+                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
+        else
+                addr = I915_READ(DSPADDR(crtc->plane));
+        seq_printf(m, "Current scanout address 0x%08x\n", addr);
+
+        if (work->pending_flip_obj) {
+                seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
+                seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
+        }
+}
+
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
         struct drm_info_node *node = m->private;
@@ -610,48 +657,13 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                 struct intel_flip_work *work;
 
                 spin_lock_irq(&dev->event_lock);
-                work = crtc->flip_work;
-                if (work == NULL) {
+                if (list_empty(&crtc->flip_work)) {
                         seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                    pipe, plane);
                 } else {
-                        u32 pending;
-                        u32 addr;
-
-                        pending = atomic_read(&work->pending);
-                        if (pending) {
-                                seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
-                                           pipe, plane);
-                        } else {
-                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
-                                           pipe, plane);
-                        }
-                        if (work->flip_queued_req) {
-                                struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
-
-                                seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
-                                           engine->name,
-                                           i915_gem_request_get_seqno(work->flip_queued_req),
-                                           dev_priv->next_seqno,
-                                           engine->get_seqno(engine),
-                                           i915_gem_request_completed(work->flip_queued_req, true));
-                        } else
-                                seq_printf(m, "Flip not associated with any ring\n");
-                        seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
-                                   work->flip_queued_vblank,
-                                   work->flip_ready_vblank,
-                                   intel_crtc_get_vblank_counter(crtc));
-                        seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
-
-                        if (INTEL_INFO(dev)->gen >= 4)
-                                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
-                        else
-                                addr = I915_READ(DSPADDR(crtc->plane));
-                        seq_printf(m, "Current scanout address 0x%08x\n", addr);
-
-                        if (work->pending_flip_obj) {
-                                seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
-                                seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
-                        }
+                        list_for_each_entry(work, &crtc->flip_work, head) {
+                                i915_dump_pageflip(m, dev_priv, crtc, work);
+                                seq_puts(m, "\n");
+                        }
                 }
                 spin_unlock_irq(&dev->event_lock);
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -622,7 +622,7 @@ struct drm_i915_display_funcs {
                           struct drm_framebuffer *fb,
                           struct drm_i915_gem_object *obj,
                           struct drm_i915_gem_request *req,
-                          uint32_t flags);
+                          uint64_t gtt_offset);
         void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
         /* clock updates for mode set */
         /* cursor updates */
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3213,17 +3213,12 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
         struct drm_device *dev = crtc->dev;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
         unsigned reset_counter;
-        bool pending;
 
         reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
         if (intel_crtc->reset_counter != reset_counter)
                 return false;
 
-        spin_lock_irq(&dev->event_lock);
-        pending = to_intel_crtc(crtc)->flip_work != NULL;
-        spin_unlock_irq(&dev->event_lock);
-
-        return pending;
+        return !list_empty_careful(&to_intel_crtc(crtc)->flip_work);
 }
 
 static void intel_update_pipe_config(struct intel_crtc *crtc,
@@ -3799,7 +3794,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
                 if (atomic_read(&crtc->unpin_work_count) == 0)
                         continue;
 
-                if (crtc->flip_work)
+                if (!list_empty_careful(&crtc->flip_work))
                         intel_wait_for_vblank(dev, crtc->pipe);
 
                 return true;
@@ -3808,12 +3803,11 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
         return false;
 }
 
-static void page_flip_completed(struct intel_crtc *intel_crtc)
+static void page_flip_completed(struct intel_crtc *intel_crtc, struct intel_flip_work *work)
 {
         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-        struct intel_flip_work *work = intel_crtc->flip_work;
 
-        intel_crtc->flip_work = NULL;
+        list_del_init(&work->head);
 
         if (work->event)
                 drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
@@ -3848,10 +3842,16 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
                 struct intel_flip_work *work;
 
                 spin_lock_irq(&dev->event_lock);
-                work = intel_crtc->flip_work;
+
+                /*
+                 * If we're waiting for page flips, it's the first
+                 * flip on the list that's stuck.
+                 */
+                work = list_first_entry_or_null(&intel_crtc->flip_work,
+                                                struct intel_flip_work, head);
                 if (work && !is_mmio_work(work)) {
                         WARN_ONCE(1, "Removing stuck page flip\n");
-                        page_flip_completed(intel_crtc);
+                        page_flip_completed(intel_crtc, work);
                 }
                 spin_unlock_irq(&dev->event_lock);
         }
@@ -6232,7 +6232,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
                 return;
 
         if (to_intel_plane_state(crtc->primary->state)->visible) {
-                WARN_ON(intel_crtc->flip_work);
+                WARN_ON(list_empty(&intel_crtc->flip_work));
 
                 intel_pre_disable_primary_noatomic(crtc);
 
@@ -10831,15 +10831,19 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
         struct intel_flip_work *work;
 
         spin_lock_irq(&dev->event_lock);
-        work = intel_crtc->flip_work;
-        intel_crtc->flip_work = NULL;
-        spin_unlock_irq(&dev->event_lock);
+        while (!list_empty(&intel_crtc->flip_work)) {
+                work = list_first_entry(&intel_crtc->flip_work,
+                                        struct intel_flip_work, head);
+                list_del_init(&work->head);
+                spin_unlock_irq(&dev->event_lock);
 
-        if (work) {
                 cancel_work_sync(&work->mmio_work);
                 cancel_work_sync(&work->unpin_work);
                 kfree(work);
+
+                spin_lock_irq(&dev->event_lock);
         }
+        spin_unlock_irq(&dev->event_lock);
 
         drm_crtc_cleanup(crtc);
 
@@ -10924,9 +10928,9 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
          * anyway, we don't really care.
          */
         return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
-                crtc->flip_work->gtt_offset &&
+                work->gtt_offset &&
                 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
-                                    crtc->flip_work->flip_count);
+                                    work->flip_count);
 }
 
 static bool
@@ -10976,13 +10980,19 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
          * lost pageflips) so needs the full irqsave spinlocks.
          */
         spin_lock_irqsave(&dev->event_lock, flags);
-        work = intel_crtc->flip_work;
+        while (!list_empty(&intel_crtc->flip_work)) {
+                work = list_first_entry(&intel_crtc->flip_work,
+                                        struct intel_flip_work,
+                                        head);
 
-        if (work != NULL &&
-            !is_mmio_work(work) &&
-            pageflip_finished(intel_crtc, work))
-                page_flip_completed(intel_crtc);
-
+                if (is_mmio_work(work))
+                        break;
+
+                if (!pageflip_finished(intel_crtc, work))
+                        break;
+
+                page_flip_completed(intel_crtc, work);
+        }
         spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
@@ -11003,13 +11013,19 @@ void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
          * lost pageflips) so needs the full irqsave spinlocks.
          */
         spin_lock_irqsave(&dev->event_lock, flags);
-        work = intel_crtc->flip_work;
+        while (!list_empty(&intel_crtc->flip_work)) {
+                work = list_first_entry(&intel_crtc->flip_work,
+                                        struct intel_flip_work,
+                                        head);
 
-        if (work != NULL &&
-            is_mmio_work(work) &&
-            pageflip_finished(intel_crtc, work))
-                page_flip_completed(intel_crtc);
-
+                if (!is_mmio_work(work))
+                        break;
+
+                if (!pageflip_finished(intel_crtc, work))
+                        break;
+
+                page_flip_completed(intel_crtc, work);
+        }
         spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
@@ -11028,7 +11044,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
                                  struct drm_framebuffer *fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_i915_gem_request *req,
-                                 uint32_t flags)
+                                 uint64_t gtt_offset)
 {
         struct intel_engine_cs *engine = req->engine;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -11051,7 +11067,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
         intel_ring_emit(engine, MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
         intel_ring_emit(engine, fb->pitches[0]);
-        intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+        intel_ring_emit(engine, gtt_offset);
         intel_ring_emit(engine, 0); /* aux display base address, unused */
 
         return 0;
@@ -11062,7 +11078,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
                                  struct drm_framebuffer *fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_i915_gem_request *req,
-                                 uint32_t flags)
+                                 uint64_t gtt_offset)
 {
         struct intel_engine_cs *engine = req->engine;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -11082,7 +11098,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
         intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
         intel_ring_emit(engine, fb->pitches[0]);
-        intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+        intel_ring_emit(engine, gtt_offset);
         intel_ring_emit(engine, MI_NOOP);
 
         return 0;
@@ -11093,7 +11109,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
                                  struct drm_framebuffer *fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_i915_gem_request *req,
-                                 uint32_t flags)
+                                 uint64_t gtt_offset)
 {
         struct intel_engine_cs *engine = req->engine;
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11112,8 +11128,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
         intel_ring_emit(engine, MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
         intel_ring_emit(engine, fb->pitches[0]);
-        intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
-                        obj->tiling_mode);
+        intel_ring_emit(engine, gtt_offset | obj->tiling_mode);
 
         /* XXX Enabling the panel-fitter across page-flip is so far
          * untested on non-native modes, so ignore it for now.
@@ -11131,7 +11146,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
                                  struct drm_framebuffer *fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_i915_gem_request *req,
-                                 uint32_t flags)
+                                 uint64_t gtt_offset)
 {
         struct intel_engine_cs *engine = req->engine;
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11146,7 +11161,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
         intel_ring_emit(engine, MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
         intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
-        intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+        intel_ring_emit(engine, gtt_offset);
 
         /* Contrary to the suggestions in the documentation,
          * "Enable Panel Fitter" does not seem to be required when page
@@ -11166,7 +11181,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
                                  struct drm_framebuffer *fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_i915_gem_request *req,
-                                 uint32_t flags)
+                                 uint64_t gtt_offset)
 {
         struct intel_engine_cs *engine = req->engine;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -11249,7 +11264,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 
         intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
         intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
-        intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+        intel_ring_emit(engine, gtt_offset);
         intel_ring_emit(engine, (MI_NOOP));
 
         return 0;
@@ -11316,7 +11331,7 @@ static int intel_default_queue_flip(struct drm_device *dev,
                                     struct drm_framebuffer *fb,
                                     struct drm_i915_gem_object *obj,
                                     struct drm_i915_gem_request *req,
-                                    uint32_t flags)
+                                    uint64_t gtt_offset)
 {
         return -ENODEV;
 }
@@ -11371,20 +11386,26 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
                 return;
 
         spin_lock(&dev->event_lock);
-        work = intel_crtc->flip_work;
-
-        if (work != NULL && !is_mmio_work(work) &&
-            __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
-                WARN_ONCE(1,
-                          "Kicking stuck page flip: queued at %d, now %d\n",
-                          work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
-                page_flip_completed(intel_crtc);
-                work = NULL;
-        }
-
-        if (work != NULL && !is_mmio_work(work) &&
-            intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
-                intel_queue_rps_boost_for_request(work->flip_queued_req);
+        while (!list_empty(&intel_crtc->flip_work)) {
+                work = list_first_entry(&intel_crtc->flip_work,
+                                        struct intel_flip_work, head);
+
+                if (is_mmio_work(work))
+                        break;
+
+                if (__pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+                        WARN_ONCE(1,
+                                  "Kicking stuck page flip: queued at %d, now %d\n",
+                                  work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
+                        page_flip_completed(intel_crtc, work);
+                        continue;
+                }
+
+                if (intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+                        intel_queue_rps_boost_for_request(work->flip_queued_req);
+
+                break;
+        }
         spin_unlock(&dev->event_lock);
 }
 
@@ -11445,13 +11466,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
         /* We borrow the event spin lock for protecting flip_work */
         spin_lock_irq(&dev->event_lock);
-        if (intel_crtc->flip_work) {
+        if (!list_empty(&intel_crtc->flip_work)) {
+                struct intel_flip_work *old_work;
+
+                old_work = list_last_entry(&intel_crtc->flip_work,
+                                           struct intel_flip_work, head);
+
                 /* Before declaring the flip queue wedged, check if
                  * the hardware completed the operation behind our backs.
                  */
-                if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
+                if (pageflip_finished(intel_crtc, old_work)) {
                         DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
-                        page_flip_completed(intel_crtc);
+                        page_flip_completed(intel_crtc, old_work);
                 } else {
                         DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                         spin_unlock_irq(&dev->event_lock);
@@ -11461,7 +11487,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                         return -EBUSY;
                 }
         }
-        intel_crtc->flip_work = work;
+        list_add_tail(&work->head, &intel_crtc->flip_work);
         spin_unlock_irq(&dev->event_lock);
 
         if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -11543,7 +11569,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
         } else {
                 i915_gem_request_assign(&work->flip_queued_req, request);
                 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
-                                                   page_flip_flags);
+                                                   work->gtt_offset);
                 if (ret)
                         goto cleanup_unpin;
 
@@ -11578,7 +11604,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
         drm_framebuffer_unreference(work->old_fb);
 
         spin_lock_irq(&dev->event_lock);
-        intel_crtc->flip_work = NULL;
+        list_del(&work->head);
         spin_unlock_irq(&dev->event_lock);
 
         drm_crtc_vblank_put(crtc);
@@ -14184,6 +14210,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
         intel_crtc->base.state = &crtc_state->base;
         crtc_state->base.crtc = &intel_crtc->base;
 
+        INIT_LIST_HEAD(&intel_crtc->flip_work);
+
         /* initialize shared scalers */
         if (INTEL_INFO(dev)->gen >= 9) {
                 if (pipe == PIPE_C)
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -641,7 +641,7 @@ struct intel_crtc {
         unsigned long enabled_power_domains;
         bool lowfreq_avail;
         struct intel_overlay *overlay;
-        struct intel_flip_work *flip_work;
+        struct list_head flip_work;
 
         atomic_t unpin_work_count;
 
@@ -970,6 +970,8 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
 }
 
 struct intel_flip_work {
+        struct list_head head;
+
         struct work_struct unpin_work;
         struct work_struct mmio_work;
 