V4L/DVB (9726): cx18: Restore buffers that have fallen out of the transfer rotation
Restore buffers that have fallen out of the transfer rotation, and check for
coherent mailbox data when processing a stale mailbox.

Signed-off-by: Andy Walls <awalls@radix.net>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
commit bca11a5721
parent d6c7e5f8fa
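As a rough, self-contained illustration of the lost-buffer heuristic this patch introduces (toy structures and hypothetical names only, not the driver's code): while q_free is searched for the buffer id the firmware just acknowledged, every buffer passed over gets its skip count bumped; once a buffer has been skipped at least one less than the number of buffers on q_free, it cannot be the one currently in flight, so it is flagged as having fallen out of rotation and is later handed back to the firmware.

/* Illustrative sketch only -- hypothetical names, not cx18 driver code. */
#include <stdio.h>

#define NUM_BUFS 4

struct demo_buf {
        unsigned int id;
        unsigned int skipped;   /* times passed over while searching q_free */
        int lost;               /* flagged for return to the transfer rotation */
};

/* Find the acknowledged buffer; mark buffers that have fallen out of rotation. */
static struct demo_buf *get_buf(struct demo_buf q_free[], int n, unsigned int id)
{
        struct demo_buf *found = NULL;

        for (int i = 0; i < n; i++) {
                struct demo_buf *buf = &q_free[i];

                if (buf->id != id) {
                        /* Skipped too often: it can't still be in flight. */
                        if (++buf->skipped >= (unsigned int)(n - 1))
                                buf->lost = 1;
                        continue;
                }
                /* The driver moves the match off q_free; here we only reset it. */
                buf->skipped = 0;
                found = buf;
                break;
        }
        return found;
}

int main(void)
{
        struct demo_buf q_free[NUM_BUFS] = {
                { .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 }
        };

        /* The firmware keeps acknowledging ids 1..3; buffer 0 never comes back. */
        for (int round = 0; round < 3; round++)
                for (unsigned int id = 1; id < NUM_BUFS; id++)
                        get_buf(q_free, NUM_BUFS, id);

        for (int i = 0; i < NUM_BUFS; i++)
                if (q_free[i].lost)
                        printf("buffer %u fell out of rotation (skipped %u times)\n",
                               q_free[i].id, q_free[i].skipped);
        return 0;
}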
@@ -212,6 +212,7 @@ struct cx18_buffer {
         dma_addr_t dma_handle;
         u32 id;
         unsigned long b_flags;
+        unsigned skipped;
         char *buf;
 
         u32 bytesused;
@@ -120,7 +120,7 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
 
 static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
 {
-        u32 handle, mdl_ack_count;
+        u32 handle, mdl_ack_count, id;
         struct cx18_mailbox *mb;
         struct cx18_mdl_ack *mdl_ack;
         struct cx18_stream *s;
@@ -133,19 +133,50 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
 
         if (s == NULL) {
                 CX18_WARN("Got DMA done notification for unknown/inactive"
-                          " handle %d\n", handle);
+                          " handle %d, %s mailbox seq no %d\n", handle,
+                          (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
+                          "stale" : "good", mb->request);
                 return;
         }
 
         mdl_ack_count = mb->args[2];
         mdl_ack = order->mdl_ack;
         for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
-                buf = cx18_queue_get_buf(s, mdl_ack->id, mdl_ack->data_used);
-                CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name,
-                                  mdl_ack->id);
+                id = mdl_ack->id;
+                /*
+                 * Simple integrity check for processing a stale (and possibly
+                 * inconsistent mailbox): make sure the buffer id is in the
+                 * valid range for the stream.
+                 *
+                 * We go through the trouble of dealing with stale mailboxes
+                 * because most of the time, the mailbox data is still valid and
+                 * unchanged (and in practice the firmware ping-pongs the
+                 * two mdl_ack buffers so mdl_acks are not stale).
+                 *
+                 * There are occasions when we get a half changed mailbox,
+                 * which this check catches for a handle & id mismatch. If the
+                 * handle and id do correspond, the worst case is that we
+                 * completely lost the old buffer, but pick up the new buffer
+                 * early (but the new mdl_ack is guaranteed to be good in this
+                 * case as the firmware wouldn't point us to a new mdl_ack until
+                 * it's filled in).
+                 *
+                 * cx18_queue_get_buf() will detect the lost buffers
+                 * and put them back in rotation eventually.
+                 */
+                if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
+                    !(id >= s->mdl_offset &&
+                      id < (s->mdl_offset + s->buffers))) {
+                        CX18_WARN("Fell behind! Ignoring stale mailbox with "
+                                  " inconsistent data. Lost buffer for mailbox "
+                                  "seq no %d\n", mb->request);
+                        break;
+                }
+                buf = cx18_queue_get_buf(s, id, mdl_ack->data_used);
+                CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id);
                 if (buf == NULL) {
                         CX18_WARN("Could not find buf %d for stream %s\n",
-                                  mdl_ack->id, s->name);
+                                  id, s->name);
                         continue;
                 }
 
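The comment block above reduces to a single range test: a buffer id taken from a possibly half-updated mailbox is only trusted if it lies inside the id window assigned to the stream. A minimal sketch of that check, with hypothetical names (mdl_offset and buffers stand in for s->mdl_offset and s->buffers; this is not a driver helper):

/* Illustrative sketch only -- hypothetical names, not cx18 driver code. */
#include <stdbool.h>

/*
 * A stale mailbox may carry inconsistent data, so before using a buffer id
 * pulled from its mdl_ack, confirm the id falls within the range the stream
 * owns: [mdl_offset, mdl_offset + buffers).
 */
static bool stale_id_is_plausible(unsigned int id, unsigned int mdl_offset,
                                  unsigned int buffers)
{
        return id >= mdl_offset && id < mdl_offset + buffers;
}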
@@ -158,6 +189,7 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
                                                  buf->bytesused);
 
                         cx18_buf_sync_for_device(s, buf);
+                        cx18_enqueue(s, buf, &s->q_free);
 
                         if (s->handle != CX18_INVALID_TASK_HANDLE &&
                             test_bit(CX18_F_S_STREAMING, &s->s_flags))
@@ -257,10 +289,10 @@ static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
         /* Don't ack if the RPU has gotten impatient and timed us out */
         if (req != cx18_readl(cx, &ack_mb->request) ||
             req == cx18_readl(cx, &ack_mb->ack)) {
-                CX18_WARN("Possibly falling behind: %s self-ack'ed our incoming"
-                          " %s to EPU mailbox (sequence no. %u) while "
-                          "processing\n",
-                          rpu_str[order->rpu], rpu_str[order->rpu], req);
+                CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
+                                "incoming %s to EPU mailbox (sequence no. %u) "
+                                "while processing\n",
+                                rpu_str[order->rpu], rpu_str[order->rpu], req);
                 order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
                 return;
         }
@@ -407,9 +439,10 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
                    2 * sizeof(u32));
 
         if (order_mb->request == order_mb->ack) {
-                CX18_WARN("Possibly falling behind: %s self-ack'ed our incoming"
-                          " %s to EPU mailbox (sequence no. %u)\n",
-                          rpu_str[rpu], rpu_str[rpu], order_mb->request);
+                CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
+                                "incoming %s to EPU mailbox (sequence no. %u)"
+                                "\n",
+                                rpu_str[rpu], rpu_str[rpu], order_mb->request);
                 dump_mb(cx, order_mb, "incoming");
                 order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
         }
@@ -49,6 +49,7 @@ void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
                 buf->bytesused = 0;
                 buf->readpos = 0;
                 buf->b_flags = 0;
+                buf->skipped = 0;
         }
         mutex_lock(&s->qlock);
         list_add_tail(&buf->list, &q->list);
@@ -67,6 +68,7 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
                 list_del_init(q->list.next);
                 atomic_dec(&q->buffers);
                 q->bytesused -= buf->bytesused - buf->readpos;
+                buf->skipped = 0;
         }
         mutex_unlock(&s->qlock);
         return buf;
@@ -76,34 +78,63 @@ struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
         u32 bytesused)
 {
         struct cx18 *cx = s->cx;
-        struct list_head *p;
+        struct cx18_buffer *buf;
+        struct cx18_buffer *ret = NULL;
+        struct list_head *p, *t;
+        LIST_HEAD(r);
 
         mutex_lock(&s->qlock);
-        list_for_each(p, &s->q_free.list) {
-                struct cx18_buffer *buf =
-                        list_entry(p, struct cx18_buffer, list);
+        list_for_each_safe(p, t, &s->q_free.list) {
+                buf = list_entry(p, struct cx18_buffer, list);
 
                 if (buf->id != id) {
-                        CX18_DEBUG_HI_DMA("Skipping buffer %d searching for %d "
-                                          "in stream %s q_free\n", buf->id, id,
-                                          s->name);
+                        buf->skipped++;
+                        if (buf->skipped >= atomic_read(&s->q_free.buffers)-1) {
+                                /* buffer must have fallen out of rotation */
+                                atomic_dec(&s->q_free.buffers);
+                                list_move_tail(&buf->list, &r);
+                                CX18_WARN("Skipped %s, buffer %d, %d "
+                                          "times - it must have dropped out of "
+                                          "rotation\n", s->name, buf->id,
+                                          buf->skipped);
+                        }
                         continue;
                 }
 
                 buf->bytesused = bytesused;
-                if (s->type != CX18_ENC_STREAM_TYPE_TS) {
-                        atomic_dec(&s->q_free.buffers);
+                atomic_dec(&s->q_free.buffers);
+                if (s->type == CX18_ENC_STREAM_TYPE_TS) {
+                        /*
+                         * TS doesn't use q_full, but for sweeping up lost
+                         * buffers, we want the TS to requeue the buffer just
+                         * before sending the MDL back to the firmware, so we
+                         * pull it off the list here.
+                         */
+                        list_del_init(&buf->list);
+                } else {
                        atomic_inc(&s->q_full.buffers);
                        s->q_full.bytesused += buf->bytesused;
                        list_move_tail(&buf->list, &s->q_full.list);
                 }
 
-                mutex_unlock(&s->qlock);
-                return buf;
+                ret = buf;
+                break;
         }
         mutex_unlock(&s->qlock);
-        CX18_ERR("Cannot find buffer %d for stream %s\n", id, s->name);
-        return NULL;
+
+        /* Put lost buffers back into firmware transfer rotation */
+        while (!list_empty(&r)) {
+                buf = list_entry(r.next, struct cx18_buffer, list);
+                list_del_init(r.next);
+                cx18_enqueue(s, buf, &s->q_free);
+                cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
+                      (void __iomem *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
+                          1, buf->id, s->buf_size);
+                CX18_INFO("Returning %s, buffer %d back to transfer rotation\n",
+                          s->name, buf->id);
+                /* and there was much rejoicing... */
+        }
+        return ret;
 }
 
 /* Move all buffers of a queue to q_free, while flushing the buffers */
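A note on the structure of the change above: suspect buffers are only moved onto the private list r while s->qlock is held; the actual requeue (cx18_enqueue(), which itself takes s->qlock) and the CX18_CPU_DE_SET_MDL call happen after the mutex_unlock(). A minimal userspace sketch of that collect-under-lock, drain-after-unlock pattern, using a toy list and stand-in calls (nothing here is driver API):

/* Illustrative sketch only -- toy list and names, not cx18 driver code. */
#include <stdio.h>
#include <stddef.h>

struct node {
        int id;
        struct node *next;
};

/* Push a node onto a private singly linked "rescue" list. */
static void push(struct node **head, struct node *n)
{
        n->next = *head;
        *head = n;
}

/*
 * Drain the rescue list only after the queue lock has been dropped, so the
 * requeue work (which itself needs the queue lock) never runs under it.
 */
static void drain_rescued(struct node **rescued)
{
        while (*rescued) {
                struct node *n = *rescued;
                *rescued = n->next;
                /* stand-in for cx18_enqueue() + the CX18_CPU_DE_SET_MDL call */
                printf("returning buffer %d to the transfer rotation\n", n->id);
        }
}

int main(void)
{
        struct node bufs[2] = { { .id = 3 }, { .id = 7 } };
        struct node *rescued = NULL;

        /* ...inside the locked scan, two buffers were declared lost... */
        push(&rescued, &bufs[0]);
        push(&rescued, &bufs[1]);

        /* ...after the lock is released... */
        drain_rescued(&rescued);
        return 0;
}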
@@ -118,7 +149,7 @@ static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
         while (!list_empty(&q->list)) {
                 buf = list_entry(q->list.next, struct cx18_buffer, list);
                 list_move_tail(q->list.next, &s->q_free.list);
-                buf->bytesused = buf->readpos = buf->b_flags = 0;
+                buf->bytesused = buf->readpos = buf->b_flags = buf->skipped = 0;
                 atomic_inc(&s->q_free.buffers);
         }
         cx18_queue_init(q);