mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-23 05:24:59 +07:00
6e6d76cdc5
If the V4L2_EVENT_SUB_FL_SEND_INITIAL was set, then the application expects
to receive an initial event of the initial value of the control.
However, commit c53c254933, which added the new
v4l2_subscribed_event_ops, introduced a regression: while the code still queued
that initial event the __v4l2_event_queue_fh() function was modified to ignore
such requests if sev->elems was 0 (meaning that the event subscription wasn't
finished yet).
And sev->elems was only set to a non-zero value after the add operation
returned.
This patch fixes this by passing the elems value to the add function. Then the
add function can set it before queuing the initial event.
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Acked-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
314 lines · 7.3 KiB · C
/*
|
|
* v4l2-event.c
|
|
*
|
|
* V4L2 events.
|
|
*
|
|
* Copyright (C) 2009--2010 Nokia Corporation.
|
|
*
|
|
* Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* version 2 as published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope that it will be useful, but
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
* General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
|
|
* 02110-1301 USA
|
|
*/
|
|
|
|
#include <media/v4l2-dev.h>
|
|
#include <media/v4l2-fh.h>
|
|
#include <media/v4l2-event.h>
|
|
|
|
#include <linux/sched.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/export.h>
|
|
|
|
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
|
|
{
|
|
idx += sev->first;
|
|
return idx >= sev->elems ? idx - sev->elems : idx;
|
|
}
|
|
|
|
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
|
|
{
|
|
struct v4l2_kevent *kev;
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
|
|
|
|
if (list_empty(&fh->available)) {
|
|
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
|
|
return -ENOENT;
|
|
}
|
|
|
|
WARN_ON(fh->navailable == 0);
|
|
|
|
kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
|
|
list_del(&kev->list);
|
|
fh->navailable--;
|
|
|
|
kev->event.pending = fh->navailable;
|
|
*event = kev->event;
|
|
kev->sev->first = sev_pos(kev->sev, 1);
|
|
kev->sev->in_use--;
|
|
|
|
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
|
|
|
|
return 0;
|
|
}
|
|
|
|
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
|
|
int nonblocking)
|
|
{
|
|
int ret;
|
|
|
|
if (nonblocking)
|
|
return __v4l2_event_dequeue(fh, event);
|
|
|
|
/* Release the vdev lock while waiting */
|
|
if (fh->vdev->lock)
|
|
mutex_unlock(fh->vdev->lock);
|
|
|
|
do {
|
|
ret = wait_event_interruptible(fh->wait,
|
|
fh->navailable != 0);
|
|
if (ret < 0)
|
|
break;
|
|
|
|
ret = __v4l2_event_dequeue(fh, event);
|
|
} while (ret == -ENOENT);
|
|
|
|
if (fh->vdev->lock)
|
|
mutex_lock(fh->vdev->lock);
|
|
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
|
|
|
|
/* Caller must hold fh->vdev->fh_lock! */
|
|
static struct v4l2_subscribed_event *v4l2_event_subscribed(
|
|
struct v4l2_fh *fh, u32 type, u32 id)
|
|
{
|
|
struct v4l2_subscribed_event *sev;
|
|
|
|
assert_spin_locked(&fh->vdev->fh_lock);
|
|
|
|
list_for_each_entry(sev, &fh->subscribed, list)
|
|
if (sev->type == type && sev->id == id)
|
|
return sev;
|
|
|
|
return NULL;
|
|
}
|
|
|
|
/*
 * Deliver event @ev (timestamped @ts) to a single file handle, if that
 * handle is subscribed to it.
 *
 * NOTE(review): both callers in this file take fh->vdev->fh_lock before
 * calling us, so this runs under that spinlock — confirm for any
 * external callers.
 */
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet elems will be 0, treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			/*
			 * Single-slot queue: let the subscription's replace
			 * op update the old payload in place instead of
			 * copying ev->u below.
			 */
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			/*
			 * Fold the dropped (oldest) event into what is now
			 * the oldest queued event so information isn't lost.
			 */
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	/* Wake anyone blocked in v4l2_event_dequeue() or polling. */
	wake_up_all(&fh->wait);
}
|
|
|
|
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
|
|
{
|
|
struct v4l2_fh *fh;
|
|
unsigned long flags;
|
|
struct timespec timestamp;
|
|
|
|
ktime_get_ts(×tamp);
|
|
|
|
spin_lock_irqsave(&vdev->fh_lock, flags);
|
|
|
|
list_for_each_entry(fh, &vdev->fh_list, list)
|
|
__v4l2_event_queue_fh(fh, ev, ×tamp);
|
|
|
|
spin_unlock_irqrestore(&vdev->fh_lock, flags);
|
|
}
|
|
EXPORT_SYMBOL_GPL(v4l2_event_queue);
|
|
|
|
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
|
|
{
|
|
unsigned long flags;
|
|
struct timespec timestamp;
|
|
|
|
ktime_get_ts(×tamp);
|
|
|
|
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
|
|
__v4l2_event_queue_fh(fh, ev, ×tamp);
|
|
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
|
|
}
|
|
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
|
|
|
|
int v4l2_event_pending(struct v4l2_fh *fh)
|
|
{
|
|
return fh->navailable;
|
|
}
|
|
EXPORT_SYMBOL_GPL(v4l2_event_pending);
|
|
|
|
/*
 * Subscribe @fh to events matching @sub->type / @sub->id.
 *
 * @elems: depth of the per-subscription event queue (forced to >= 1).
 * @ops:   optional add/del/replace/merge callbacks; may be NULL.
 *
 * Returns 0 on success (including when the subscription already
 * existed), -EINVAL for V4L2_EVENT_ALL, -ENOMEM on allocation failure,
 * or the error returned by ops->add.
 */
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	/* V4L2_EVENT_ALL is only meaningful for unsubscribe. */
	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	/* One allocation: the subscription plus its array of kevents. */
	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	/* Note: sev->elems stays 0 (kzalloc) until the add op has run;
	 * __v4l2_event_queue_fh() treats elems == 0 as "not subscribed". */

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	/*
	 * The add op receives @elems so it can set sev->elems itself
	 * before queueing an initial event (e.g. for
	 * V4L2_EVENT_SUB_FL_SEND_INITIAL); otherwise that event would be
	 * dropped by the elems == 0 check above.
	 */
	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			/* Clear ops so unsubscribe won't call ops->del
			 * for an add that never succeeded. */
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
|
|
|
|
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
|
|
{
|
|
struct v4l2_event_subscription sub;
|
|
struct v4l2_subscribed_event *sev;
|
|
unsigned long flags;
|
|
|
|
do {
|
|
sev = NULL;
|
|
|
|
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
|
|
if (!list_empty(&fh->subscribed)) {
|
|
sev = list_first_entry(&fh->subscribed,
|
|
struct v4l2_subscribed_event, list);
|
|
sub.type = sev->type;
|
|
sub.id = sev->id;
|
|
}
|
|
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
|
|
if (sev)
|
|
v4l2_event_unsubscribe(fh, &sub);
|
|
} while (sev);
|
|
}
|
|
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
|
|
|
|
/*
 * Remove the subscription on @fh matching @sub->type / @sub->id.
 *
 * V4L2_EVENT_ALL removes every subscription. Pending events belonging
 * to the subscription are discarded. The ops->del callback, if any, is
 * invoked only after the subscription has been unlinked and the lock
 * dropped, so it may sleep.
 *
 * Always returns 0 (an unknown type/id is not an error).
 */
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	/* Call the del op outside the spinlock. */
	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	/* kfree(NULL) is a no-op, so no sev check needed here. */
	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
|