9e69c935fa
Since the buffer is accessed by userspace, we cannot just free the buffer's memory once we are done with it in kernel space: there might still be open file descriptors, and userspace might still be accessing the buffer.

This patch adds support for reference counting to the IIO buffers. When a buffer is created and initialized, its initial reference count is set to 1. Instead of freeing the buffer's memory, the buffer's _free() function now drops that reference, and the buffer's memory is freed only once the last reference has been dropped.

The IIO device takes a reference to its primary buffer. The patch adds a small helper function for this, iio_device_attach_buffer(), which gets a reference to the buffer and assigns it to the IIO device; it must be used instead of assigning the buffer to the device by hand. The reference is dropped only once the IIO device itself is freed, when we can be sure there are no more open file handles.

A reference to a buffer is also taken whenever the buffer is active, to avoid the buffer being freed while data is still being sent to it.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
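The message describes the new helper without showing it; it lands outside the file below. A minimal sketch of what it boils down to, assuming the iio_buffer_get() refcounting primitive this patch introduces (the in-tree definition may differ in detail):

/*
 * Hedged sketch of the helper described above; not part of the file
 * shown below.
 */
void iio_device_attach_buffer(struct iio_dev *indio_dev,
			      struct iio_buffer *buffer)
{
	/* Take a reference, then make this the device's primary buffer. */
	indio_dev->buffer = iio_buffer_get(buffer);
}

The reference taken here is only dropped when the IIO device itself is freed, which is what makes a bare indio_dev->buffer = buffer assignment unsafe.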
128 lines
3.1 KiB
C
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/buffer.h>
#include <linux/iio/consumer.h>

struct iio_cb_buffer {
	struct iio_buffer buffer;
	int (*cb)(const void *data, void *private);
	void *private;
	struct iio_channel *channels;
};

static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
	return container_of(buffer, struct iio_cb_buffer, buffer);
}

/* The producer pushes scans here; each one is forwarded to the callback. */
static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
	return cb_buff->cb(data, cb_buff->private);
}

/* Called via iio_buffer_put() once the last reference has been dropped. */
static void iio_buffer_cb_release(struct iio_buffer *buffer)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
	kfree(cb_buff->buffer.scan_mask);
	kfree(cb_buff);
}

static const struct iio_buffer_access_funcs iio_cb_access = {
	.store_to = &iio_buffer_cb_store_to,
	.release = &iio_buffer_cb_release,
};

struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
					     int (*cb)(const void *data,
						       void *private),
					     void *private)
{
	int ret;
	struct iio_cb_buffer *cb_buff;
	struct iio_dev *indio_dev;
	struct iio_channel *chan;

	cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
	if (cb_buff == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_buffer_init(&cb_buff->buffer);

	cb_buff->private = private;
	cb_buff->cb = cb;
	cb_buff->buffer.access = &iio_cb_access;
	INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

	cb_buff->channels = iio_channel_get_all(dev);
	if (IS_ERR(cb_buff->channels)) {
		ret = PTR_ERR(cb_buff->channels);
		goto error_free_cb_buff;
	}

	indio_dev = cb_buff->channels[0].indio_dev;
	cb_buff->buffer.scan_mask
		= kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
			  GFP_KERNEL);
	if (cb_buff->buffer.scan_mask == NULL) {
		ret = -ENOMEM;
		goto error_release_channels;
	}
	chan = &cb_buff->channels[0];
	while (chan->indio_dev) {
		if (chan->indio_dev != indio_dev) {
			ret = -EINVAL;
			goto error_free_scan_mask;
		}
		set_bit(chan->channel->scan_index,
			cb_buff->buffer.scan_mask);
		chan++;
	}

	return cb_buff;

error_free_scan_mask:
	kfree(cb_buff->buffer.scan_mask);
error_release_channels:
	iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
	kfree(cb_buff);
error_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);

int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
	return iio_update_buffers(cb_buff->channels[0].indio_dev,
				  &cb_buff->buffer,
				  NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);

void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_update_buffers(cb_buff->channels[0].indio_dev,
			   NULL,
			   &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);

void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_channel_release_all(cb_buff->channels);
	/* Drop our reference; the release callback frees the memory
	 * once it is the last one. */
	iio_buffer_put(&cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);

struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
	return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
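
For reference, a hedged usage sketch of the consumer API this file exports. The names my_state, my_cb and my_start_capture are hypothetical; only the iio_channel_*_cb() calls come from the file above:

#include <linux/err.h>
#include <linux/iio/consumer.h>

struct my_state {				/* hypothetical consumer state */
	struct iio_cb_buffer *cb_buff;
};

/* Invoked once per scan the producer pushes into the buffer. */
static int my_cb(const void *data, void *private)
{
	/* 'private' is the pointer passed to iio_channel_get_all_cb();
	 * 'data' holds one scan of samples to copy or process here. */
	return 0;
}

static int my_start_capture(struct device *parent_dev, struct my_state *st)
{
	struct iio_cb_buffer *cb_buff;
	int ret;

	/* Bind all channels mapped to parent_dev to the callback. */
	cb_buff = iio_channel_get_all_cb(parent_dev, my_cb, st);
	if (IS_ERR(cb_buff))
		return PTR_ERR(cb_buff);

	ret = iio_channel_start_all_cb(cb_buff);
	if (ret) {
		iio_channel_release_all_cb(cb_buff);
		return ret;
	}

	st->cb_buff = cb_buff;
	return 0;
}

Teardown mirrors setup: iio_channel_stop_all_cb(st->cb_buff) followed by iio_channel_release_all_cb(st->cb_buff), which drops the buffer reference taken at creation.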