staging: most: add fair buffer distribution
This patch ensures a fair distribution of buffers when two AIMs share a single channel: neither AIM can then use more than half of the pre-allocated buffers of the linked channel. If the channel is not shared, the AIM can use all available buffers exclusively.

Signed-off-by: Andrey Shvetsov <andrey.shvetsov@k2l.de>
Signed-off-by: Christian Gromm <christian.gromm@microchip.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 71457d4827 (parent c81c9c3e0f)
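The rule described above can be modeled outside the kernel. The sketch below is a standalone illustration only, not code from this patch; the toy_* names are made up, and the real logic lives in most_get_mbo() and most_start_channel() further down. It shows the quota check: while both AIMs are linked, each may take at most its half of the pre-allocated buffers, whereas an unshared channel is unaffected because the quota is only enforced when both reference counts are non-zero.

/* Standalone sketch (hypothetical toy_* names); builds with any C compiler. */
#include <stdio.h>

struct toy_channel {
        int first_refs, second_refs;    /* links per AIM (0 = not linked) */
        int first_quota, second_quota;  /* per-AIM share of the buffers   */
        int free_buffers;               /* buffers currently in the fifo  */
};

/* id: 1 = first AIM, 2 = second AIM */
static int toy_get_buffer(struct toy_channel *c, int id)
{
        /* Enforce the quota only when both AIMs are linked to the channel. */
        if (c->first_refs && c->second_refs &&
            ((id == 1 && c->first_quota <= 0) ||
             (id == 2 && c->second_quota <= 0)))
                return 0;               /* this AIM has used its half */

        if (!c->free_buffers)
                return 0;               /* fifo is empty */

        c->free_buffers--;
        if (id == 1)
                c->first_quota--;
        else
                c->second_quota--;
        return 1;
}

int main(void)
{
        /* 8 pre-allocated buffers, split 4/4 as in most_start_channel(). */
        struct toy_channel shared = { 1, 1, 4, 4, 8 };
        int i, got = 0;

        for (i = 0; i < 8; i++)         /* the first AIM tries to take all */
                got += toy_get_buffer(&shared, 1);
        printf("shared channel: first AIM got %d of 8 buffers\n", got);
        return 0;
}

With a shared channel the loop stops at 4 buffers; setting second_refs to 0 lets the same loop drain all 8, matching the exclusive-use case described in the commit message.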
@@ -165,7 +165,7 @@ static ssize_t aim_write(struct file *filp, const char __user *buf,
         }
         mutex_unlock(&channel->io_mutex);
 
-        mbo = most_get_mbo(channel->iface, channel->channel_id);
+        mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim);
 
         if (!mbo) {
                 if ((filp->f_flags & O_NONBLOCK))
@@ -173,7 +173,8 @@ static ssize_t aim_write(struct file *filp, const char __user *buf,
                 if (wait_event_interruptible(
                                 channel->wq,
                                 (mbo = most_get_mbo(channel->iface,
-                                                    channel->channel_id)) ||
+                                                    channel->channel_id,
+                                                    &cdev_aim)) ||
                                 (!channel->dev)))
                         return -ERESTARTSYS;
         }
@@ -245,7 +245,7 @@ static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
 
         BUG_ON(nd->dev != dev);
 
-        mbo = most_get_mbo(nd->iface, nd->tx.ch_id);
+        mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &aim);
 
         if (!mbo) {
                 netif_stop_queue(dev);
@@ -251,7 +251,8 @@ static int playback_thread(void *data)
                 wait_event_interruptible(
                         channel->playback_waitq,
                         kthread_should_stop() ||
-                        (mbo = most_get_mbo(channel->iface, channel->id)));
+                        (mbo = most_get_mbo(channel->iface, channel->id,
+                                            &audio_aim)));
 
                 if (!mbo)
                         continue;
@@ -669,7 +669,7 @@ static void request_netinfo(struct most_interface *most_iface, int ch_idx)
                 return;
         }
 
-        mbo = most_get_mbo(&dev->most_iface, dev->atx_idx);
+        mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
         if (!mbo)
                 return;
 
@@ -36,6 +36,7 @@ static struct class *most_class;
 static struct device *class_glue_dir;
 static struct ida mdev_id;
 static int modref;
+static int dummy_num_buffers;
 
 struct most_c_obj {
         struct kobject kobj;
@@ -59,6 +60,8 @@ struct most_c_obj {
         struct most_aim *second_aim;
         int first_aim_refs;
         int second_aim_refs;
+        int first_num_buffers;
+        int second_num_buffers;
         struct list_head trash_fifo;
         struct task_struct *hdm_enqueue_task;
         struct mutex stop_task_mutex;
@@ -1233,6 +1236,7 @@ static void arm_mbo(struct mbo *mbo)
         }
 
         spin_lock_irqsave(&c->fifo_lock, flags);
+        ++*mbo->num_buffers_ptr;
         list_add_tail(&mbo->list, &c->fifo);
         spin_unlock_irqrestore(&c->fifo_lock, flags);
 
@@ -1286,6 +1290,7 @@ static int arm_mbo_chain(struct most_c_obj *c, int dir,
                         goto _error1;
                 }
                 mbo->complete = compl;
+                mbo->num_buffers_ptr = &dummy_num_buffers;
                 if (dir == MOST_CH_RX) {
                         nq_hdm_mbo(mbo);
                         atomic_inc(&c->mbo_nq_level);
@@ -1384,22 +1389,40 @@ most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
  * This attempts to get a free buffer out of the channel fifo.
  * Returns a pointer to MBO on success or NULL otherwise.
  */
-struct mbo *most_get_mbo(struct most_interface *iface, int id)
+struct mbo *most_get_mbo(struct most_interface *iface, int id,
+                         struct most_aim *aim)
 {
         struct mbo *mbo;
         struct most_c_obj *c;
         unsigned long flags;
+        int *num_buffers_ptr;
 
         c = get_channel_by_iface(iface, id);
         if (unlikely(!c))
                 return NULL;
+
+        if (c->first_aim_refs && c->second_aim_refs &&
+            ((aim == c->first_aim && c->first_num_buffers <= 0) ||
+             (aim == c->second_aim && c->second_num_buffers <= 0)))
+                return NULL;
+
+        if (aim == c->first_aim)
+                num_buffers_ptr = &c->first_num_buffers;
+        else if (aim == c->second_aim)
+                num_buffers_ptr = &c->second_num_buffers;
+        else
+                num_buffers_ptr = &dummy_num_buffers;
+
         spin_lock_irqsave(&c->fifo_lock, flags);
         if (list_empty(&c->fifo)) {
                 spin_unlock_irqrestore(&c->fifo_lock, flags);
                 return NULL;
         }
         mbo = list_pop_mbo(&c->fifo);
+        --*num_buffers_ptr;
         spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+        mbo->num_buffers_ptr = num_buffers_ptr;
         mbo->buffer_length = c->cfg.buffer_size;
         return mbo;
 }
@@ -1530,6 +1553,8 @@ int most_start_channel(struct most_interface *iface, int id,
                 goto error;
 
         c->is_starving = 0;
+        c->first_num_buffers = c->cfg.num_buffers / 2;
+        c->second_num_buffers = c->cfg.num_buffers - c->first_num_buffers;
         atomic_set(&c->mbo_ref, num_buffer);
 
 out:
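For reference, the split above uses integer division, so with an odd buffer count the second AIM is assigned the extra buffer. A quick check (plain C, example value only, not from the patch):

        int num_buffers = 7;                    /* example value      */
        int first = num_buffers / 2;            /* first AIM gets 3   */
        int second = num_buffers - first;       /* second AIM gets 4  */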
@@ -190,6 +190,7 @@ struct mbo {
         void *priv;
         struct list_head list;
         struct most_interface *ifp;
+        int *num_buffers_ptr;
         u16 hdm_channel_id;
         void *virt_address;
         dma_addr_t bus_address;
@@ -307,7 +308,8 @@ void most_stop_enqueue(struct most_interface *iface, int channel_idx);
 void most_resume_enqueue(struct most_interface *iface, int channel_idx);
 int most_register_aim(struct most_aim *aim);
 int most_deregister_aim(struct most_aim *aim);
-struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx);
+struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
+                         struct most_aim *);
 void most_put_mbo(struct mbo *mbo);
 int most_start_channel(struct most_interface *iface, int channel_idx,
                        struct most_aim *);