/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8
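
/*
 * Every packet written to the ring is followed by an 8-byte trailer: the
 * u64 snapshot of the ring indices that hv_ringbuffer_write() appends
 * after the payload (see "Set previous packet start" below). The packet
 * iterator uses VMBUS_PKT_TRAILER to step over it.
 */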

/*
 * When we write to the ring buffer, check if the host needs to be
 * signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting one. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->read_index;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over already-consumed data (e.g. the
 * packet header) before copying out the rest.
 */
static inline u32
hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	if (next >= ring_info->ring_datasize)
		next -= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	/* Keep the packet iterator's private index in sync. */
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices of the specified ring buffer, packed as a
 * u64. Only the write index (the upper 32 bits) is recorded; the lower
 * 32 bits are left zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assume there is enough room. A read that wraps past the end
 * of the data area is still a single memcpy, because hv_ringbuffer_init()
 * maps the data pages twice, back to back.
 */
static u32 hv_copyfrom_ringbuffer(
	const struct hv_ring_buffer_info *ring_info,
	void		*dest,
	u32		destlen,
	u32		start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	if (start_read_offset >= ring_buffer_size)
		start_read_offset -= ring_buffer_size;

	return start_read_offset;
}

/*
 * Helper routine to copy data from a source buffer into the ring buffer.
 * Assume there is enough room. As above, the wraparound mapping makes a
 * write that crosses the end of the data area a single memcpy.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32		start_write_offset,
	const void	*src,
	u32		srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
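	/*
	 * Illustration: with page_cnt = 3, the mapping built below is
	 *
	 *	header | data1 | data2 | data1 | data2
	 *
	 * so any read or write that crosses the end of the data area stays
	 * within one virtually contiguous range.
	 */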
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
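
/*
 * Caller sketch (abridged; cf. vmbus_open() in channel.c): the channel's
 * send and receive rings come from one contiguous page allocation, split
 * between the two directions:
 *
 *	err = hv_ringbuffer_init(&newchannel->outbound,
 *				 page, send_ringbuffer_pagecount);
 */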

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64); /* room for the index trailer */
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only just enough room for the packet, treat the ring
	 * as full: if we filled it completely, read index == write index
	 * and a reader could not tell a full ring from an empty one.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
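
/*
 * Caller sketch (abridged; cf. vmbus_sendpacket() in channel.c): senders
 * describe the packet as a kvec array (descriptor, payload, and padding
 * up to the next 8-byte boundary) and pass it here:
 *
 *	struct kvec bufferlist[3];
 *
 *	bufferlist[0].iov_base = &desc;
 *	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
 *	bufferlist[1].iov_base = buffer;
 *	bufferlist[1].iov_len = bufferlen;
 *	bufferlist[2].iov_base = &aligned_data;
 *	bufferlist[2].iov_len = packetlen_aligned - packetlen;
 *
 *	ret = hv_ringbuffer_write(channel, bufferlist, 3);
 */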

static inline void
init_cached_read_index(struct hv_ring_buffer_info *rbi)
{
	rbi->cached_read_index = rbi->ring_buffer->read_index;
}

/* Read from the ring buffer. */
int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	struct hv_ring_buffer_info *inring_info = &channel->inbound;

	if (!buflen)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * Not an error: there is not even a packet header yet.
		 * Drivers are expected to check buffer_actual_len.
		 */
		return 0;
	}

	init_cached_read_index(inring_info);

	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	hv_signal_on_read(channel);

	return 0;
}
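
/*
 * On-ring packet layout, as consumed above and by the iterator below
 * (offset8 and len8 count in 8-byte units; the trailer is not included
 * in len8):
 *
 *	|<-- offset8 << 3 -->|<-- payload -->|  u64 index trailer  |
 *	|<---------- len8 << 3 ------------>|
 */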

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
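/*
 * Worked example: with ring_datasize = 4096, priv_read_index = 4000 and
 * write_index = 100, the unread data wraps, so the bytes available are
 * (4096 - 4000) + 100 = 196.
 */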
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If the ring buffer is empty, returns NULL and no further action is
 * needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* set state for later hv_signal_on_read() */
	init_cached_read_index(rbi);

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If no further complete packet is available, returns NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;
	else
		return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/*
 * Update host ring buffer after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	hv_signal_on_read(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
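
/*
 * Sketch of how a VMBus driver might consume packets with the iterator
 * API exported above (consume_packet() is a hypothetical callback):
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	for (pkt = hv_pkt_iter_first(channel); pkt;
 *	     pkt = __hv_pkt_iter_next(channel, pkt))
 *		consume_packet(pkt);
 *
 *	hv_pkt_iter_close(channel);
 */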