udmabuf: implement begin_cpu_access/end_cpu_access hooks
With the misc device, we should end up using the result of
get_arch_dma_ops(..) or dma-direct ops.

This can allow us to have WC mappings in the guest after
synchronization.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20191203013627.85991-4-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent 17a7ce2034
commit 284562e1f3
drivers/dma-buf/udmabuf.c

@@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
 struct udmabuf {
         pgoff_t pagecount;
         struct page **pages;
+        struct sg_table *sg;
         struct miscdevice *device;
 };
 
@@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
 static void release_udmabuf(struct dma_buf *buf)
 {
         struct udmabuf *ubuf = buf->priv;
+        struct device *dev = ubuf->device->this_device;
         pgoff_t pg;
 
+        if (ubuf->sg)
+                put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
         for (pg = 0; pg < ubuf->pagecount; pg++)
                 put_page(ubuf->pages[pg]);
         kfree(ubuf->pages);
         kfree(ubuf);
 }
 
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+                             enum dma_data_direction direction)
+{
+        struct udmabuf *ubuf = buf->priv;
+        struct device *dev = ubuf->device->this_device;
+
+        if (!ubuf->sg) {
+                ubuf->sg = get_sg_table(dev, buf, direction);
+                if (IS_ERR(ubuf->sg))
+                        return PTR_ERR(ubuf->sg);
+        } else {
+                dma_sync_sg_for_device(dev, ubuf->sg->sgl,
+                                       ubuf->sg->nents,
+                                       direction);
+        }
+
+        return 0;
+}
+
+static int end_cpu_udmabuf(struct dma_buf *buf,
+                           enum dma_data_direction direction)
+{
+        struct udmabuf *ubuf = buf->priv;
+        struct device *dev = ubuf->device->this_device;
+
+        if (!ubuf->sg)
+                return -EINVAL;
+
+        dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+        return 0;
+}
+
 static const struct dma_buf_ops udmabuf_ops = {
         .cache_sgt_mapping = true,
         .map_dma_buf       = map_udmabuf,
         .unmap_dma_buf     = unmap_udmabuf,
         .release           = release_udmabuf,
         .mmap              = mmap_udmabuf,
+        .begin_cpu_access  = begin_cpu_udmabuf,
+        .end_cpu_access    = end_cpu_udmabuf,
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
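The new begin_cpu_access/end_cpu_access hooks are reached from userspace through the existing DMA_BUF_IOCTL_SYNC ioctl on the exported dma-buf fd. As a rough illustration only (not part of this patch), the sketch below creates a udmabuf from a sealed memfd and brackets a CPU write with SYNC_START/SYNC_END; variable names are arbitrary and error handling is abbreviated.

/*
 * Illustrative userspace sketch (not part of this patch): exercise the new
 * hooks via DMA_BUF_IOCTL_SYNC on a dma-buf created through /dev/udmabuf.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-buf.h>
#include <linux/udmabuf.h>

int main(void)
{
        size_t size = getpagesize();
        struct udmabuf_create create = { 0 };
        struct dma_buf_sync sync = { 0 };
        int devfd, memfd, buffd;
        void *map;

        /* Backing memory must be a memfd with F_SEAL_SHRINK applied. */
        memfd = memfd_create("udmabuf-sync-demo", MFD_ALLOW_SEALING);
        ftruncate(memfd, size);
        fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

        /* Export the memfd pages as a dma-buf. */
        devfd = open("/dev/udmabuf", O_RDWR);
        create.memfd  = memfd;
        create.offset = 0;
        create.size   = size;
        buffd = ioctl(devfd, UDMABUF_CREATE, &create);
        if (buffd < 0) {
                perror("UDMABUF_CREATE");
                return 1;
        }

        map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, buffd, 0);

        /* SYNC_START ends up in begin_cpu_udmabuf(). */
        sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
        ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

        memset(map, 0xab, size);        /* CPU access between the brackets */

        /* SYNC_END ends up in end_cpu_udmabuf(). */
        sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
        ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

        munmap(map, size);
        close(buffd);
        close(devfd);
        close(memfd);
        return 0;
}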