Merge branch 'for-5.10-drm-sg-fix' of https://github.com/mszyprow/linux into drm-next

Please pull a set of fixes for various DRM drivers that finally resolve the incorrect usage of scatterlists (the struct sg_table nents and orig_nents entries), which causes issues when an IOMMU is used.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200910080505.24456-1-m.szyprowski@samsung.com

commit b40be05ed2
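The series below applies one conversion pattern throughout: open-coded dma_map_sg()/dma_unmap_sg() calls (and the manual nents bookkeeping around them) are replaced with the dma_map_sgtable()/dma_unmap_sgtable() wrappers, which operate on the whole sg_table. A minimal before/after sketch of that pattern follows; the function names are illustrative and not taken from any of the drivers touched here.

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Illustrative sketch of the conversion; names are not from the patched
 * drivers.
 *
 * Old pattern: dma_map_sg() returns the number of DMA segments actually
 * created, which can be smaller than sgt->orig_nents when an IOMMU merges
 * entries. Drivers often failed to store that value in sgt->nents, or later
 * iterated the DMA entries using the wrong count, which is the bug class
 * this series fixes.
 */
static int map_attachment_legacy(struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	int nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, dir);

	if (nents <= 0)
		return -ENOMEM;	/* real error code is lost */
	sgt->nents = nents;	/* easy to forget */
	return 0;
}

/*
 * New pattern: dma_map_sgtable() keeps nents/orig_nents consistent inside
 * the sg_table and returns 0 or a negative error code that can be
 * propagated as-is.
 */
static int map_attachment_sgtable(struct device *dev, struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	return dma_map_sgtable(dev, sgt, dir, 0);
}

static void unmap_attachment_sgtable(struct device *dev, struct sg_table *sgt,
				     enum dma_data_direction dir)
{
	dma_unmap_sgtable(dev, sgt, dir, 0);
}
```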
@ -140,13 +140,12 @@ struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
|
|||||||
enum dma_data_direction direction)
|
enum dma_data_direction direction)
|
||||||
{
|
{
|
||||||
struct dma_heaps_attachment *a = attachment->priv;
|
struct dma_heaps_attachment *a = attachment->priv;
|
||||||
struct sg_table *table;
|
struct sg_table *table = &a->table;
|
||||||
|
int ret;
|
||||||
|
|
||||||
table = &a->table;
|
ret = dma_map_sgtable(attachment->dev, table, direction, 0);
|
||||||
|
if (ret)
|
||||||
if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
|
table = ERR_PTR(ret);
|
||||||
direction))
|
|
||||||
table = ERR_PTR(-ENOMEM);
|
|
||||||
return table;
|
return table;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -154,7 +153,7 @@ static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
|
|||||||
struct sg_table *table,
|
struct sg_table *table,
|
||||||
enum dma_data_direction direction)
|
enum dma_data_direction direction)
|
||||||
{
|
{
|
||||||
dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
|
dma_unmap_sgtable(attachment->dev, table, direction, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
|
static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
|
||||||
|
@ -63,10 +63,9 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
|
|||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto err;
|
goto err;
|
||||||
if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
|
ret = dma_map_sgtable(dev, sg, direction, 0);
|
||||||
ret = -EINVAL;
|
if (ret < 0)
|
||||||
goto err;
|
goto err;
|
||||||
}
|
|
||||||
return sg;
|
return sg;
|
||||||
|
|
||||||
err:
|
err:
|
||||||
@ -78,7 +77,7 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
|
|||||||
static void put_sg_table(struct device *dev, struct sg_table *sg,
|
static void put_sg_table(struct device *dev, struct sg_table *sg,
|
||||||
enum dma_data_direction direction)
|
enum dma_data_direction direction)
|
||||||
{
|
{
|
||||||
dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
|
dma_unmap_sgtable(dev, sg, direction, 0);
|
||||||
sg_free_table(sg);
|
sg_free_table(sg);
|
||||||
kfree(sg);
|
kfree(sg);
|
||||||
}
|
}
|
||||||
|
@ -379,7 +379,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
|
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
|
||||||
struct scatterlist *sg;
|
struct scatterlist *sg;
|
||||||
struct sg_table *sgt;
|
struct sg_table *sgt;
|
||||||
int i, num;
|
int i;
|
||||||
|
|
||||||
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
|
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
|
||||||
if (!sgt)
|
if (!sgt)
|
||||||
@ -395,22 +395,18 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
|
|
||||||
mapping = dobj->obj.filp->f_mapping;
|
mapping = dobj->obj.filp->f_mapping;
|
||||||
|
|
||||||
for_each_sg(sgt->sgl, sg, count, i) {
|
for_each_sgtable_sg(sgt, sg, i) {
|
||||||
struct page *page;
|
struct page *page;
|
||||||
|
|
||||||
page = shmem_read_mapping_page(mapping, i);
|
page = shmem_read_mapping_page(mapping, i);
|
||||||
if (IS_ERR(page)) {
|
if (IS_ERR(page))
|
||||||
num = i;
|
|
||||||
goto release;
|
goto release;
|
||||||
}
|
|
||||||
|
|
||||||
sg_set_page(sg, page, PAGE_SIZE, 0);
|
sg_set_page(sg, page, PAGE_SIZE, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
|
if (dma_map_sgtable(attach->dev, sgt, dir, 0))
|
||||||
num = sgt->nents;
|
|
||||||
goto release;
|
goto release;
|
||||||
}
|
|
||||||
} else if (dobj->page) {
|
} else if (dobj->page) {
|
||||||
/* Single contiguous page */
|
/* Single contiguous page */
|
||||||
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
|
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
|
||||||
@ -418,7 +414,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
|
|
||||||
sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
|
sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
|
||||||
|
|
||||||
if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
|
if (dma_map_sgtable(attach->dev, sgt, dir, 0))
|
||||||
goto free_table;
|
goto free_table;
|
||||||
} else if (dobj->linear) {
|
} else if (dobj->linear) {
|
||||||
/* Single contiguous physical region - no struct page */
|
/* Single contiguous physical region - no struct page */
|
||||||
@ -432,8 +428,9 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
return sgt;
|
return sgt;
|
||||||
|
|
||||||
release:
|
release:
|
||||||
for_each_sg(sgt->sgl, sg, num, i)
|
for_each_sgtable_sg(sgt, sg, i)
|
||||||
put_page(sg_page(sg));
|
if (sg_page(sg))
|
||||||
|
put_page(sg_page(sg));
|
||||||
free_table:
|
free_table:
|
||||||
sg_free_table(sgt);
|
sg_free_table(sgt);
|
||||||
free_sgt:
|
free_sgt:
|
||||||
@ -449,11 +446,12 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (!dobj->linear)
|
if (!dobj->linear)
|
||||||
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
|
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
|
||||||
|
|
||||||
if (dobj->obj.filp) {
|
if (dobj->obj.filp) {
|
||||||
struct scatterlist *sg;
|
struct scatterlist *sg;
|
||||||
for_each_sg(sgt->sgl, sg, sgt->nents, i)
|
|
||||||
|
for_each_sgtable_sg(sgt, sg, i)
|
||||||
put_page(sg_page(sg));
|
put_page(sg_page(sg));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -127,7 +127,7 @@ drm_clflush_sg(struct sg_table *st)
|
|||||||
struct sg_page_iter sg_iter;
|
struct sg_page_iter sg_iter;
|
||||||
|
|
||||||
mb(); /*CLFLUSH is ordered only by using memory barriers*/
|
mb(); /*CLFLUSH is ordered only by using memory barriers*/
|
||||||
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
|
for_each_sgtable_page(st, &sg_iter, 0)
|
||||||
drm_clflush_page(sg_page_iter_page(&sg_iter));
|
drm_clflush_page(sg_page_iter_page(&sg_iter));
|
||||||
mb(); /*Make sure that all cache line entry is flushed*/
|
mb(); /*Make sure that all cache line entry is flushed*/
|
||||||
|
|
||||||
|
@ -471,26 +471,9 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
|
|||||||
{
|
{
|
||||||
struct drm_gem_cma_object *cma_obj;
|
struct drm_gem_cma_object *cma_obj;
|
||||||
|
|
||||||
if (sgt->nents != 1) {
|
/* check if the entries in the sg_table are contiguous */
|
||||||
/* check if the entries in the sg_table are contiguous */
|
if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
|
||||||
dma_addr_t next_addr = sg_dma_address(sgt->sgl);
|
return ERR_PTR(-EINVAL);
|
||||||
struct scatterlist *s;
|
|
||||||
unsigned int i;
|
|
||||||
|
|
||||||
for_each_sg(sgt->sgl, s, sgt->nents, i) {
|
|
||||||
/*
|
|
||||||
* sg_dma_address(s) is only valid for entries
|
|
||||||
* that have sg_dma_len(s) != 0
|
|
||||||
*/
|
|
||||||
if (!sg_dma_len(s))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (sg_dma_address(s) != next_addr)
|
|
||||||
return ERR_PTR(-EINVAL);
|
|
||||||
|
|
||||||
next_addr = sg_dma_address(s) + sg_dma_len(s);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Create a CMA GEM buffer. */
|
/* Create a CMA GEM buffer. */
|
||||||
cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
|
cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
|
||||||
|
@ -126,8 +126,8 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
|
|||||||
drm_prime_gem_destroy(obj, shmem->sgt);
|
drm_prime_gem_destroy(obj, shmem->sgt);
|
||||||
} else {
|
} else {
|
||||||
if (shmem->sgt) {
|
if (shmem->sgt) {
|
||||||
dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
|
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
|
||||||
shmem->sgt->nents, DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL, 0);
|
||||||
sg_free_table(shmem->sgt);
|
sg_free_table(shmem->sgt);
|
||||||
kfree(shmem->sgt);
|
kfree(shmem->sgt);
|
||||||
}
|
}
|
||||||
@ -424,8 +424,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
|
|||||||
|
|
||||||
WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
|
WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
|
||||||
|
|
||||||
dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
|
dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
shmem->sgt->nents, DMA_BIDIRECTIONAL);
|
|
||||||
sg_free_table(shmem->sgt);
|
sg_free_table(shmem->sgt);
|
||||||
kfree(shmem->sgt);
|
kfree(shmem->sgt);
|
||||||
shmem->sgt = NULL;
|
shmem->sgt = NULL;
|
||||||
@ -697,12 +696,17 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
|
|||||||
goto err_put_pages;
|
goto err_put_pages;
|
||||||
}
|
}
|
||||||
/* Map the pages for use by the h/w. */
|
/* Map the pages for use by the h/w. */
|
||||||
dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
|
ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
|
if (ret)
|
||||||
|
goto err_free_sgt;
|
||||||
|
|
||||||
shmem->sgt = sgt;
|
shmem->sgt = sgt;
|
||||||
|
|
||||||
return sgt;
|
return sgt;
|
||||||
|
|
||||||
|
err_free_sgt:
|
||||||
|
sg_free_table(sgt);
|
||||||
|
kfree(sgt);
|
||||||
err_put_pages:
|
err_put_pages:
|
||||||
drm_gem_shmem_put_pages(shmem);
|
drm_gem_shmem_put_pages(shmem);
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
|
@ -617,6 +617,7 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
{
|
{
|
||||||
struct drm_gem_object *obj = attach->dmabuf->priv;
|
struct drm_gem_object *obj = attach->dmabuf->priv;
|
||||||
struct sg_table *sgt;
|
struct sg_table *sgt;
|
||||||
|
int ret;
|
||||||
|
|
||||||
if (WARN_ON(dir == DMA_NONE))
|
if (WARN_ON(dir == DMA_NONE))
|
||||||
return ERR_PTR(-EINVAL);
|
return ERR_PTR(-EINVAL);
|
||||||
@ -626,11 +627,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
else
|
else
|
||||||
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
|
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
|
||||||
|
|
||||||
if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
|
ret = dma_map_sgtable(attach->dev, sgt, dir,
|
||||||
DMA_ATTR_SKIP_CPU_SYNC)) {
|
DMA_ATTR_SKIP_CPU_SYNC);
|
||||||
|
if (ret) {
|
||||||
sg_free_table(sgt);
|
sg_free_table(sgt);
|
||||||
kfree(sgt);
|
kfree(sgt);
|
||||||
sgt = ERR_PTR(-ENOMEM);
|
sgt = ERR_PTR(ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
return sgt;
|
return sgt;
|
||||||
@ -652,8 +654,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
if (!sgt)
|
if (!sgt)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
|
dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
|
||||||
DMA_ATTR_SKIP_CPU_SYNC);
|
|
||||||
sg_free_table(sgt);
|
sg_free_table(sgt);
|
||||||
kfree(sgt);
|
kfree(sgt);
|
||||||
}
|
}
|
||||||
@ -825,6 +826,37 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_page
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(drm_prime_pages_to_sg);
|
EXPORT_SYMBOL(drm_prime_pages_to_sg);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* drm_prime_get_contiguous_size - returns the contiguous size of the buffer
|
||||||
|
* @sgt: sg_table describing the buffer to check
|
||||||
|
*
|
||||||
|
* This helper calculates the contiguous size in the DMA address space
|
||||||
|
* of the the buffer described by the provided sg_table.
|
||||||
|
*
|
||||||
|
* This is useful for implementing
|
||||||
|
* &drm_gem_object_funcs.gem_prime_import_sg_table.
|
||||||
|
*/
|
||||||
|
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
|
||||||
|
{
|
||||||
|
dma_addr_t expected = sg_dma_address(sgt->sgl);
|
||||||
|
struct scatterlist *sg;
|
||||||
|
unsigned long size = 0;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
for_each_sgtable_dma_sg(sgt, sg, i) {
|
||||||
|
unsigned int len = sg_dma_len(sg);
|
||||||
|
|
||||||
|
if (!len)
|
||||||
|
break;
|
||||||
|
if (sg_dma_address(sg) != expected)
|
||||||
|
break;
|
||||||
|
expected += len;
|
||||||
|
size += len;
|
||||||
|
}
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(drm_prime_get_contiguous_size);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* drm_gem_prime_export - helper library implementation of the export callback
|
* drm_gem_prime_export - helper library implementation of the export callback
|
||||||
* @obj: GEM object to export
|
* @obj: GEM object to export
|
||||||
@ -959,45 +991,26 @@ EXPORT_SYMBOL(drm_gem_prime_import);
|
|||||||
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
|
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
|
||||||
dma_addr_t *addrs, int max_entries)
|
dma_addr_t *addrs, int max_entries)
|
||||||
{
|
{
|
||||||
unsigned count;
|
struct sg_dma_page_iter dma_iter;
|
||||||
struct scatterlist *sg;
|
struct sg_page_iter page_iter;
|
||||||
struct page *page;
|
struct page **p = pages;
|
||||||
u32 page_len, page_index;
|
dma_addr_t *a = addrs;
|
||||||
dma_addr_t addr;
|
|
||||||
u32 dma_len, dma_index;
|
|
||||||
|
|
||||||
/*
|
if (pages) {
|
||||||
* Scatterlist elements contains both pages and DMA addresses, but
|
for_each_sgtable_page(sgt, &page_iter, 0) {
|
||||||
* one shoud not assume 1:1 relation between them. The sg->length is
|
if (WARN_ON(p - pages >= max_entries))
|
||||||
* the size of the physical memory chunk described by the sg->page,
|
|
||||||
* while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
|
|
||||||
* described by the sg_dma_address(sg).
|
|
||||||
*/
|
|
||||||
page_index = 0;
|
|
||||||
dma_index = 0;
|
|
||||||
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
|
|
||||||
page_len = sg->length;
|
|
||||||
page = sg_page(sg);
|
|
||||||
dma_len = sg_dma_len(sg);
|
|
||||||
addr = sg_dma_address(sg);
|
|
||||||
|
|
||||||
while (pages && page_len > 0) {
|
|
||||||
if (WARN_ON(page_index >= max_entries))
|
|
||||||
return -1;
|
return -1;
|
||||||
pages[page_index] = page;
|
*p++ = sg_page_iter_page(&page_iter);
|
||||||
page++;
|
|
||||||
page_len -= PAGE_SIZE;
|
|
||||||
page_index++;
|
|
||||||
}
|
|
||||||
while (addrs && dma_len > 0) {
|
|
||||||
if (WARN_ON(dma_index >= max_entries))
|
|
||||||
return -1;
|
|
||||||
addrs[dma_index] = addr;
|
|
||||||
addr += PAGE_SIZE;
|
|
||||||
dma_len -= PAGE_SIZE;
|
|
||||||
dma_index++;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (addrs) {
|
||||||
|
for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
|
||||||
|
if (WARN_ON(a - addrs >= max_entries))
|
||||||
|
return -1;
|
||||||
|
*a++ = sg_page_iter_dma_address(&dma_iter);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
|
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
|
||||||
|
@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
|
|||||||
* because display controller, GPU, etc. are not coherent.
|
* because display controller, GPU, etc. are not coherent.
|
||||||
*/
|
*/
|
||||||
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
|
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
|
||||||
dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
|
dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
|
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
|
||||||
@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj
|
|||||||
* discard those writes.
|
* discard those writes.
|
||||||
*/
|
*/
|
||||||
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
|
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
|
||||||
dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
|
dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* called with etnaviv_obj->lock held */
|
/* called with etnaviv_obj->lock held */
|
||||||
@ -404,9 +404,8 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
|
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
|
||||||
dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
|
dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
|
||||||
etnaviv_obj->sgt->nents,
|
etnaviv_op_to_dma_dir(op));
|
||||||
etnaviv_op_to_dma_dir(op));
|
|
||||||
etnaviv_obj->last_cpu_prep_op = op;
|
etnaviv_obj->last_cpu_prep_op = op;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -421,8 +420,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
|
|||||||
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
|
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
|
||||||
/* fini without a prep is almost certainly a userspace error */
|
/* fini without a prep is almost certainly a userspace error */
|
||||||
WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
|
WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
|
||||||
dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
|
dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
|
||||||
etnaviv_obj->sgt->nents,
|
|
||||||
etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
|
etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
|
||||||
etnaviv_obj->last_cpu_prep_op = 0;
|
etnaviv_obj->last_cpu_prep_op = 0;
|
||||||
}
|
}
|
||||||
|
@ -73,13 +73,13 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
|
|||||||
struct sg_table *sgt, unsigned len, int prot)
|
struct sg_table *sgt, unsigned len, int prot)
|
||||||
{ struct scatterlist *sg;
|
{ struct scatterlist *sg;
|
||||||
unsigned int da = iova;
|
unsigned int da = iova;
|
||||||
unsigned int i, j;
|
unsigned int i;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (!context || !sgt)
|
if (!context || !sgt)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
|
for_each_sgtable_dma_sg(sgt, sg, i) {
|
||||||
u32 pa = sg_dma_address(sg) - sg->offset;
|
u32 pa = sg_dma_address(sg) - sg->offset;
|
||||||
size_t bytes = sg_dma_len(sg) + sg->offset;
|
size_t bytes = sg_dma_len(sg) + sg->offset;
|
||||||
|
|
||||||
@ -95,14 +95,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
da = iova;
|
etnaviv_context_unmap(context, iova, da - iova);
|
||||||
|
|
||||||
for_each_sg(sgt->sgl, sg, i, j) {
|
|
||||||
size_t bytes = sg_dma_len(sg) + sg->offset;
|
|
||||||
|
|
||||||
etnaviv_context_unmap(context, da, bytes);
|
|
||||||
da += bytes;
|
|
||||||
}
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -113,7 +106,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
|
|||||||
unsigned int da = iova;
|
unsigned int da = iova;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
|
for_each_sgtable_dma_sg(sgt, sg, i) {
|
||||||
size_t bytes = sg_dma_len(sg) + sg->offset;
|
size_t bytes = sg_dma_len(sg) + sg->offset;
|
||||||
|
|
||||||
etnaviv_context_unmap(context, da, bytes);
|
etnaviv_context_unmap(context, da, bytes);
|
||||||
|
@ -395,8 +395,8 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
out:
|
out:
|
||||||
dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl,
|
dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
|
||||||
g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL, 0);
|
||||||
|
|
||||||
pages = frame_vector_pages(g2d_userptr->vec);
|
pages = frame_vector_pages(g2d_userptr->vec);
|
||||||
if (!IS_ERR(pages)) {
|
if (!IS_ERR(pages)) {
|
||||||
@ -511,10 +511,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
|
|||||||
|
|
||||||
g2d_userptr->sgt = sgt;
|
g2d_userptr->sgt = sgt;
|
||||||
|
|
||||||
if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
|
ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt,
|
||||||
DMA_BIDIRECTIONAL)) {
|
DMA_BIDIRECTIONAL, 0);
|
||||||
|
if (ret) {
|
||||||
DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
|
DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
|
||||||
ret = -ENOMEM;
|
|
||||||
goto err_sg_free_table;
|
goto err_sg_free_table;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -431,27 +431,10 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
|
|||||||
{
|
{
|
||||||
struct exynos_drm_gem *exynos_gem;
|
struct exynos_drm_gem *exynos_gem;
|
||||||
|
|
||||||
if (sgt->nents < 1)
|
/* check if the entries in the sg_table are contiguous */
|
||||||
|
if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
|
||||||
|
DRM_ERROR("buffer chunks must be mapped contiguously");
|
||||||
return ERR_PTR(-EINVAL);
|
return ERR_PTR(-EINVAL);
|
||||||
|
|
||||||
/*
|
|
||||||
* Check if the provided buffer has been mapped as contiguous
|
|
||||||
* into DMA address space.
|
|
||||||
*/
|
|
||||||
if (sgt->nents > 1) {
|
|
||||||
dma_addr_t next_addr = sg_dma_address(sgt->sgl);
|
|
||||||
struct scatterlist *s;
|
|
||||||
unsigned int i;
|
|
||||||
|
|
||||||
for_each_sg(sgt->sgl, s, sgt->nents, i) {
|
|
||||||
if (!sg_dma_len(s))
|
|
||||||
break;
|
|
||||||
if (sg_dma_address(s) != next_addr) {
|
|
||||||
DRM_ERROR("buffer chunks must be mapped contiguously");
|
|
||||||
return ERR_PTR(-EINVAL);
|
|
||||||
}
|
|
||||||
next_addr = sg_dma_address(s) + sg_dma_len(s);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
|
exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
|
||||||
|
@ -48,12 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
|
|||||||
src = sg_next(src);
|
src = sg_next(src);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!dma_map_sg_attrs(attachment->dev,
|
ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
|
||||||
st->sgl, st->nents, dir,
|
if (ret)
|
||||||
DMA_ATTR_SKIP_CPU_SYNC)) {
|
|
||||||
ret = -ENOMEM;
|
|
||||||
goto err_free_sg;
|
goto err_free_sg;
|
||||||
}
|
|
||||||
|
|
||||||
return st;
|
return st;
|
||||||
|
|
||||||
@ -73,9 +70,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
|
|||||||
{
|
{
|
||||||
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
|
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
|
||||||
|
|
||||||
dma_unmap_sg_attrs(attachment->dev,
|
dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
|
||||||
sg->sgl, sg->nents, dir,
|
|
||||||
DMA_ATTR_SKIP_CPU_SYNC);
|
|
||||||
sg_free_table(sg);
|
sg_free_table(sg);
|
||||||
kfree(sg);
|
kfree(sg);
|
||||||
|
|
||||||
|
@ -28,10 +28,9 @@ static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
|
|||||||
sg = sg_next(sg);
|
sg = sg_next(sg);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
|
err = dma_map_sgtable(attachment->dev, st, dir, 0);
|
||||||
err = -ENOMEM;
|
if (err)
|
||||||
goto err_st;
|
goto err_st;
|
||||||
}
|
|
||||||
|
|
||||||
return st;
|
return st;
|
||||||
|
|
||||||
@ -46,7 +45,7 @@ static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
|
|||||||
struct sg_table *st,
|
struct sg_table *st,
|
||||||
enum dma_data_direction dir)
|
enum dma_data_direction dir)
|
||||||
{
|
{
|
||||||
dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
|
dma_unmap_sgtable(attachment->dev, st, dir, 0);
|
||||||
sg_free_table(st);
|
sg_free_table(st);
|
||||||
kfree(st);
|
kfree(st);
|
||||||
}
|
}
|
||||||
|
@ -69,8 +69,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
|
|||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (bo->base.sgt) {
|
if (bo->base.sgt) {
|
||||||
dma_unmap_sg(dev, bo->base.sgt->sgl,
|
dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
bo->base.sgt->nents, DMA_BIDIRECTIONAL);
|
|
||||||
sg_free_table(bo->base.sgt);
|
sg_free_table(bo->base.sgt);
|
||||||
} else {
|
} else {
|
||||||
bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
|
bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
|
||||||
@ -80,7 +79,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
|
ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
|
if (ret) {
|
||||||
|
sg_free_table(&sgt);
|
||||||
|
kfree(bo->base.sgt);
|
||||||
|
bo->base.sgt = NULL;
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
*bo->base.sgt = sgt;
|
*bo->base.sgt = sgt;
|
||||||
|
|
||||||
|
@ -124,7 +124,7 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
|
|||||||
if (err)
|
if (err)
|
||||||
goto err_out1;
|
goto err_out1;
|
||||||
|
|
||||||
for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
|
for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) {
|
||||||
err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
|
err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
|
||||||
bo_va->node.start + offset);
|
bo_va->node.start + offset);
|
||||||
if (err)
|
if (err)
|
||||||
@ -298,8 +298,7 @@ int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
|
|||||||
mutex_lock(&vm->lock);
|
mutex_lock(&vm->lock);
|
||||||
|
|
||||||
base = bo_va->node.start + (pageoff << PAGE_SHIFT);
|
base = bo_va->node.start + (pageoff << PAGE_SHIFT);
|
||||||
for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
|
for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) {
|
||||||
bo->base.sgt->nents, pageoff) {
|
|
||||||
err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
|
err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
|
||||||
base + offset);
|
base + offset);
|
||||||
if (err)
|
if (err)
|
||||||
|
@ -212,46 +212,28 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
|
|||||||
struct dma_buf_attachment *attach, struct sg_table *sg)
|
struct dma_buf_attachment *attach, struct sg_table *sg)
|
||||||
{
|
{
|
||||||
struct mtk_drm_gem_obj *mtk_gem;
|
struct mtk_drm_gem_obj *mtk_gem;
|
||||||
int ret;
|
|
||||||
struct scatterlist *s;
|
/* check if the entries in the sg_table are contiguous */
|
||||||
unsigned int i;
|
if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
|
||||||
dma_addr_t expected;
|
DRM_ERROR("sg_table is not contiguous");
|
||||||
|
return ERR_PTR(-EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
|
mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
|
||||||
|
|
||||||
if (IS_ERR(mtk_gem))
|
if (IS_ERR(mtk_gem))
|
||||||
return ERR_CAST(mtk_gem);
|
return ERR_CAST(mtk_gem);
|
||||||
|
|
||||||
expected = sg_dma_address(sg->sgl);
|
|
||||||
for_each_sg(sg->sgl, s, sg->nents, i) {
|
|
||||||
if (!sg_dma_len(s))
|
|
||||||
break;
|
|
||||||
|
|
||||||
if (sg_dma_address(s) != expected) {
|
|
||||||
DRM_ERROR("sg_table is not contiguous");
|
|
||||||
ret = -EINVAL;
|
|
||||||
goto err_gem_free;
|
|
||||||
}
|
|
||||||
expected = sg_dma_address(s) + sg_dma_len(s);
|
|
||||||
}
|
|
||||||
|
|
||||||
mtk_gem->dma_addr = sg_dma_address(sg->sgl);
|
mtk_gem->dma_addr = sg_dma_address(sg->sgl);
|
||||||
mtk_gem->sg = sg;
|
mtk_gem->sg = sg;
|
||||||
|
|
||||||
return &mtk_gem->base;
|
return &mtk_gem->base;
|
||||||
|
|
||||||
err_gem_free:
|
|
||||||
kfree(mtk_gem);
|
|
||||||
return ERR_PTR(ret);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
|
void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
|
||||||
{
|
{
|
||||||
struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
|
struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
|
||||||
struct sg_table *sgt;
|
struct sg_table *sgt;
|
||||||
struct sg_page_iter iter;
|
|
||||||
unsigned int npages;
|
unsigned int npages;
|
||||||
unsigned int i = 0;
|
|
||||||
|
|
||||||
if (mtk_gem->kvaddr)
|
if (mtk_gem->kvaddr)
|
||||||
return mtk_gem->kvaddr;
|
return mtk_gem->kvaddr;
|
||||||
@ -265,11 +247,8 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
|
|||||||
if (!mtk_gem->pages)
|
if (!mtk_gem->pages)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
|
drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
|
||||||
mtk_gem->pages[i++] = sg_page_iter_page(&iter);
|
|
||||||
if (i > npages)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
|
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
|
||||||
pgprot_writecombine(PAGE_KERNEL));
|
pgprot_writecombine(PAGE_KERNEL));
|
||||||
|
|
||||||
|
@ -53,11 +53,10 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
|
|||||||
struct device *dev = msm_obj->base.dev->dev;
|
struct device *dev = msm_obj->base.dev->dev;
|
||||||
|
|
||||||
if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
|
if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
|
||||||
dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
|
dma_sync_sgtable_for_device(dev, msm_obj->sgt,
|
||||||
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
} else {
|
} else {
|
||||||
dma_map_sg(dev, msm_obj->sgt->sgl,
|
dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -66,11 +65,9 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
|
|||||||
struct device *dev = msm_obj->base.dev->dev;
|
struct device *dev = msm_obj->base.dev->dev;
|
||||||
|
|
||||||
if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
|
if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
|
||||||
dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
|
dma_sync_sgtable_for_cpu(dev, msm_obj->sgt, DMA_BIDIRECTIONAL);
|
||||||
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
|
|
||||||
} else {
|
} else {
|
||||||
dma_unmap_sg(dev, msm_obj->sgt->sgl,
|
dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -30,21 +30,20 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
|
|||||||
{
|
{
|
||||||
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
|
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
|
||||||
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
|
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
|
||||||
struct scatterlist *sg;
|
struct sg_dma_page_iter dma_iter;
|
||||||
unsigned prot_bits = 0;
|
unsigned prot_bits = 0;
|
||||||
unsigned i, j;
|
|
||||||
|
|
||||||
if (prot & IOMMU_WRITE)
|
if (prot & IOMMU_WRITE)
|
||||||
prot_bits |= 1;
|
prot_bits |= 1;
|
||||||
if (prot & IOMMU_READ)
|
if (prot & IOMMU_READ)
|
||||||
prot_bits |= 2;
|
prot_bits |= 2;
|
||||||
|
|
||||||
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
|
for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
|
||||||
dma_addr_t addr = sg->dma_address;
|
dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
|
||||||
for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
|
int i;
|
||||||
gpummu->table[idx] = addr | prot_bits;
|
|
||||||
addr += GPUMMU_PAGE_SIZE;
|
for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
|
||||||
}
|
gpummu->table[idx++] = (addr + i) | prot_bits;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* we can improve by deferring flush for multiple map() */
|
/* we can improve by deferring flush for multiple map() */
|
||||||
|
@ -36,7 +36,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
|
|||||||
struct msm_iommu *iommu = to_msm_iommu(mmu);
|
struct msm_iommu *iommu = to_msm_iommu(mmu);
|
||||||
size_t ret;
|
size_t ret;
|
||||||
|
|
||||||
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
|
ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
|
||||||
WARN_ON(!ret);
|
WARN_ON(!ret);
|
||||||
|
|
||||||
return (ret == len) ? 0 : -EINVAL;
|
return (ret == len) ? 0 : -EINVAL;
|
||||||
|
@ -1297,10 +1297,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
|
|||||||
omap_obj->dma_addr = sg_dma_address(sgt->sgl);
|
omap_obj->dma_addr = sg_dma_address(sgt->sgl);
|
||||||
} else {
|
} else {
|
||||||
/* Create pages list from sgt */
|
/* Create pages list from sgt */
|
||||||
struct sg_page_iter iter;
|
|
||||||
struct page **pages;
|
struct page **pages;
|
||||||
unsigned int npages;
|
unsigned int npages;
|
||||||
unsigned int i = 0;
|
unsigned int ret;
|
||||||
|
|
||||||
npages = DIV_ROUND_UP(size, PAGE_SIZE);
|
npages = DIV_ROUND_UP(size, PAGE_SIZE);
|
||||||
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
|
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
|
||||||
@ -1311,14 +1310,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
|
|||||||
}
|
}
|
||||||
|
|
||||||
omap_obj->pages = pages;
|
omap_obj->pages = pages;
|
||||||
|
ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
|
||||||
for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
|
npages);
|
||||||
pages[i++] = sg_page_iter_page(&iter);
|
if (ret) {
|
||||||
if (i > npages)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (WARN_ON(i != npages)) {
|
|
||||||
omap_gem_free_object(obj);
|
omap_gem_free_object(obj);
|
||||||
obj = ERR_PTR(-ENOMEM);
|
obj = ERR_PTR(-ENOMEM);
|
||||||
goto done;
|
goto done;
|
||||||
|
@ -41,8 +41,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
|
|||||||
|
|
||||||
for (i = 0; i < n_sgt; i++) {
|
for (i = 0; i < n_sgt; i++) {
|
||||||
if (bo->sgts[i].sgl) {
|
if (bo->sgts[i].sgl) {
|
||||||
dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
|
dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
|
||||||
bo->sgts[i].nents, DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL, 0);
|
||||||
sg_free_table(&bo->sgts[i]);
|
sg_free_table(&bo->sgts[i]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -253,7 +253,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
|
|||||||
struct io_pgtable_ops *ops = mmu->pgtbl_ops;
|
struct io_pgtable_ops *ops = mmu->pgtbl_ops;
|
||||||
u64 start_iova = iova;
|
u64 start_iova = iova;
|
||||||
|
|
||||||
for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
|
for_each_sgtable_dma_sg(sgt, sgl, count) {
|
||||||
unsigned long paddr = sg_dma_address(sgl);
|
unsigned long paddr = sg_dma_address(sgl);
|
||||||
size_t len = sg_dma_len(sgl);
|
size_t len = sg_dma_len(sgl);
|
||||||
|
|
||||||
@ -517,10 +517,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto err_pages;
|
goto err_pages;
|
||||||
|
|
||||||
if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
|
ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
ret = -EINVAL;
|
if (ret)
|
||||||
goto err_map;
|
goto err_map;
|
||||||
}
|
|
||||||
|
|
||||||
mmu_map_sg(pfdev, bomapping->mmu, addr,
|
mmu_map_sg(pfdev, bomapping->mmu, addr,
|
||||||
IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
|
IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
|
||||||
|
@ -197,9 +197,8 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
|
|||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
ret = vsp1_du_map_sg(vsp->vsp, sgt);
|
ret = vsp1_du_map_sg(vsp->vsp, sgt);
|
||||||
if (!ret) {
|
if (ret) {
|
||||||
sg_free_table(sgt);
|
sg_free_table(sgt);
|
||||||
ret = -ENOMEM;
|
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -36,8 +36,8 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
|
|||||||
|
|
||||||
rk_obj->dma_addr = rk_obj->mm.start;
|
rk_obj->dma_addr = rk_obj->mm.start;
|
||||||
|
|
||||||
ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
|
ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
|
||||||
rk_obj->sgt->nents, prot);
|
prot);
|
||||||
if (ret < rk_obj->base.size) {
|
if (ret < rk_obj->base.size) {
|
||||||
DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
|
DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
|
||||||
ret, rk_obj->base.size);
|
ret, rk_obj->base.size);
|
||||||
@ -98,11 +98,10 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
|
|||||||
* TODO: Replace this by drm_clflush_sg() once it can be implemented
|
* TODO: Replace this by drm_clflush_sg() once it can be implemented
|
||||||
* without relying on symbols that are not exported.
|
* without relying on symbols that are not exported.
|
||||||
*/
|
*/
|
||||||
for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
|
for_each_sgtable_sg(rk_obj->sgt, s, i)
|
||||||
sg_dma_address(s) = sg_phys(s);
|
sg_dma_address(s) = sg_phys(s);
|
||||||
|
|
||||||
dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
|
dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);
|
||||||
DMA_TO_DEVICE);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
@ -350,8 +349,8 @@ void rockchip_gem_free_object(struct drm_gem_object *obj)
|
|||||||
if (private->domain) {
|
if (private->domain) {
|
||||||
rockchip_gem_iommu_unmap(rk_obj);
|
rockchip_gem_iommu_unmap(rk_obj);
|
||||||
} else {
|
} else {
|
||||||
dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
|
dma_unmap_sgtable(drm->dev, rk_obj->sgt,
|
||||||
rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL, 0);
|
||||||
}
|
}
|
||||||
drm_prime_gem_destroy(obj, rk_obj->sgt);
|
drm_prime_gem_destroy(obj, rk_obj->sgt);
|
||||||
} else {
|
} else {
|
||||||
@ -460,23 +459,6 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
|
|||||||
return sgt;
|
return sgt;
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
|
|
||||||
int count)
|
|
||||||
{
|
|
||||||
struct scatterlist *s;
|
|
||||||
dma_addr_t expected = sg_dma_address(sgt->sgl);
|
|
||||||
unsigned int i;
|
|
||||||
unsigned long size = 0;
|
|
||||||
|
|
||||||
for_each_sg(sgt->sgl, s, count, i) {
|
|
||||||
if (sg_dma_address(s) != expected)
|
|
||||||
break;
|
|
||||||
expected = sg_dma_address(s) + sg_dma_len(s);
|
|
||||||
size += sg_dma_len(s);
|
|
||||||
}
|
|
||||||
return size;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
static int
|
||||||
rockchip_gem_iommu_map_sg(struct drm_device *drm,
|
rockchip_gem_iommu_map_sg(struct drm_device *drm,
|
||||||
struct dma_buf_attachment *attach,
|
struct dma_buf_attachment *attach,
|
||||||
@ -493,15 +475,13 @@ rockchip_gem_dma_map_sg(struct drm_device *drm,
|
|||||||
struct sg_table *sg,
|
struct sg_table *sg,
|
||||||
struct rockchip_gem_object *rk_obj)
|
struct rockchip_gem_object *rk_obj)
|
||||||
{
|
{
|
||||||
int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
|
int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
|
||||||
DMA_BIDIRECTIONAL);
|
if (err)
|
||||||
if (!count)
|
return err;
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
|
if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
|
||||||
DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
|
DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
|
||||||
dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
|
dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
|
||||||
DMA_BIDIRECTIONAL);
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -98,8 +98,8 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
|
|||||||
* the SG table needs to be copied to avoid overwriting any
|
* the SG table needs to be copied to avoid overwriting any
|
||||||
* other potential users of the original SG table.
|
* other potential users of the original SG table.
|
||||||
*/
|
*/
|
||||||
err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
|
err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
|
||||||
GFP_KERNEL);
|
obj->sgt->orig_nents, GFP_KERNEL);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
goto free;
|
goto free;
|
||||||
} else {
|
} else {
|
||||||
@ -196,8 +196,7 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
|
|||||||
|
|
||||||
bo->iova = bo->mm->start;
|
bo->iova = bo->mm->start;
|
||||||
|
|
||||||
bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
|
bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
|
||||||
bo->sgt->nents, prot);
|
|
||||||
if (!bo->size) {
|
if (!bo->size) {
|
||||||
dev_err(tegra->drm->dev, "failed to map buffer\n");
|
dev_err(tegra->drm->dev, "failed to map buffer\n");
|
||||||
err = -ENOMEM;
|
err = -ENOMEM;
|
||||||
@ -264,8 +263,7 @@ static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
|
|||||||
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
|
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
|
||||||
{
|
{
|
||||||
if (bo->pages) {
|
if (bo->pages) {
|
||||||
dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
|
dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
|
||||||
DMA_FROM_DEVICE);
|
|
||||||
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
|
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
|
||||||
sg_free_table(bo->sgt);
|
sg_free_table(bo->sgt);
|
||||||
kfree(bo->sgt);
|
kfree(bo->sgt);
|
||||||
@ -290,12 +288,9 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
|
|||||||
goto put_pages;
|
goto put_pages;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
|
err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
|
||||||
DMA_FROM_DEVICE);
|
if (err)
|
||||||
if (err == 0) {
|
|
||||||
err = -EFAULT;
|
|
||||||
goto free_sgt;
|
goto free_sgt;
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
@ -571,7 +566,7 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
goto free;
|
goto free;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
|
if (dma_map_sgtable(attach->dev, sgt, dir, 0))
|
||||||
goto free;
|
goto free;
|
||||||
|
|
||||||
return sgt;
|
return sgt;
|
||||||
@ -590,7 +585,7 @@ static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
struct tegra_bo *bo = to_tegra_bo(gem);
|
struct tegra_bo *bo = to_tegra_bo(gem);
|
||||||
|
|
||||||
if (bo->pages)
|
if (bo->pages)
|
||||||
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
|
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
|
||||||
|
|
||||||
sg_free_table(sgt);
|
sg_free_table(sgt);
|
||||||
kfree(sgt);
|
kfree(sgt);
|
||||||
@ -609,8 +604,7 @@ static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
|
|||||||
struct drm_device *drm = gem->dev;
|
struct drm_device *drm = gem->dev;
|
||||||
|
|
||||||
if (bo->pages)
|
if (bo->pages)
|
||||||
dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
|
dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
|
||||||
DMA_FROM_DEVICE);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -623,8 +617,7 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
|
|||||||
struct drm_device *drm = gem->dev;
|
struct drm_device *drm = gem->dev;
|
||||||
|
|
||||||
if (bo->pages)
|
if (bo->pages)
|
||||||
dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
|
dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
|
||||||
DMA_TO_DEVICE);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -131,12 +131,9 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (sgt) {
|
if (sgt) {
|
||||||
err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
|
err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
|
||||||
DMA_TO_DEVICE);
|
if (err)
|
||||||
if (err == 0) {
|
|
||||||
err = -ENOMEM;
|
|
||||||
goto unpin;
|
goto unpin;
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The display controller needs contiguous memory, so
|
* The display controller needs contiguous memory, so
|
||||||
@ -144,7 +141,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
|
|||||||
* map its SG table to a single contiguous chunk of
|
* map its SG table to a single contiguous chunk of
|
||||||
* I/O virtual memory.
|
* I/O virtual memory.
|
||||||
*/
|
*/
|
||||||
if (err > 1) {
|
if (sgt->nents > 1) {
|
||||||
err = -EINVAL;
|
err = -EINVAL;
|
||||||
goto unpin;
|
goto unpin;
|
||||||
}
|
}
|
||||||
@ -166,8 +163,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
|
|||||||
struct sg_table *sgt = state->sgt[i];
|
struct sg_table *sgt = state->sgt[i];
|
||||||
|
|
||||||
if (sgt)
|
if (sgt)
|
||||||
dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
|
dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
|
||||||
DMA_TO_DEVICE);
|
|
||||||
|
|
||||||
host1x_bo_unpin(dc->dev, &bo->base, sgt);
|
host1x_bo_unpin(dc->dev, &bo->base, sgt);
|
||||||
state->iova[i] = DMA_MAPPING_ERROR;
|
state->iova[i] = DMA_MAPPING_ERROR;
|
||||||
@ -186,8 +182,7 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
|
|||||||
struct sg_table *sgt = state->sgt[i];
|
struct sg_table *sgt = state->sgt[i];
|
||||||
|
|
||||||
if (sgt)
|
if (sgt)
|
||||||
dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
|
dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
|
||||||
DMA_TO_DEVICE);
|
|
||||||
|
|
||||||
host1x_bo_unpin(dc->dev, &bo->base, sgt);
|
host1x_bo_unpin(dc->dev, &bo->base, sgt);
|
||||||
state->iova[i] = DMA_MAPPING_ERROR;
|
state->iova[i] = DMA_MAPPING_ERROR;
|
||||||
|
@ -90,18 +90,17 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
|
|||||||
struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
|
struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
|
||||||
u32 page = bo->node.start;
|
u32 page = bo->node.start;
|
||||||
u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
|
u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
|
||||||
unsigned int count;
|
struct sg_dma_page_iter dma_iter;
|
||||||
struct scatterlist *sgl;
|
|
||||||
|
|
||||||
for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) {
|
for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) {
|
||||||
u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
|
dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
|
||||||
|
u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
|
||||||
u32 pte = page_prot | page_address;
|
u32 pte = page_prot | page_address;
|
||||||
u32 i;
|
u32 i;
|
||||||
|
|
||||||
BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >=
|
BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=
|
||||||
BIT(24));
|
BIT(24));
|
||||||
|
for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++)
|
||||||
for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++)
|
|
||||||
v3d->pt[page++] = pte + i;
|
v3d->pt[page++] = pte + i;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -72,9 +72,8 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
|
|||||||
|
|
||||||
if (shmem->pages) {
|
if (shmem->pages) {
|
||||||
if (shmem->mapped) {
|
if (shmem->mapped) {
|
||||||
dma_unmap_sg(vgdev->vdev->dev.parent,
|
dma_unmap_sgtable(vgdev->vdev->dev.parent,
|
||||||
shmem->pages->sgl, shmem->mapped,
|
shmem->pages, DMA_TO_DEVICE, 0);
|
||||||
DMA_TO_DEVICE);
|
|
||||||
shmem->mapped = 0;
|
shmem->mapped = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -164,13 +163,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (use_dma_api) {
|
if (use_dma_api) {
|
||||||
shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
|
ret = dma_map_sgtable(vgdev->vdev->dev.parent,
|
||||||
shmem->pages->sgl,
|
shmem->pages, DMA_TO_DEVICE, 0);
|
||||||
shmem->pages->nents,
|
if (ret)
|
||||||
DMA_TO_DEVICE);
|
return ret;
|
||||||
*nents = shmem->mapped;
|
*nents = shmem->mapped = shmem->pages->nents;
|
||||||
} else {
|
} else {
|
||||||
*nents = shmem->pages->nents;
|
*nents = shmem->pages->orig_nents;
|
||||||
}
|
}
|
||||||
|
|
||||||
*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
|
*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
|
||||||
@ -180,13 +179,20 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
for_each_sg(shmem->pages->sgl, sg, *nents, si) {
|
if (use_dma_api) {
|
||||||
(*ents)[si].addr = cpu_to_le64(use_dma_api
|
for_each_sgtable_dma_sg(shmem->pages, sg, si) {
|
||||||
? sg_dma_address(sg)
|
(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
|
||||||
: sg_phys(sg));
|
(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
|
||||||
(*ents)[si].length = cpu_to_le32(sg->length);
|
(*ents)[si].padding = 0;
|
||||||
(*ents)[si].padding = 0;
|
}
|
||||||
|
} else {
|
||||||
|
for_each_sgtable_sg(shmem->pages, sg, si) {
|
||||||
|
(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
|
||||||
|
(*ents)[si].length = cpu_to_le32(sg->length);
|
||||||
|
(*ents)[si].padding = 0;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -302,7 +302,7 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
for_each_sg(sgt->sgl, sg, *sg_ents, i) {
|
for_each_sgtable_sg(sgt, sg, i) {
|
||||||
pg = vmalloc_to_page(data);
|
pg = vmalloc_to_page(data);
|
||||||
if (!pg) {
|
if (!pg) {
|
||||||
sg_free_table(sgt);
|
sg_free_table(sgt);
|
||||||
@ -603,9 +603,8 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
|
|||||||
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
|
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
|
||||||
|
|
||||||
if (use_dma_api)
|
if (use_dma_api)
|
||||||
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
|
dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
|
||||||
shmem->pages->sgl, shmem->pages->nents,
|
shmem->pages, DMA_TO_DEVICE);
|
||||||
DMA_TO_DEVICE);
|
|
||||||
|
|
||||||
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
|
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
|
||||||
memset(cmd_p, 0, sizeof(*cmd_p));
|
memset(cmd_p, 0, sizeof(*cmd_p));
|
||||||
@ -1019,9 +1018,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
|
|||||||
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
|
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
|
||||||
|
|
||||||
if (use_dma_api)
|
if (use_dma_api)
|
||||||
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
|
dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
|
||||||
shmem->pages->sgl, shmem->pages->nents,
|
shmem->pages, DMA_TO_DEVICE);
|
||||||
DMA_TO_DEVICE);
|
|
||||||
|
|
||||||
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
|
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
|
||||||
memset(cmd_p, 0, sizeof(*cmd_p));
|
memset(cmd_p, 0, sizeof(*cmd_p));
|
||||||
|
@ -362,8 +362,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
|
|||||||
{
|
{
|
||||||
struct device *dev = vmw_tt->dev_priv->dev->dev;
|
struct device *dev = vmw_tt->dev_priv->dev->dev;
|
||||||
|
|
||||||
dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
|
dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
DMA_BIDIRECTIONAL);
|
|
||||||
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
|
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -383,16 +382,8 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
|
|||||||
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
|
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
|
||||||
{
|
{
|
||||||
struct device *dev = vmw_tt->dev_priv->dev->dev;
|
struct device *dev = vmw_tt->dev_priv->dev->dev;
|
||||||
int ret;
|
|
||||||
|
|
||||||
ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
|
return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
|
||||||
DMA_BIDIRECTIONAL);
|
|
||||||
if (unlikely(ret == 0))
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
vmw_tt->sgt.nents = ret;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -449,10 +440,10 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
|
|||||||
if (unlikely(ret != 0))
|
if (unlikely(ret != 0))
|
||||||
goto out_sg_alloc_fail;
|
goto out_sg_alloc_fail;
|
||||||
|
|
||||||
if (vsgt->num_pages > vmw_tt->sgt.nents) {
|
if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
|
||||||
uint64_t over_alloc =
|
uint64_t over_alloc =
|
||||||
sgl_size * (vsgt->num_pages -
|
sgl_size * (vsgt->num_pages -
|
||||||
vmw_tt->sgt.nents);
|
vmw_tt->sgt.orig_nents);
|
||||||
|
|
||||||
ttm_mem_global_free(glob, over_alloc);
|
ttm_mem_global_free(glob, over_alloc);
|
||||||
vmw_tt->sg_alloc_size -= over_alloc;
|
vmw_tt->sg_alloc_size -= over_alloc;
|
||||||
|
@ -217,7 +217,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
|
|||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
|
|
||||||
DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
|
DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
|
||||||
size, sgt->nents);
|
size, sgt->orig_nents);
|
||||||
|
|
||||||
return &xen_obj->base;
|
return &xen_obj->base;
|
||||||
}
|
}
|
||||||
|
@ -170,11 +170,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
|||||||
goto unpin;
|
goto unpin;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
|
err = dma_map_sgtable(dev, sgt, dir, 0);
|
||||||
if (!err) {
|
if (err)
|
||||||
err = -ENOMEM;
|
|
||||||
goto unpin;
|
goto unpin;
|
||||||
}
|
|
||||||
|
|
||||||
job->unpins[job->num_unpins].dev = dev;
|
job->unpins[job->num_unpins].dev = dev;
|
||||||
job->unpins[job->num_unpins].dir = dir;
|
job->unpins[job->num_unpins].dir = dir;
|
||||||
@ -228,7 +226,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (host->domain) {
|
if (host->domain) {
|
||||||
for_each_sg(sgt->sgl, sg, sgt->nents, j)
|
for_each_sgtable_sg(sgt, sg, j)
|
||||||
gather_size += sg->length;
|
gather_size += sg->length;
|
||||||
gather_size = iova_align(&host->iova, gather_size);
|
gather_size = iova_align(&host->iova, gather_size);
|
||||||
|
|
||||||
@ -240,9 +238,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
|||||||
goto put;
|
goto put;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = iommu_map_sg(host->domain,
|
err = iommu_map_sgtable(host->domain,
|
||||||
iova_dma_addr(&host->iova, alloc),
|
iova_dma_addr(&host->iova, alloc),
|
||||||
sgt->sgl, sgt->nents, IOMMU_READ);
|
sgt, IOMMU_READ);
|
||||||
if (err == 0) {
|
if (err == 0) {
|
||||||
__free_iova(&host->iova, alloc);
|
__free_iova(&host->iova, alloc);
|
||||||
err = -EINVAL;
|
err = -EINVAL;
|
||||||
@ -252,12 +250,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
|||||||
job->unpins[job->num_unpins].size = gather_size;
|
job->unpins[job->num_unpins].size = gather_size;
|
||||||
phys_addr = iova_dma_addr(&host->iova, alloc);
|
phys_addr = iova_dma_addr(&host->iova, alloc);
|
||||||
} else if (sgt) {
|
} else if (sgt) {
|
||||||
err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
|
err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
|
||||||
DMA_TO_DEVICE);
|
if (err)
|
||||||
if (!err) {
|
|
||||||
err = -ENOMEM;
|
|
||||||
goto put;
|
goto put;
|
||||||
}
|
|
||||||
|
|
||||||
job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
|
job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
|
||||||
job->unpins[job->num_unpins].dev = host->dev;
|
job->unpins[job->num_unpins].dev = host->dev;
|
||||||
@ -660,8 +655,7 @@ void host1x_job_unpin(struct host1x_job *job)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (unpin->dev && sgt)
|
if (unpin->dev && sgt)
|
||||||
dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
|
dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);
|
||||||
unpin->dir);
|
|
||||||
|
|
||||||
host1x_bo_unpin(dev, unpin->bo, sgt);
|
host1x_bo_unpin(dev, unpin->bo, sgt);
|
||||||
host1x_bo_put(unpin->bo);
|
host1x_bo_put(unpin->bo);
|
||||||
|
@ -912,8 +912,8 @@ int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt)
|
|||||||
* skip cache sync. This will need to be revisited when support for
|
* skip cache sync. This will need to be revisited when support for
|
||||||
* non-coherent buffers will be added to the DU driver.
|
* non-coherent buffers will be added to the DU driver.
|
||||||
*/
|
*/
|
||||||
return dma_map_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents,
|
return dma_map_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE,
|
||||||
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
|
DMA_ATTR_SKIP_CPU_SYNC);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(vsp1_du_map_sg);
|
EXPORT_SYMBOL_GPL(vsp1_du_map_sg);
|
||||||
|
|
||||||
@ -921,8 +921,8 @@ void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt)
|
|||||||
{
|
{
|
||||||
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
|
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
|
||||||
|
|
||||||
dma_unmap_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents,
|
dma_unmap_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE,
|
||||||
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
|
DMA_ATTR_SKIP_CPU_SYNC);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg);
|
EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg);
|
||||||
|
|
||||||
|
@ -247,10 +247,9 @@ static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
|
|||||||
|
|
||||||
if (sgt) {
|
if (sgt) {
|
||||||
if (gntdev_dmabuf_attach->dir != DMA_NONE)
|
if (gntdev_dmabuf_attach->dir != DMA_NONE)
|
||||||
dma_unmap_sg_attrs(attach->dev, sgt->sgl,
|
dma_unmap_sgtable(attach->dev, sgt,
|
||||||
sgt->nents,
|
gntdev_dmabuf_attach->dir,
|
||||||
gntdev_dmabuf_attach->dir,
|
DMA_ATTR_SKIP_CPU_SYNC);
|
||||||
DMA_ATTR_SKIP_CPU_SYNC);
|
|
||||||
sg_free_table(sgt);
|
sg_free_table(sgt);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -288,8 +287,8 @@ dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
|
|||||||
sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
|
sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
|
||||||
gntdev_dmabuf->nr_pages);
|
gntdev_dmabuf->nr_pages);
|
||||||
if (!IS_ERR(sgt)) {
|
if (!IS_ERR(sgt)) {
|
||||||
if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
|
if (dma_map_sgtable(attach->dev, sgt, dir,
|
||||||
DMA_ATTR_SKIP_CPU_SYNC)) {
|
DMA_ATTR_SKIP_CPU_SYNC)) {
|
||||||
sg_free_table(sgt);
|
sg_free_table(sgt);
|
||||||
kfree(sgt);
|
kfree(sgt);
|
||||||
sgt = ERR_PTR(-ENOMEM);
|
sgt = ERR_PTR(-ENOMEM);
|
||||||
@ -633,7 +632,7 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
|
|||||||
|
|
||||||
/* Now convert sgt to array of pages and check for page validity. */
|
/* Now convert sgt to array of pages and check for page validity. */
|
||||||
i = 0;
|
i = 0;
|
||||||
for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
|
for_each_sgtable_page(sgt, &sg_iter, 0) {
|
||||||
struct page *page = sg_page_iter_page(&sg_iter);
|
struct page *page = sg_page_iter_page(&sg_iter);
|
||||||
/*
|
/*
|
||||||
* Check if page is valid: this can happen if we are given
|
* Check if page is valid: this can happen if we are given
|
||||||
|
@ -92,6 +92,8 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_page
|
|||||||
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
|
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
|
||||||
int flags);
|
int flags);
|
||||||
|
|
||||||
|
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt);
|
||||||
|
|
||||||
/* helper functions for importing */
|
/* helper functions for importing */
|
||||||
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
|
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
|
||||||
struct dma_buf *dma_buf,
|
struct dma_buf *dma_buf,
|
||||||
|
@ -846,7 +846,7 @@ static struct sg_table *mbochs_map_dmabuf(struct dma_buf_attachment *at,
|
|||||||
if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount,
|
if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount,
|
||||||
0, dmabuf->mode.size, GFP_KERNEL) < 0)
|
0, dmabuf->mode.size, GFP_KERNEL) < 0)
|
||||||
goto err2;
|
goto err2;
|
||||||
if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction))
|
if (dma_map_sgtable(at->dev, sg, direction, 0))
|
||||||
goto err3;
|
goto err3;
|
||||||
|
|
||||||
return sg;
|
return sg;
|
||||||
@ -868,6 +868,7 @@ static void mbochs_unmap_dmabuf(struct dma_buf_attachment *at,
|
|||||||
|
|
||||||
dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
|
dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
|
||||||
|
|
||||||
|
dma_unmap_sgtable(at->dev, sg, direction, 0);
|
||||||
sg_free_table(sg);
|
sg_free_table(sg);
|
||||||
kfree(sg);
|
kfree(sg);
|
||||||
}
|
}
|
||||||
|
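Besides the mechanical conversion, the diff adds a drm_prime_get_contiguous_size() helper, which the CMA, exynos, mediatek and rockchip import paths above now use instead of hand-rolled contiguity loops. The sketch below shows how an importer would check contiguity with it; the callback and its placeholder body are hypothetical, only the helper call and the size check mirror what the patched drivers do.

```c
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

/*
 * Hypothetical import_sg_table callback: reject buffers that the IOMMU did
 * not map as a single contiguous DMA range. drm_prime_get_contiguous_size()
 * walks only the DMA-mapped entries (sg_dma_address()/sg_dma_len()) and
 * returns the length of the leading contiguous run.
 */
static struct drm_gem_object *
my_gem_import_sg_table(struct drm_device *dev,
		       struct dma_buf_attachment *attach,
		       struct sg_table *sgt)
{
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/*
	 * Driver-specific part goes here: create the GEM object around
	 * sg_dma_address(sgt->sgl). Left out of this sketch.
	 */
	return ERR_PTR(-ENOSYS);
}
```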