drm: remove prime sg_table caching
That is now done by the DMA-buf helpers instead.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.kernel.org/patch/10943055/
parent f13e143e74
commit c614d7e66c
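For context, the caching that this patch removes from drm_prime.c is now provided generically by the dma-buf core (see the parent change f13e143e74): when an exporter sets .cache_sgt_mapping, dma_buf_map_attachment() keeps the first mapping on the attachment and hands it back on later calls. A simplified sketch of that helper logic, with locking and some error handling abbreviated (not the verbatim kernel source):

/*
 * Simplified sketch of the dma-buf core caching behaviour added by the
 * parent commit; see drivers/dma-buf/dma-buf.c for the real code.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	/* Return the cached mapping when one already exists. */
	if (attach->sgt) {
		/* Two directions on one attachment are not allowed. */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	/* Cache the mapping on the attachment if the exporter opted in. */
	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

	return sg_table;
}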
@@ -86,11 +86,6 @@ struct drm_prime_member {
 	struct rb_node handle_rb;
 };
 
-struct drm_prime_attachment {
-	struct sg_table *sgt;
-	enum dma_data_direction dir;
-};
-
 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
 				    struct dma_buf *dma_buf, uint32_t handle)
 {
@@ -188,25 +183,16 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
  * @dma_buf: buffer to attach device to
  * @attach: buffer attachment data
  *
- * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
- * device specific attachment. This can be used as the &dma_buf_ops.attach
- * callback.
+ * Calls &drm_driver.gem_prime_pin for device specific handling. This can be
+ * used as the &dma_buf_ops.attach callback.
  *
  * Returns 0 on success, negative error code on failure.
  */
 int drm_gem_map_attach(struct dma_buf *dma_buf,
 		       struct dma_buf_attachment *attach)
 {
-	struct drm_prime_attachment *prime_attach;
 	struct drm_gem_object *obj = dma_buf->priv;
 
-	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
-	if (!prime_attach)
-		return -ENOMEM;
-
-	prime_attach->dir = DMA_NONE;
-	attach->priv = prime_attach;
-
 	return drm_gem_pin(obj);
 }
 EXPORT_SYMBOL(drm_gem_map_attach);
@@ -222,26 +208,8 @@ EXPORT_SYMBOL(drm_gem_map_attach);
 void drm_gem_map_detach(struct dma_buf *dma_buf,
 			struct dma_buf_attachment *attach)
 {
-	struct drm_prime_attachment *prime_attach = attach->priv;
 	struct drm_gem_object *obj = dma_buf->priv;
 
-	if (prime_attach) {
-		struct sg_table *sgt = prime_attach->sgt;
-
-		if (sgt) {
-			if (prime_attach->dir != DMA_NONE)
-				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
-						   sgt->nents,
-						   prime_attach->dir,
-						   DMA_ATTR_SKIP_CPU_SYNC);
-			sg_free_table(sgt);
-		}
-
-		kfree(sgt);
-		kfree(prime_attach);
-		attach->priv = NULL;
-	}
-
 	drm_gem_unpin(obj);
 }
 EXPORT_SYMBOL(drm_gem_map_detach);
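With the per-attachment bookkeeping gone, drm_gem_map_detach() no longer unmaps or frees anything itself: for a cached mapping the dma-buf core invokes the exporter's unmap_dma_buf callback (now a real unmap, see the drm_gem_unmap_dma_buf hunk below) when the attachment is torn down. A rough sketch of that core-side cleanup, assuming the behaviour introduced by f13e143e74 (list handling and locking omitted):

/* Rough sketch of the core-side teardown; not the verbatim kernel source. */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	/* A cached mapping stays alive until the attachment is detached. */
	if (attach->sgt == sg_table)
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	/* Drop the cached mapping before calling the exporter's detach. */
	if (attach->sgt)
		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}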
@@ -286,39 +254,22 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
 struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 				     enum dma_data_direction dir)
 {
-	struct drm_prime_attachment *prime_attach = attach->priv;
 	struct drm_gem_object *obj = attach->dmabuf->priv;
 	struct sg_table *sgt;
 
-	if (WARN_ON(dir == DMA_NONE || !prime_attach))
+	if (WARN_ON(dir == DMA_NONE))
 		return ERR_PTR(-EINVAL);
 
-	/* return the cached mapping when possible */
-	if (prime_attach->dir == dir)
-		return prime_attach->sgt;
-
-	/*
-	 * two mappings with different directions for the same attachment are
-	 * not allowed
-	 */
-	if (WARN_ON(prime_attach->dir != DMA_NONE))
-		return ERR_PTR(-EBUSY);
-
 	if (obj->funcs)
 		sgt = obj->funcs->get_sg_table(obj);
 	else
 		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
 
-	if (!IS_ERR(sgt)) {
-		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-				      DMA_ATTR_SKIP_CPU_SYNC)) {
-			sg_free_table(sgt);
-			kfree(sgt);
-			sgt = ERR_PTR(-ENOMEM);
-		} else {
-			prime_attach->sgt = sgt;
-			prime_attach->dir = dir;
-		}
+	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+			      DMA_ATTR_SKIP_CPU_SYNC)) {
+		sg_free_table(sgt);
+		kfree(sgt);
+		sgt = ERR_PTR(-ENOMEM);
 	}
 
 	return sgt;
@@ -331,14 +282,19 @@ EXPORT_SYMBOL(drm_gem_map_dma_buf);
  * @sgt: scatterlist info of the buffer to unmap
  * @dir: direction of DMA transfer
  *
- * Not implemented. The unmap is done at drm_gem_map_detach(). This can be
- * used as the &dma_buf_ops.unmap_dma_buf callback.
+ * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
  */
 void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 			   struct sg_table *sgt,
 			   enum dma_data_direction dir)
 {
-	/* nothing to be done here */
+	if (!sgt)
+		return;
+
+	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+			   DMA_ATTR_SKIP_CPU_SYNC);
+	sg_free_table(sgt);
+	kfree(sgt);
 }
 EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
 
@@ -452,6 +408,7 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
 EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
 
 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+	.cache_sgt_mapping = true,
 	.attach = drm_gem_map_attach,
 	.detach = drm_gem_map_detach,
 	.map_dma_buf = drm_gem_map_dma_buf,
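For illustration, once the exporter sets .cache_sgt_mapping an importer that maps the same attachment twice gets the cached table back from the dma-buf core rather than from drm_prime.c. A minimal, hypothetical usage sketch (importer_dev, dmabuf and importer_map_twice are placeholders for whatever the caller already has; error handling trimmed):

#include <linux/dma-buf.h>

/* Hypothetical importer-side usage; not taken from the patch itself. */
static int importer_map_twice(struct device *importer_dev,
			      struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *a, *b;

	attach = dma_buf_attach(dmabuf, importer_dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	a = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	b = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	/*
	 * With .cache_sgt_mapping set by the exporter, the second call
	 * returns the mapping cached on the attachment, so a == b.
	 */

	dma_buf_unmap_attachment(attach, b, DMA_BIDIRECTIONAL);
	dma_buf_unmap_attachment(attach, a, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	return 0;
}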