Merge branch 'for-airlied-next' of git://people.freedesktop.org/~mlankhorst/linux into drm-next
fixups for nouveau and fencing

* 'for-airlied-next' of git://people.freedesktop.org/~mlankhorst/linux:
  drm/nouveau: export reservation_object from dmabuf to ttm
  drm/ttm: add reservation_object as argument to ttm_bo_init
  drm: Pass dma-buf as argument to gem_prime_import_sg_table
  drm/nouveau: assign fence_chan->name correctly
  drm/nouveau: specify if interruptible wait is desired in nouveau_fence_sync
  drm/nouveau: bump driver patchlevel to 1.2.1
commit 7a42e83d36
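Taken together, the two interface changes in this series reshape the driver import path: gem_prime_import_sg_table() now receives the dma_buf_attachment instead of a size, and ttm_bo_init() optionally accepts a caller-provided reservation_object. A minimal, hypothetical importer under those assumptions (the "mydrv" names and the mydrv_import_bo() helper are illustrative, not part of the patch):

/*
 * Illustrative sketch only: "mydrv" is a made-up driver. It shows the
 * new gem_prime_import_sg_table() signature, which takes the
 * dma_buf_attachment instead of a size_t size.
 */
#include <drm/drmP.h>
#include <linux/dma-buf.h>

static struct drm_gem_object *
mydrv_gem_prime_import_sg_table(struct drm_device *dev,
				struct dma_buf_attachment *attach,
				struct sg_table *sg)
{
	/* The buffer size now comes from the attachment's dma-buf... */
	size_t size = attach->dmabuf->size;
	/* ...and the exporter's reservation object can be reused, so
	 * fences on the shared buffer are visible to both drivers. */
	struct reservation_object *robj = attach->dmabuf->resv;

	return mydrv_import_bo(dev, size, sg, robj); /* hypothetical helper */
}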
@@ -339,7 +339,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
 			  ttm_bo_type_device, &astbo->placement,
 			  align >> PAGE_SHIFT, false, NULL, acc_size,
-			  NULL, ast_bo_ttm_destroy);
+			  NULL, NULL, ast_bo_ttm_destroy);
 	if (ret)
 		return ret;
 
@@ -377,7 +377,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
 			  ttm_bo_type_device, &bochsbo->placement,
 			  align >> PAGE_SHIFT, false, NULL, acc_size,
-			  NULL, bochs_bo_ttm_destroy);
+			  NULL, NULL, bochs_bo_ttm_destroy);
 	if (ret)
 		return ret;
 
@@ -343,7 +343,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
 			  ttm_bo_type_device, &cirrusbo->placement,
 			  align >> PAGE_SHIFT, false, NULL, acc_size,
-			  NULL, cirrus_bo_ttm_destroy);
+			  NULL, NULL, cirrus_bo_ttm_destroy);
 	if (ret)
 		return ret;
 
@@ -316,7 +316,8 @@ struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
 
 struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
+drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+				  struct dma_buf_attachment *attach,
 				  struct sg_table *sgt)
 {
 	struct drm_gem_cma_object *cma_obj;
@@ -325,14 +326,14 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
 		return ERR_PTR(-EINVAL);
 
 	/* Create a CMA GEM buffer. */
-	cma_obj = __drm_gem_cma_create(dev, size);
+	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
 	if (IS_ERR(cma_obj))
 		return ERR_CAST(cma_obj);
 
 	cma_obj->paddr = sg_dma_address(sgt->sgl);
 	cma_obj->sgt = sgt;
 
-	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, size);
+	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
 
 	return &cma_obj->base;
 }
@@ -525,7 +525,7 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 		goto fail_detach;
 	}
 
-	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
+	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
 	if (IS_ERR(obj)) {
 		ret = PTR_ERR(obj);
 		goto fail_unmap;
@@ -339,7 +339,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
 			  ttm_bo_type_device, &mgabo->placement,
 			  align >> PAGE_SHIFT, false, NULL, acc_size,
-			  NULL, mgag200_bo_ttm_destroy);
+			  NULL, NULL, mgag200_bo_ttm_destroy);
 	if (ret)
 		return ret;
 
@@ -171,7 +171,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
-		size_t size, struct sg_table *sg);
+		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
 void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
@@ -18,6 +18,7 @@
 #include "msm_drv.h"
 #include "msm_gem.h"
 
+#include <linux/dma-buf.h>
 
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -37,9 +38,9 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 }
 
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
-		size_t size, struct sg_table *sg)
+		struct dma_buf_attachment *attach, struct sg_table *sg)
 {
-	return msm_gem_import(dev, size, sg);
+	return msm_gem_import(dev, attach->dmabuf->size, sg);
 }
 
 int msm_gem_prime_pin(struct drm_gem_object *obj)
@@ -1127,7 +1127,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
 	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
+			     0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret) {
@@ -181,7 +181,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 int
 nouveau_bo_new(struct drm_device *dev, int size, int align,
 	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
-	       struct sg_table *sg,
+	       struct sg_table *sg, struct reservation_object *robj,
 	       struct nouveau_bo **pnvbo)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
@@ -230,7 +230,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
 			  type, &nvbo->placement,
 			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
-			  nouveau_bo_del_ttm);
+			  robj, nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
@@ -970,7 +970,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	}
 
 	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
-	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true);
+	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
 	if (ret == 0) {
 		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
 		if (ret == 0) {
@@ -70,6 +70,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
 void nouveau_bo_move_init(struct nouveau_drm *);
 int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
 		   u32 tile_mode, u32 tile_flags, struct sg_table *sg,
+		   struct reservation_object *robj,
 		   struct nouveau_bo **);
 int nouveau_bo_pin(struct nouveau_bo *, u32 flags);
 int nouveau_bo_unpin(struct nouveau_bo *);
@@ -106,7 +106,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 	if (nouveau_vram_pushbuf)
 		target = TTM_PL_FLAG_VRAM;
 
-	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
+	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
 			     &chan->push.buffer);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(chan->push.buffer, target);
@@ -658,7 +658,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/* Synchronize with the old framebuffer */
-	ret = nouveau_fence_sync(old_bo, chan, false);
+	ret = nouveau_fence_sync(old_bo, chan, false, false);
 	if (ret)
 		goto fail;
 
@@ -722,7 +722,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		goto fail_unpin;
 
 	/* synchronise rendering channel with the kernel's channel */
-	ret = nouveau_fence_sync(new_bo, chan, false);
+	ret = nouveau_fence_sync(new_bo, chan, false, true);
 	if (ret) {
 		ttm_bo_unreserve(&new_bo->bo);
 		goto fail_unpin;
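Note that the new intr argument is chosen per call site rather than globally. A summarizing sketch of the choices visible in this merge (the comments are editorial, not from the patch):

/* nouveau_page_flip_emit(): non-interruptible wait */
ret = nouveau_fence_sync(old_bo, chan, false, false);
/* nouveau_crtc_page_flip() (ioctl path): interruptible, may return -ERESTARTSYS */
ret = nouveau_fence_sync(new_bo, chan, false, true);
/* nouveau_bo_move_m2mf(): forwards TTM's own intr flag */
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);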
@@ -10,7 +10,7 @@
 
 #define DRIVER_MAJOR		1
 #define DRIVER_MINOR		2
-#define DRIVER_PATCHLEVEL	0
+#define DRIVER_PATCHLEVEL	1
 
 /*
  * 1.1.1:
@@ -26,6 +26,8 @@
  * 1.2.0:
  * 	- object api exposed to userspace
  * 	- fermi,kepler,maxwell zbc
+ * 1.2.1:
+ *	- allow concurrent access to bo's mapped read/write.
  */
 
 #include <nvif/client.h>
@@ -100,6 +100,18 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 	spin_unlock_irq(&fctx->lock);
 }
 
+static void
+nouveau_fence_context_put(struct kref *fence_ref)
+{
+	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
+}
+
+void
+nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
+{
+	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
+}
+
 static void
 nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
@@ -141,6 +153,7 @@ void
 nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
+	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
 	int ret;
 
 	INIT_LIST_HEAD(&fctx->flip);
@@ -148,6 +161,14 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 	spin_lock_init(&fctx->lock);
 	fctx->context = priv->context_base + chan->chid;
 
+	if (chan == chan->drm->cechan)
+		strcpy(fctx->name, "copy engine channel");
+	else if (chan == chan->drm->channel)
+		strcpy(fctx->name, "generic kernel channel");
+	else
+		strcpy(fctx->name, nvkm_client(&cli->base)->name);
+
+	kref_init(&fctx->fence_ref);
 	if (!priv->uevent)
 		return;
 
@@ -195,8 +216,12 @@ nouveau_fence_work(struct fence *fence,
 
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
 	if (!work) {
+		/*
+		 * this might not be a nouveau fence any more,
+		 * so force a lazy wait here
+		 */
 		WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
-					   false, false));
+					   true, false));
 		goto err;
 	}
 
@@ -226,12 +251,11 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 
 	if (priv->uevent)
 		fence_init(&fence->base, &nouveau_fence_ops_uevent,
-			   &fctx->lock,
-			   priv->context_base + chan->chid, ++fctx->sequence);
+			   &fctx->lock, fctx->context, ++fctx->sequence);
 	else
 		fence_init(&fence->base, &nouveau_fence_ops_legacy,
-			   &fctx->lock,
-			   priv->context_base + chan->chid, ++fctx->sequence);
+			   &fctx->lock, fctx->context, ++fctx->sequence);
+	kref_get(&fctx->fence_ref);
 
 	trace_fence_emit(&fence->base);
 	ret = fctx->emit(fence);
@@ -342,7 +366,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 }
 
 int
-nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive)
+nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
 	struct fence *fence;
@@ -369,7 +393,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive)
 			prev = f->channel;
 
 		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
-			ret = fence_wait(fence, true);
+			ret = fence_wait(fence, intr);
 
 		return ret;
 	}
@@ -387,8 +411,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive)
 		if (f)
 			prev = f->channel;
 
-		if (!prev || (ret = fctx->sync(f, prev, chan)))
-			ret = fence_wait(fence, true);
+		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+			ret = fence_wait(fence, intr);
 
 		if (ret)
 			break;
@@ -482,13 +506,22 @@ static bool nouveau_fence_no_signaling(struct fence *f)
 	return true;
 }
 
+static void nouveau_fence_release(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+
+	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
+	fence_free(&fence->base);
+}
+
 static const struct fence_ops nouveau_fence_ops_legacy = {
 	.get_driver_name = nouveau_fence_get_get_driver_name,
 	.get_timeline_name = nouveau_fence_get_timeline_name,
 	.enable_signaling = nouveau_fence_no_signaling,
 	.signaled = nouveau_fence_is_signaled,
 	.wait = nouveau_fence_wait_legacy,
-	.release = NULL
+	.release = nouveau_fence_release
 };
 
 static bool nouveau_fence_enable_signaling(struct fence *f)
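The fence context is now reference-counted because an emitted fence can outlive its channel: nouveau_fence_context_new() takes the initial kref, nouveau_fence_emit() grabs one per fence, and the new .release hook drops it, so the kfree() in nouveau_fence_context_put() only runs once the last fence is gone. Per-engine teardown therefore swaps kfree(fctx) for nouveau_fence_context_free(), as the nv04/nv10/nv84 hunks below do; a hedged sketch of that pattern ("xxNN" stands in for the engine prefix):

/*
 * Illustrative teardown after this series; it relies on 'base' being the
 * first member, so freeing the nouveau_fence_chan frees the whole fctx.
 */
static void
xxNN_fence_context_del(struct nouveau_channel *chan)
{
	struct xxNN_fence_chan *fctx = chan->fence;

	nouveau_fence_context_del(&fctx->base);
	chan->fence = NULL;
	nouveau_fence_context_free(&fctx->base);	/* was: kfree(fctx) */
}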
@@ -26,10 +26,12 @@ int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
 void nouveau_fence_work(struct fence *, void (*)(void *), void *);
 int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
-int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive);
+int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
 
 struct nouveau_fence_chan {
 	spinlock_t lock;
+	struct kref fence_ref;
+
 	struct list_head pending;
 	struct list_head flip;
 
@@ -42,7 +44,7 @@ struct nouveau_fence_chan {
 
 	u32 sequence;
 	u32 context;
-	char name[24];
+	char name[32];
 
 	struct nvif_notify notify;
 	int notify_ref;
@@ -63,6 +65,7 @@ struct nouveau_fence_priv {
 
 void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *);
 void nouveau_fence_context_del(struct nouveau_fence_chan *);
+void nouveau_fence_context_free(struct nouveau_fence_chan *);
 
 int nv04_fence_create(struct nouveau_drm *);
 int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
@@ -165,7 +165,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 		flags |= TTM_PL_FLAG_SYSTEM;
 
 	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
-			     tile_flags, NULL, pnvbo);
+			     tile_flags, NULL, NULL, pnvbo);
 	if (ret)
 		return ret;
 	nvbo = *pnvbo;
@@ -459,7 +459,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 			return ret;
 		}
 
-		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains);
+		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
 				NV_PRINTK(error, cli, "fail post-validate sync\n");
|
@ -39,7 +39,7 @@ struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *);
|
||||
extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
|
||||
extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
|
||||
extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
|
||||
struct drm_device *, size_t size, struct sg_table *);
|
||||
struct drm_device *, struct dma_buf_attachment *, struct sg_table *);
|
||||
extern void *nouveau_gem_prime_vmap(struct drm_gem_object *);
|
||||
extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *);
|
||||
|
||||
|
@@ -23,6 +23,7 @@
  */
 
 #include <drm/drmP.h>
+#include <linux/dma-buf.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_gem.h"
@@ -56,17 +57,20 @@ void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 }
 
 struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
-							 size_t size,
+							 struct dma_buf_attachment *attach,
							 struct sg_table *sg)
 {
 	struct nouveau_bo *nvbo;
+	struct reservation_object *robj = attach->dmabuf->resv;
 	u32 flags = 0;
 	int ret;
 
 	flags = TTM_PL_FLAG_TT;
 
-	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
-			     sg, &nvbo);
+	ww_mutex_lock(&robj->lock, NULL);
+	ret = nouveau_bo_new(dev, attach->dmabuf->size, 0, flags, 0, 0,
+			     sg, robj, &nvbo);
+	ww_mutex_unlock(&robj->lock);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -67,7 +67,7 @@ nv04_fence_context_del(struct nouveau_channel *chan)
 	struct nv04_fence_chan *fctx = chan->fence;
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
-	kfree(fctx);
+	nouveau_fence_context_free(&fctx->base);
 }
 
 static int
@@ -63,7 +63,7 @@ nv10_fence_context_del(struct nouveau_channel *chan)
 		nvif_object_fini(&fctx->head[i]);
 	nvif_object_fini(&fctx->sema);
 	chan->fence = NULL;
-	kfree(fctx);
+	nouveau_fence_context_free(&fctx->base);
 }
 
 int
@@ -129,7 +129,7 @@ nv17_fence_create(struct nouveau_drm *drm)
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &priv->bo);
+			     0, 0x0000, NULL, NULL, &priv->bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
 		if (!ret) {
@@ -1383,7 +1383,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &head->base.lut.nvbo);
+			     0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret) {
@@ -1406,7 +1406,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 		goto out;
 
 	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &head->base.cursor.nvbo);
+			     0, 0x0000, NULL, NULL, &head->base.cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret) {
@@ -2468,7 +2468,7 @@ nv50_display_create(struct drm_device *dev)
 
 	/* small shared memory area we use for notifiers and semaphores */
 	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &disp->sync);
+			     0, 0x0000, NULL, NULL, &disp->sync);
 	if (!ret) {
 		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
 		if (!ret) {
@@ -100,7 +100,7 @@ nv50_fence_create(struct nouveau_drm *drm)
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &priv->bo);
+			     0, 0x0000, NULL, NULL, &priv->bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
 		if (!ret) {
@@ -125,7 +125,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
-	kfree(fctx);
+	nouveau_fence_context_free(&fctx->base);
 }
 
 int
@@ -232,7 +232,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.uevent = true;
 
 	ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
-			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, &priv->bo);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
 		if (ret == 0) {
@@ -246,7 +246,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 
 	if (ret == 0)
 		ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
-				     TTM_PL_FLAG_TT, 0, 0, NULL,
+				     TTM_PL_FLAG_TT, 0, 0, NULL, NULL,
 				     &priv->bo_gart);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
@@ -537,7 +537,7 @@ int qxl_gem_prime_pin(struct drm_gem_object *obj);
 void qxl_gem_prime_unpin(struct drm_gem_object *obj);
 struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *qxl_gem_prime_import_sg_table(
-	struct drm_device *dev, size_t size,
+	struct drm_device *dev, struct dma_buf_attachment *attach,
 	struct sg_table *sgt);
 void *qxl_gem_prime_vmap(struct drm_gem_object *obj);
 void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
@@ -110,7 +110,7 @@ int qxl_bo_create(struct qxl_device *qdev,
 
 	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, 0, !kernel, NULL, size,
-			NULL, &qxl_ttm_bo_destroy);
+			NULL, NULL, &qxl_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			dev_err(qdev->dev,
@@ -46,7 +46,7 @@ struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
 }
 
 struct drm_gem_object *qxl_gem_prime_import_sg_table(
-	struct drm_device *dev, size_t size,
+	struct drm_device *dev, struct dma_buf_attachment *attach,
 	struct sg_table *table)
 {
 	WARN_ONCE(1, "not implemented");
@@ -135,7 +135,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
 			    struct drm_mode_create_dumb *args);
 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
-							size_t size,
+							struct dma_buf_attachment *,
 							struct sg_table *sg);
 int radeon_gem_prime_pin(struct drm_gem_object *obj);
 void radeon_gem_prime_unpin(struct drm_gem_object *obj);
@@ -216,7 +216,7 @@ int radeon_bo_create(struct radeon_device *rdev,
 	down_read(&rdev->pm.mclk_lock);
 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
-			acc_size, sg, &radeon_ttm_bo_destroy);
+			acc_size, sg, NULL, &radeon_ttm_bo_destroy);
 	up_read(&rdev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
 		return r;
@@ -27,6 +27,7 @@
 
 #include "radeon.h"
 #include <drm/radeon_drm.h>
+#include <linux/dma-buf.h>
 
 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -57,14 +58,14 @@ void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 }
 
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
-							size_t size,
+							struct dma_buf_attachment *attach,
							struct sg_table *sg)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_bo *bo;
 	int ret;
 
-	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
+	ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
 	if (ret)
 		return ERR_PTR(ret);
@@ -1068,6 +1068,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		struct file *persistent_swap_storage,
 		size_t acc_size,
 		struct sg_table *sg,
+		struct reservation_object *resv,
 		void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
@@ -1121,8 +1122,13 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->persistent_swap_storage = persistent_swap_storage;
 	bo->acc_size = acc_size;
 	bo->sg = sg;
-	bo->resv = &bo->ttm_resv;
-	reservation_object_init(bo->resv);
+	if (resv) {
+		bo->resv = resv;
+		lockdep_assert_held(&bo->resv->lock.base);
+	} else {
+		bo->resv = &bo->ttm_resv;
+		reservation_object_init(&bo->ttm_resv);
+	}
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 
@@ -1135,13 +1141,19 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
 				 bo->mem.num_pages);
 
-	locked = ww_mutex_trylock(&bo->resv->lock);
-	WARN_ON(!locked);
+	/* passed reservation objects should already be locked,
+	 * since otherwise lockdep will be angered in radeon.
+	 */
+	if (!resv) {
+		locked = ww_mutex_trylock(&bo->resv->lock);
+		WARN_ON(!locked);
+	}
 
 	if (likely(!ret))
 		ret = ttm_bo_validate(bo, placement, interruptible, false);
 
-	ttm_bo_unreserve(bo);
+	if (!resv)
+		ttm_bo_unreserve(bo);
 
 	if (unlikely(ret))
 		ttm_bo_unref(&bo);
@@ -1199,7 +1211,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
 			  interruptible, persistent_swap_storage, acc_size,
-			  NULL, NULL);
+			  NULL, NULL, NULL);
 	if (likely(ret == 0))
 		*p_bo = bo;
 
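The resulting ttm_bo_init() contract, as the hunks above encode it: pass resv == NULL and TTM allocates, trylocks, and unreserves its own reservation object; pass a non-NULL resv and it must already be locked, TTM neither locks nor unreserves it, and the caller unlocks afterwards. A sketch of both caller patterns under those assumptions (bdev, bo, placement, and destroy_fn are placeholders, not names from the patch):

/* Pattern A: TTM-managed reservation object, as in the driver hunks above. */
ret = ttm_bo_init(bdev, bo, size, ttm_bo_type_device, placement,
		  align >> PAGE_SHIFT, false, NULL, acc_size,
		  NULL /* sg */, NULL /* resv */, destroy_fn);

/* Pattern B: share the exporter's reservation object (cf. the
 * nouveau_prime.c hunk above); the ww_mutex must be held across the
 * call, otherwise the lockdep_assert_held() in ttm_bo_init() fires. */
ww_mutex_lock(&robj->lock, NULL);
ret = ttm_bo_init(bdev, bo, size, ttm_bo_type_device, placement,
		  align >> PAGE_SHIFT, false, NULL, acc_size,
		  sg, robj, destroy_fn);
ww_mutex_unlock(&robj->lock);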
@@ -430,7 +430,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 			  ttm_bo_type_device, placement,
 			  0, interruptible,
-			  NULL, acc_size, NULL, bo_free);
+			  NULL, acc_size, NULL, NULL, bo_free);
 	return ret;
 }
 
@@ -87,6 +87,7 @@ struct drm_gem_object;
 struct device_node;
 struct videomode;
 struct reservation_object;
+struct dma_buf_attachment;
 
 /*
  * 4 debug categories are defined:
@@ -570,7 +571,8 @@ struct drm_driver {
 					struct drm_gem_object *obj);
 	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
 	struct drm_gem_object *(*gem_prime_import_sg_table)(
-				struct drm_device *dev, size_t size,
+				struct drm_device *dev,
+				struct dma_buf_attachment *attach,
 				struct sg_table *sgt);
 	void *(*gem_prime_vmap)(struct drm_gem_object *obj);
 	void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
@@ -45,7 +45,8 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
 
 struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
+drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+				  struct dma_buf_attachment *attach,
 				  struct sg_table *sgt);
 int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
 			   struct vm_area_struct *vma);
@@ -460,6 +460,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
  * point to the shmem object backing a GEM object if TTM is used to back a
  * GEM user interface.
  * @acc_size: Accounted size for this object.
+ * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
  * @destroy: Destroy function. Use NULL for kfree().
  *
  * This function initializes a pre-allocated struct ttm_buffer_object.
@@ -487,6 +488,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
			struct file *persistent_swap_storage,
			size_t acc_size,
			struct sg_table *sg,
+			struct reservation_object *resv,
			void (*destroy) (struct ttm_buffer_object *));
 
 /**