linux_dsm_epyc7002/drivers/gpu/drm/nouveau/nouveau_gem.c

/*
* Copyright (C) 2008 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"
#include <nvif/class.h>
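/*
* GEM object destructor. A runtime-PM reference is held across the
* teardown, as releasing the backing TTM buffer object may need the
* device awake.
*/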
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
struct device *dev = drm->dev->dev;
int ret;
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0 && ret != -EACCES))
return;
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
drm_gem_object_release(gem);
/* reset filp so nouveau_bo_del_ttm() can test for it */
gem->filp = NULL;
ttm_bo_unref(&bo);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
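/*
* Called when a client opens a handle to the object. On NV50 and
* newer, where clients have their own virtual address spaces, this
* creates (or takes another reference on) the client's VMA for the
* buffer.
*/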
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
struct nouveau_vma *vma;
int ret;
if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0 && ret != -EACCES)
goto out;
ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out:
ttm_bo_unreserve(&nvbo->bo);
return ret;
}
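/*
* Deferred VMA teardown: when the buffer is still busy on the GPU,
* the unmap is queued as client work that runs once the outstanding
* fence signals, rather than stalling the caller.
*/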
struct nouveau_gem_object_unmap {
struct nouveau_cli_work work;
struct nouveau_vma *vma;
};
static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
nouveau_vma_del(&vma);
}
static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
struct nouveau_gem_object_unmap *work =
container_of(w, typeof(*work), work);
nouveau_gem_object_delete(work->vma);
kfree(work);
}
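/*
* Drop a VMA. If the buffer is idle, or was never placed outside
* system memory, the VMA is deleted immediately; otherwise deletion
* is deferred until the outstanding fence signals. If the work item
* cannot be allocated, fall back to a bounded synchronous wait.
*/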
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
struct nouveau_gem_object_unmap *work;
struct dma_fence *fence = NULL;
fobj = reservation_object_get_list(resv);
list_del_init(&vma->head);
if (fobj && fobj->shared_count > 1)
ttm_bo_wait(&nvbo->bo, false, false);
else if (fobj && fobj->shared_count == 1)
fence = rcu_dereference_protected(fobj->shared[0],
reservation_object_held(resv));
else
fence = reservation_object_get_excl(nvbo->bo.resv);
if (!fence || !mapped) {
nouveau_gem_object_delete(vma);
return;
}
if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
nouveau_gem_object_delete(vma);
return;
}
work->work.func = nouveau_gem_object_delete_work;
work->vma = vma;
nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
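/*
* Called when a client drops its handle to the object. The client's
* VMA is unmapped once its reference count reaches zero; runtime PM
* is taken around the unmap as it may need to touch the hardware.
*/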
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
struct nouveau_vma *vma;
int ret;
if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return;
vma = nouveau_vma_find(nvbo, &cli->vmm);
if (vma) {
if (--vma->refs == 0) {
ret = pm_runtime_get_sync(dev);
if (!WARN_ON(ret < 0 && ret != -EACCES)) {
nouveau_gem_object_unmap(nvbo, vma);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
}
}
ttm_bo_unreserve(&nvbo->bo);
}
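/*
* Allocate a new buffer object and initialise the GEM object embedded
* in it. The caller receives a single GEM reference; the underlying
* TTM reference is owned by the GEM object.
*/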
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
struct nouveau_bo **pnvbo)
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
u32 flags = 0;
int ret;
if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
flags |= TTM_PL_FLAG_VRAM;
if (domain & NOUVEAU_GEM_DOMAIN_GART)
flags |= TTM_PL_FLAG_TT;
if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
flags |= TTM_PL_FLAG_UNCACHED;
ret = nouveau_bo_new(cli, size, align, flags, tile_mode,
tile_flags, NULL, NULL, pnvbo);
if (ret)
return ret;
nvbo = *pnvbo;
/* we restrict allowed domains on nv50+ to only the types
* that were requested at creation time. this is not possible
* on earlier chips without breaking the ABI.
*/
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size);
if (ret) {
nouveau_bo_ref(NULL, pnvbo);
return ret;
}
nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
return 0;
}
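/*
* Fill in the userspace info structure for a buffer: valid domains,
* offset, size, mmap handle and tiling state. On NV50 and newer the
* reported offset is the buffer's address in the client's VMM.
*/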
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
struct drm_nouveau_gem_info *rep)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vma *vma;
if (is_power_of_2(nvbo->valid_domains))
rep->domain = nvbo->valid_domains;
else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->bo.offset;
if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
vma = nouveau_vma_find(nvbo, &cli->vmm);
if (!vma)
return -EINVAL;
rep->offset = vma->addr;
}
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
rep->tile_mode = nvbo->mode;
rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
rep->tile_flags |= nvbo->kind << 8;
else
if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
else
rep->tile_flags |= nvbo->zeta;
return 0;
}
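/*
* DRM_NOUVEAU_GEM_NEW: allocate a buffer object and return a handle
* and its initial placement information to userspace.
*/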
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
if (ret)
return ret;
ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
if (ret == 0) {
ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
if (ret)
drm_gem_handle_delete(file_priv, req->info.handle);
}
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&nvbo->gem);
return ret;
}
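/*
* Derive a TTM placement from the domains requested for this
* submission. The buffer's current placement is preferred when it
* already satisfies the request, avoiding a needless migration;
* otherwise VRAM is preferred over GART.
*/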
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
uint32_t write_domains, uint32_t valid_domains)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
uint32_t pref_flags = 0, valid_flags = 0;
if (!domains)
return -EINVAL;
if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
valid_flags |= TTM_PL_FLAG_VRAM;
if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
valid_flags |= TTM_PL_FLAG_TT;
if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
bo->mem.mem_type == TTM_PL_VRAM)
pref_flags |= TTM_PL_FLAG_VRAM;
else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
bo->mem.mem_type == TTM_PL_TT)
pref_flags |= TTM_PL_FLAG_TT;
else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
pref_flags |= TTM_PL_FLAG_VRAM;
else
pref_flags |= TTM_PL_FLAG_TT;
nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
return 0;
}
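/*
* State for reserving a submission's buffer list: every object is
* locked under a single ww_acquire ticket so that reservation can
* back off and retry without deadlocking against other submitters.
*/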
struct validate_op {
struct list_head list;
struct ww_acquire_ctx ticket;
};
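/*
* Unwind the validation list: attach the submission fence (if any) to
* each buffer, drop any kmaps taken for reloc patching, then
* unreserve and unreference every object.
*/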
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
struct nouveau_bo *nvbo;
struct drm_nouveau_gem_pushbuf_bo *b;
while (!list_empty(&op->list)) {
nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
b = &pbbo[nvbo->pbbo_index];
if (likely(fence))
nouveau_bo_fence(nvbo, fence, !!b->write_domains);
if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);
nvbo->validate_mapped = false;
}
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
drm_gem_object_unreference_unlocked(&nvbo->gem);
}
}
static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence,
struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
validate_fini_no_ticket(op, fence, pbbo);
ww_acquire_fini(&op->ticket);
}
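/*
* Look up and reserve every buffer in the pushbuf's buffer list. On
* a ww-mutex deadlock (-EDEADLK), all reservations are dropped and
* the contended buffer is re-acquired via the slowpath before the
* whole list is retried.
*/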
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
int nr_buffers, struct validate_op *op)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int trycnt = 0;
int ret = -EINVAL, i;
struct nouveau_bo *res_bo = NULL;
LIST_HEAD(gart_list);
LIST_HEAD(vram_list);
LIST_HEAD(both_list);
ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
if (++trycnt > 100000) {
NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
for (i = 0; i < nr_buffers; i++) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
gem = drm_gem_object_lookup(file_priv, b->handle);
if (!gem) {
NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
ret = -ENOENT;
break;
}
nvbo = nouveau_gem_object(gem);
if (nvbo == res_bo) {
res_bo = NULL;
drm_gem_object_unreference_unlocked(gem);
continue;
}
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
NV_PRINTK(err, cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
drm_gem_object_unreference_unlocked(gem);
ret = -EINVAL;
break;
}
ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
if (ret) {
list_splice_tail_init(&vram_list, &op->list);
list_splice_tail_init(&gart_list, &op->list);
list_splice_tail_init(&both_list, &op->list);
validate_fini_no_ticket(op, NULL, NULL);
if (unlikely(ret == -EDEADLK)) {
ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
&op->ticket);
if (!ret)
res_bo = nvbo;
}
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "fail reserve\n");
break;
}
}
b->user_priv = (uint64_t)(unsigned long)nvbo;
nvbo->reserved_by = file_priv;
nvbo->pbbo_index = i;
if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
(b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
list_add_tail(&nvbo->entry, &both_list);
else
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
list_add_tail(&nvbo->entry, &vram_list);
else
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
list_add_tail(&nvbo->entry, &gart_list);
else {
NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
b->valid_domains);
list_add_tail(&nvbo->entry, &both_list);
ret = -EINVAL;
break;
}
if (nvbo == res_bo)
goto retry;
}
ww_acquire_done(&op->ticket);
list_splice_tail(&vram_list, &op->list);
list_splice_tail(&gart_list, &op->list);
list_splice_tail(&both_list, &op->list);
if (ret)
validate_fini(op, NULL, NULL);
return ret;
}
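/*
* Validate each reserved buffer into an acceptable placement, sync it
* against the target channel and, on pre-Tesla chips, copy back any
* presumed offsets that turned out to be stale. Returns the number
* of buffers whose presumed state changed (i.e. that need relocs
* applied), or a negative error code.
*/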
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
uint64_t user_pbbo_ptr)
{
struct nouveau_drm *drm = chan->drm;
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
(void __force __user *)(uintptr_t)user_pbbo_ptr;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
NV_PRINTK(err, cli, "fail set_domain\n");
return ret;
}
ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "fail ttm_validate\n");
return ret;
}
ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "fail post-validate sync\n");
return ret;
}
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
(nvbo->bo.mem.mem_type == TTM_PL_TT &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
continue;
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
else
b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
b->presumed.offset = nvbo->bo.offset;
b->presumed.valid = 0;
relocs++;
if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
&b->presumed, sizeof(b->presumed)))
return -EFAULT;
}
}
return relocs;
}
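/*
* Reserve and validate the submission's buffer list. *apply_relocs
* is set to the number of buffers whose presumed offsets were
* invalidated, which determines whether relocations must be applied.
*/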
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
uint64_t user_buffers, int nr_buffers,
struct validate_op *op, int *apply_relocs)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int ret;
INIT_LIST_HEAD(&op->list);
if (nr_buffers == 0)
return 0;
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validate_init\n");
return ret;
}
ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validating bo list\n");
validate_fini(op, NULL, NULL);
return ret;
}
*apply_relocs = ret;
return 0;
}
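/*
* Helpers for copying variable-length userspace arrays (the buffer,
* reloc and push lists) into kernel memory. kvmalloc() lets large
* lists fall back to vmalloc; free with u_free(). Typical use:
*
*	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
*	if (IS_ERR(reloc))
*		return PTR_ERR(reloc);
*/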
static inline void
u_free(void *addr)
{
kvfree(addr);
}
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
void *mem;
void __user *userptr = (void __force __user *)(uintptr_t)user;
size *= nmemb;
mem = kvmalloc(size, GFP_KERNEL);
if (!mem)
return ERR_PTR(-ENOMEM);
if (copy_from_user(mem, userptr, size)) {
u_free(mem);
return ERR_PTR(-EFAULT);
}
return mem;
}
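/*
* Patch relocations into buffers whose presumed offsets were stale.
* Each reloc rewrites one 32-bit word (the low or high half of an
* address, optionally OR'd with placement-dependent bits) after
* waiting for the target buffer to go idle.
*/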
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
struct drm_nouveau_gem_pushbuf_bo *bo)
{
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
int ret = 0;
unsigned i;
reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
if (IS_ERR(reloc))
return PTR_ERR(reloc);
for (i = 0; i < req->nr_relocs; i++) {
struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
struct drm_nouveau_gem_pushbuf_bo *b;
struct nouveau_bo *nvbo;
uint32_t data;
if (unlikely(r->bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
b = &bo[r->bo_index];
if (b->presumed.valid)
continue;
if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
NV_PRINTK(err, cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
if (ret) {
NV_PRINTK(err, cli, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
}
if (r->flags & NOUVEAU_GEM_RELOC_LOW)
data = b->presumed.offset + r->data;
else
if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
data = (b->presumed.offset + r->data) >> 32;
else
data = r->data;
if (r->flags & NOUVEAU_GEM_RELOC_OR) {
if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
data |= r->tor;
else
data |= r->vor;
}
ret = ttm_bo_wait(&nvbo->bo, false, false);
if (ret) {
NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
break;
}
nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
u_free(reloc);
return ret;
}
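/*
* DRM_NOUVEAU_GEM_PUSHBUF: the main submission ioctl. Copies in the
* push, buffer and reloc lists, reserves and validates every buffer,
* applies relocations where required, queues the push buffers on the
* channel (via the IB ring, call or jump methods depending on the
* chipset), and attaches a fence to the submission.
*/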
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_abi16_chan *temp;
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf *req = data;
struct drm_nouveau_gem_pushbuf_push *push;
struct drm_nouveau_gem_pushbuf_bo *bo;
struct nouveau_channel *chan = NULL;
struct validate_op op;
struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
if (unlikely(!abi16))
return -ENOMEM;
list_for_each_entry(temp, &abi16->channels, head) {
if (temp->chan->chid == req->channel) {
chan = temp->chan;
break;
}
}
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
req->vram_available = drm->gem.vram_available;
req->gart_available = drm->gem.gart_available;
if (unlikely(req->nr_push == 0))
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return nouveau_abi16_put(abi16, -EINVAL);
}
push = u_memcpya(req->push, req->nr_push, sizeof(*push));
if (IS_ERR(push))
return nouveau_abi16_put(abi16, PTR_ERR(push));
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
if (IS_ERR(bo)) {
u_free(push);
return nouveau_abi16_put(abi16, PTR_ERR(bo));
}
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
}
/* Validate buffer list */
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validate: %d\n", ret);
goto out_prevalid;
}
/* Apply any relocations that are required */
if (do_reloc) {
ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
if (ret) {
NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
goto out;
}
}
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
goto out;
}
for (i = 0; i < req->nr_push; i++) {
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
nv50_dma_push(chan, nvbo, push[i].offset,
push[i].length);
}
} else
if (drm->client.device.info.chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
NV_PRINTK(err, cli, "cal_space: %d\n", ret);
goto out;
}
for (i = 0; i < req->nr_push; i++) {
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
OUT_RING(chan, 0);
}
} else {
ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
goto out;
}
for (i = 0; i < req->nr_push; i++) {
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
uint32_t cmd;
cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
cmd |= 0x20000000;
if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0,
nvbo->bo.mem.
num_pages,
&nvbo->kmap);
if (ret) {
WIND_RING(chan);
goto out;
}
nvbo->validate_mapped = true;
}
nouveau_bo_wr32(nvbo, (push[i].offset +
push[i].length - 8) / 4, cmd);
}
OUT_RING(chan, 0x20000000 |
(nvbo->bo.offset + push[i].offset));
OUT_RING(chan, 0);
for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
OUT_RING(chan, 0);
}
}
ret = nouveau_fence_new(chan, false, &fence);
if (ret) {
NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
out:
validate_fini(&op, fence, bo);
nouveau_fence_unref(&fence);
out_prevalid:
u_free(bo);
u_free(push);
out_next:
if (chan->dma.ib_max) {
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
if (drm->client.device.info.chipset >= 0x25) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
req->suffix0 = 0x20000000 |
(chan->push.addr + ((chan->dma.cur + 2) << 2));
req->suffix1 = 0x00000000;
}
return nouveau_abi16_put(abi16, ret);
}
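/*
* DRM_NOUVEAU_GEM_CPU_PREP: wait for pending GPU access to the buffer
* to finish (up to 30 seconds, or not at all with NOWAIT), then make
* it coherent for CPU access.
*/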
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gem_cpu_prep *req = data;
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
long lret;
int ret;
gem = drm_gem_object_lookup(file_priv, req->handle);
if (!gem)
return -ENOENT;
nvbo = nouveau_gem_object(gem);
lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true,
no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
else if (lret > 0)
ret = 0;
else
ret = lret;
nouveau_bo_sync_for_cpu(nvbo);
drm_gem_object_unreference_unlocked(gem);
return ret;
}
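/*
* DRM_NOUVEAU_GEM_CPU_FINI: flush CPU writes back to the device after
* CPU access is complete.
*/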
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gem_cpu_fini *req = data;
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
gem = drm_gem_object_lookup(file_priv, req->handle);
if (!gem)
return -ENOENT;
nvbo = nouveau_gem_object(gem);
nouveau_bo_sync_for_device(nvbo);
drm_gem_object_unreference_unlocked(gem);
return 0;
}
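/*
* DRM_NOUVEAU_GEM_INFO: query placement, size, map handle and tiling
* state for an existing buffer handle.
*/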
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gem_info *req = data;
struct drm_gem_object *gem;
int ret;
gem = drm_gem_object_lookup(file_priv, req->handle);
if (!gem)
return -ENOENT;
ret = nouveau_gem_info(file_priv, gem, req);
drm_gem_object_unreference_unlocked(gem);
return ret;
}