/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	size = amdgpu_bo_size(p->uf_entry.robj);
	if (size != PAGE_SIZE || (data->offset + 8) > size)
		return -EINVAL;

	*offset = data->offset;

	drm_gem_object_put_unlocked(gobj);

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		amdgpu_bo_unref(&p->uf_entry.robj);
		return -EINVAL;
	}

	return 0;
}

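/* Copy the user's chunk array in, look up the submitting context, translate
 * each chunk into a kernel-side amdgpu_cs_chunk and allocate the job for the
 * number of IB chunks found.
 */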
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);
	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

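/* Validate a single BO, preferring its preferred domains while the
 * per-submission byte-move budget lasts and falling back to the allowed
 * domains when memory is tight.
 */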
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains &&
	    !(bo->flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	struct ttm_operation_ctx ctx = { true, false };
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = candidate->robj;
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		u64 initial_bytes_moved, bytes_moved;
		bool update_bytes_moved_vis;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (candidate->robj == validated)
			break;

		/* We can't move pinned BOs here */
		if (bo->pin_count)
			continue;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		amdgpu_ttm_placement_from_domain(bo, other);
		update_bytes_moved_vis =
			adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
			bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
			initial_bytes_moved;
		p->bytes_moved += bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

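/* Per-BO validation callback: validate the BO, retrying with evictions from
 * the working set on -ENOMEM, and validate its shadow BO if one exists.
 */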
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

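/* Validate every BO on the given list, binding freshly acquired user pages
 * for userptr BOs before validation.
 */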
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
		    lobj->user_pages) {
			amdgpu_ttm_placement_from_domain(bo,
							 AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}

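/* Gather and reserve all BOs referenced by the submission, retry until the
 * userptr page arrays are stable, validate everything against the move
 * thresholds and fill in the GDS/GWS/OA and user-fence addresses.
 */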
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	unsigned i, tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (p->bo_list) {
		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
		if (p->bo_list->first_userptr != p->bo_list->num_entries)
			p->mn = amdgpu_mn_get(p->adev);
	}

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	while (1) {
		struct list_head need_pages;
		unsigned i;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		/* Without a BO list we don't have userptr BOs */
		if (!p->bo_list)
			break;

		INIT_LIST_HEAD(&need_pages);
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			struct amdgpu_bo *bo;

			e = &p->bo_list->array[i];
			bo = e->robj;

			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      bo->tbo.ttm->num_pages);
				kvfree(e->user_pages);
				e->user_pages = NULL;
			}

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = kvmalloc_array(ttm->num_pages,
						       sizeof(struct page*),
						       GFP_KERNEL | __GFP_ZERO);
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				kvfree(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);
	if (p->bo_list) {
		struct amdgpu_bo *gds = p->bo_list->gds_obj;
		struct amdgpu_bo *gws = p->bo_list->gws_obj;
		struct amdgpu_bo *oa = p->bo_list->oa_obj;
		struct amdgpu_vm *vm = &fpriv->vm;
		unsigned i;

		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
		}

		if (gds) {
			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
			p->job->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
			p->job->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
			p->job->oa_size = amdgpu_bo_size(oa);
		}
	}

	if (!r && p->uf_entry.robj) {
		struct amdgpu_bo *uf = p->uf_entry.robj;

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_free_pages:

	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			e = &p->bo_list->array[i];

			if (!e->user_pages)
				continue;

			release_pages(e->user_pages,
				      e->robj->tbo.ttm->num_pages);
			kvfree(e->user_pages);
		}
	}

	return r;
}

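/* Add the reservation-object fences of every validated BO to the job's sync
 * object, passing each BO's explicit-sync setting through to amdgpu_sync_resv.
 */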
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(e->robj));

		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to backoff the reservation
 *
 * If error is set, then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
		drm_syncobj_put(parser->post_dep_syncobjs[i]);
	kfree(parser->post_dep_syncobjs);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}

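/* Update the page tables for all BOs touched by this submission and add the
 * resulting page-table update fences to the job's sync object.
 */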
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update, false);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;

		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct dma_fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, false);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
			if (r)
				return r;
		}

	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
	if (r)
		return r;

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return r;
}

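/* For rings that still parse command streams in the kernel (UVD/VCE VM
 * emulation), copy each IB into kernel memory and run the ring parser, then
 * update the VM page tables and sync against all relevant fences.
 */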
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->job->ring;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (p->job->ring->funcs->parse_cs) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r) {
				return r;
			}

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);

			r = amdgpu_ring_parse_cs(ring, p, j);
			if (r)
				return r;

			j++;
		}
	}

	if (p->job->vm) {
		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);

		r = amdgpu_bo_vm_update_pte(p);
		if (r)
			return r;
	}

	return amdgpu_cs_sync_rings(p);
}

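/* Walk the IB chunks, map each one to a ring, enforce the SR-IOV preemption
 * and preamble rules, and allocate the kernel-side IB structures.
 */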
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r, ce_preempt = 0, de_preempt = 0;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
					 chunk_ib->ip_instance, chunk_ib->ring, &ring);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
			if (!parser->ctx->preamble_presented) {
				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
				parser->ctx->preamble_presented = true;
			}
		}

		if (parser->job->ring && parser->job->ring != ring)
			return -EINVAL;

		parser->job->ring = ring;

		r = amdgpu_ib_get(adev, vm,
				  ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
				  ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
}

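/* Resolve a DEPENDENCIES chunk: look up each referenced context fence and add
 * it to the job's sync object.
 */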
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ring *ring;
		struct amdgpu_ctx *ctx;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
					 deps[i].ip_type,
					 deps[i].ip_instance,
					 deps[i].ring, &ring);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, ring,
					     deps[i].handle);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			amdgpu_ctx_put(ctx);
			return r;
		} else if (fence) {
			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
					      true);
			dma_fence_put(fence);
			amdgpu_ctx_put(ctx);
			if (r)
				return r;
		}
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle)
{
	int r;
	struct dma_fence *fence;
	r = drm_syncobj_find_fence(p->filp, handle, &fence);
	if (r)
		return r;

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i;
	struct drm_amdgpu_cs_chunk_sem *deps;
	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	p->post_dep_syncobjs = kmalloc_array(num_deps,
					     sizeof(struct drm_syncobj *),
					     GFP_KERNEL);
	p->num_post_dep_syncobjs = 0;

	if (!p->post_dep_syncobjs)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_dep_syncobjs[i])
			return -EINVAL;
		p->num_post_dep_syncobjs++;
	}
	return 0;
}

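/* Dispatch all dependency-type chunks (fence dependencies, syncobj in/out)
 * to their respective handlers.
 */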
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
		}
	}

	return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}

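/* Hand the prepared job over to the GPU scheduler: take the MMU-notifier
 * lock, re-check userptr pages, install the scheduler fence on the context
 * and the reserved buffers, then push the job to the entity.
 */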
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_ring *ring = p->job->ring;
	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	struct amdgpu_job *job;
	unsigned i;
	uint64_t seq;

	int r;

	amdgpu_mn_lock(p->mn);
	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
				amdgpu_mn_unlock(p->mn);
				return -ERESTARTSYS;
			}
		}
	}

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
	if (r) {
		amdgpu_job_free(job);
		amdgpu_mn_unlock(p->mn);
		return r;
	}

	job->owner = p->filp;
	job->fence_ctx = entity->fence_context;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
	if (r) {
		dma_fence_put(p->fence);
		dma_fence_put(&job->base.s_fence->finished);
		amdgpu_job_free(job);
		amdgpu_mn_unlock(p->mn);
		return r;
	}

	amdgpu_cs_post_dependencies(p);

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);
	amdgpu_ring_priority_get(job->ring, job->base.s_priority);

	trace_amdgpu_cs_ioctl(job);
	drm_sched_entity_push_job(&job->base, entity);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);

	return 0;
}

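/* Main command-submission ioctl: parse the chunks, fill the IBs, reserve and
 * validate the buffers, resolve dependencies and submit the job.
 */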
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				 wait->in.ip_type, wait->in.ip_instance,
				 wait->in.ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

2016-11-05 03:16:10 +07:00
|
|
|
/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
				 user->ip_instance, user->ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

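/**
 * amdgpu_cs_fence_to_handle_ioctl - export a CS fence as a syncobj or sync_file
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Turn the fence of an already submitted CS into a syncobj handle, a syncobj
 * fd or a sync_file fd, depending on what userspace asked for.
 */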
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

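	/* Each case consumes the fence reference taken by amdgpu_cs_get_fence() */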
	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		/* don't leak the reference taken above on an unknown request */
		dma_fence_put(fence);
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

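	/* Wait for each fence in turn; stop as soon as one times out or signals with an error */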
	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);

		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
	if (array == NULL)
		return -ENOMEM;

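	/* Collect the fences; a NULL fence has already signaled, so we can stop right here */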
	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

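	/* Wait until any one of the fences signals; &first receives the index of the fence that did */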
	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

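	/* Either wait for all fences or just for the first one to signal */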
	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when the
 * mapping is found, an error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

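	/* The VM tracks mappings in GPU page granularity, so convert the address first */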
	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
		return -EINVAL;

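	/*
	 * Make sure the BO has a contiguous placement and a GART mapping;
	 * the IP specific command parsers presumably rely on addressing it
	 * as one linear block.
	 */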
	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}