/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

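/**
 * amdgpu_cs_get_ring - map a userspace ring request to a kernel ring
 * @adev: amdgpu device
 * @ip_type: hardware IP type (GFX, COMPUTE, DMA, UVD, VCE, UVD_ENC)
 * @ip_instance: IP instance, currently only instance 0 is valid
 * @ring: ring index within the IP block
 * @out_ring: resulting ring structure
 *
 * Looks up the kernel ring that corresponds to the (ip_type, ip_instance,
 * ring) triple supplied by userspace. Returns 0 on success or -EINVAL if the
 * instance, type or ring index is out of range or the ring is not
 * initialized.
 */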
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring)
{
	/* Right now all IPs have only one instance - multiple rings. */
	if (ip_instance != 0) {
		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
		return -EINVAL;
	}

	switch (ip_type) {
	default:
		DRM_ERROR("unknown ip type: %d\n", ip_type);
		return -EINVAL;
	case AMDGPU_HW_IP_GFX:
		if (ring < adev->gfx.num_gfx_rings) {
			*out_ring = &adev->gfx.gfx_ring[ring];
		} else {
			DRM_ERROR("only %d gfx rings are supported now\n",
				  adev->gfx.num_gfx_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_COMPUTE:
		if (ring < adev->gfx.num_compute_rings) {
			*out_ring = &adev->gfx.compute_ring[ring];
		} else {
			DRM_ERROR("only %d compute rings are supported now\n",
				  adev->gfx.num_compute_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_DMA:
		if (ring < adev->sdma.num_instances) {
			*out_ring = &adev->sdma.instance[ring].ring;
		} else {
			DRM_ERROR("only %d SDMA rings are supported\n",
				  adev->sdma.num_instances);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_UVD:
		*out_ring = &adev->uvd.ring;
		break;
	case AMDGPU_HW_IP_VCE:
		if (ring < adev->vce.num_rings) {
			*out_ring = &adev->vce.ring[ring];
		} else {
			DRM_ERROR("only %d VCE rings are supported\n",
				  adev->vce.num_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		if (ring < adev->uvd.num_enc_rings) {
			*out_ring = &adev->uvd.ring_enc[ring];
		} else {
			DRM_ERROR("only %d UVD ENC rings are supported\n",
				  adev->uvd.num_enc_rings);
			return -EINVAL;
		}
		break;
	}

	if (!(*out_ring && (*out_ring)->adev)) {
		DRM_ERROR("Ring %d is not initialized on IP %d\n",
			  ring, ip_type);
		return -EINVAL;
	}

	return 0;
}

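/**
 * amdgpu_cs_user_fence_chunk - grab the user fence BO from a fence chunk
 * @p: parser context
 * @data: fence chunk data copied from userspace
 * @offset: returned byte offset of the user fence inside the BO
 *
 * Looks up the GEM object named in the chunk, takes a reference on it and
 * records it as the parser's user fence entry. The BO must be exactly one
 * page, the fence must fit inside it, and userptr BOs are rejected.
 */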
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	size = amdgpu_bo_size(p->uf_entry.robj);
	if (size != PAGE_SIZE || (data->offset + 8) > size)
		return -EINVAL;

	*offset = data->offset;

	drm_gem_object_unreference_unlocked(gobj);

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		amdgpu_bo_unref(&p->uf_entry.robj);
		return -EINVAL;
	}

	return 0;
}

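/**
 * amdgpu_cs_parser_init - set up a parser from a CS ioctl request
 * @p: parser context to initialize
 * @data: union drm_amdgpu_cs passed in by userspace
 *
 * Copies the chunk array from userspace, validates the chunk ids, allocates
 * the job for the counted IBs and records the user fence offset if a fence
 * chunk was supplied. On failure all partially allocated chunk data is freed
 * again.
 */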
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto put_ctx;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto put_ctx;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);
	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		drm_free_large(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
put_ctx:
	amdgpu_ctx_put(p->ctx);
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
	s64 time_us, increment_us;
	u64 max_bytes;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
	used_vram = atomic64_read(&adev->vram_usage);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This returns 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	spin_unlock(&adev->mm_stats.lock);
	return max_bytes;
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

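/**
 * amdgpu_cs_bo_validate - validate a single BO for command submission
 * @p: parser context
 * @bo: buffer object to validate
 *
 * Validates the BO into its preferred domains while the per-submission move
 * budget lasts, and falls back to the allowed domains (retrying on -ENOMEM)
 * once the budget is used up. Pinned BOs are left untouched. The number of
 * bytes moved by TTM is added to p->bytes_moved.
 */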
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u64 initial_bytes_moved;
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold)
		domain = bo->prefered_domains;
	else
		domain = bo->allowed_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
		initial_bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = candidate->robj;
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		u64 initial_bytes_moved;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (candidate->robj == validated)
			break;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		amdgpu_ttm_placement_from_domain(bo, other);
		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
			initial_bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

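/**
 * amdgpu_cs_validate - validation callback for BOs touched by a submission
 * @param: amdgpu_cs_parser passed in as opaque callback parameter
 * @bo: buffer object to validate
 *
 * Wraps amdgpu_cs_bo_validate() in a retry loop that evicts other BOs from
 * the current working set when validation runs out of memory, and also
 * validates the shadow BO if one exists.
 */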
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

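/**
 * amdgpu_cs_list_validate - validate a list of BO list entries
 * @p: parser context
 * @validated: list of amdgpu_bo_list_entry to validate
 *
 * Rejects userptr BOs that belong to another process, binds freshly acquired
 * user pages before validation, and validates every BO on the list through
 * amdgpu_cs_validate().
 */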
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
			size_t size = sizeof(struct page *);

			size *= bo->tbo.ttm->num_pages;
			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			drm_free_large(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}

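/**
 * amdgpu_cs_parser_bos - reserve and validate all BOs of a submission
 * @p: parser context
 * @cs: CS ioctl data, used to look up the BO list handle
 *
 * Builds the list of BOs referenced by the submission, reserves them,
 * (re)acquires user pages for userptr BOs until the set is stable, validates
 * everything against the per-submission move budget and finally fills in the
 * GDS/GWS/OA sizes and the user fence address for the job.
 */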
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	bool need_mmap_lock = false;
	unsigned i, tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (p->bo_list) {
		need_mmap_lock = p->bo_list->first_userptr !=
			p->bo_list->num_entries;
		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	}

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj)
		list_add(&p->uf_entry.tv.head, &p->validated);

	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	while (1) {
		struct list_head need_pages;
		unsigned i;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		/* Without a BO list we don't have userptr BOs */
		if (!p->bo_list)
			break;

		INIT_LIST_HEAD(&need_pages);
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {

			e = &p->bo_list->array[i];

			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      e->robj->tbo.ttm->num_pages,
					      false);
				drm_free_large(e->user_pages);
				e->user_pages = NULL;
			}

			if (e->robj->tbo.ttm->state != tt_bound &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = drm_calloc_large(ttm->num_pages,
							 sizeof(struct page*));
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				drm_free_large(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
	p->bytes_moved = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);

	fpriv->vm.last_eviction_counter =
		atomic64_read(&p->adev->num_evictions);

	if (p->bo_list) {
		struct amdgpu_bo *gds = p->bo_list->gds_obj;
		struct amdgpu_bo *gws = p->bo_list->gws_obj;
		struct amdgpu_bo *oa = p->bo_list->oa_obj;
		struct amdgpu_vm *vm = &fpriv->vm;
		unsigned i;

		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
		}

		if (gds) {
			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
			p->job->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
			p->job->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
			p->job->oa_size = amdgpu_bo_size(oa);
		}
	}

	if (!r && p->uf_entry.robj) {
		struct amdgpu_bo *uf = p->uf_entry.robj;

		r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r) {
		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
	}

error_free_pages:

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			e = &p->bo_list->array[i];

			if (!e->user_pages)
				continue;

			release_pages(e->user_pages,
				      e->robj->tbo.ttm->num_pages,
				      false);
			drm_free_large(e->user_pages);
		}
	}

	return r;
}

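/**
 * amdgpu_cs_sync_rings - sync the job against fences of all reserved BOs
 * @p: parser context
 *
 * Adds the reservation object fences of every validated BO to the job's sync
 * object so the submission waits for outstanding work on those buffers.
 */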
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);

		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator whether the reserved buffers need to be backed off
 *
 * If error is set, then the reserved buffers are backed off; otherwise just
 * free the memory used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	unsigned i;

	if (!error) {
		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    parser->fence);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}
	dma_fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}

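/**
 * amdgpu_bo_vm_update_pte - update the page tables for the submission
 * @p: parser context
 *
 * Updates the VM page directory and the mappings of all BOs referenced by
 * the submission, and adds the resulting page-table fences to the job's sync
 * object so the IBs only run once the GPU VM is up to date.
 */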
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;
		bo_va = vm->csa_bo_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f);
		if (r)
			return r;
	}

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct dma_fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, false);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->job->sync, f);
			if (r)
				return r;
		}

	}

	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo);
		}
	}

	return r;
}

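/**
 * amdgpu_cs_ib_vm_chunk - run the VM-specific part of IB processing
 * @adev: amdgpu device
 * @p: parser context
 *
 * Parses the IBs on rings that require CS parsing (UVD/VCE VM emulation),
 * updates the page tables when a VM is attached to the job, and syncs the
 * job against the fences of all reserved buffers.
 */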
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->job->ring;
	int i, r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs) {
		for (i = 0; i < p->job->num_ibs; i++) {
			r = amdgpu_ring_parse_cs(ring, p, i);
			if (r)
				return r;
		}
	}

	if (p->job->vm) {
		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);

		r = amdgpu_bo_vm_update_pte(p);
		if (r)
			return r;
	}

	return amdgpu_cs_sync_rings(p);
}

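/**
 * amdgpu_cs_ib_fill - set up the job IBs from the IB chunks
 * @adev: amdgpu device
 * @parser: parser context
 *
 * Walks the IB chunks, resolves the target ring for each one, enforces the
 * preamble and preemption rules, and copies or maps the IB contents into the
 * job. Also rejects user fences on rings whose firmware does not support
 * them.
 */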
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r, ce_preempt = 0, de_preempt = 0;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
				       chunk_ib->ip_instance, chunk_ib->ring,
				       &ring);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
			if (!parser->ctx->preamble_presented) {
				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
				parser->ctx->preamble_presented = true;
			}
		}

		if (parser->job->ring && parser->job->ring != ring)
			return -EINVAL;

		parser->job->ring = ring;

		if (ring->funcs->parse_cs) {
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			uint64_t offset;
			uint8_t *kptr;

			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
						   &aobj);
			if (!aobj) {
				DRM_ERROR("IB va_start is invalid\n");
				return -EINVAL;
			}

			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r) {
				return r;
			}

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);
		} else {
			r = amdgpu_ib_get(adev, vm, 0, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;
		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return 0;
}

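/**
 * amdgpu_cs_dependencies - add the fences from dependency chunks to the job
 * @adev: amdgpu device
 * @p: parser context
 *
 * For every AMDGPU_CHUNK_ID_DEPENDENCIES chunk, looks up the referenced
 * context fences and adds them to the job's sync object so the submission
 * waits for them before it runs.
 */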
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	int i, j, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct drm_amdgpu_cs_chunk_dep *deps;
		struct amdgpu_cs_chunk *chunk;
		unsigned num_deps;

		chunk = &p->chunks[i];

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
			continue;

		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
		num_deps = chunk->length_dw * 4 /
			sizeof(struct drm_amdgpu_cs_chunk_dep);

		for (j = 0; j < num_deps; ++j) {
			struct amdgpu_ring *ring;
			struct amdgpu_ctx *ctx;
			struct dma_fence *fence;

			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
					       deps[j].ip_instance,
					       deps[j].ring, &ring);
			if (r)
				return r;

			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
			if (ctx == NULL)
				return -EINVAL;

			fence = amdgpu_ctx_get_fence(ctx, ring,
						     deps[j].handle);
			if (IS_ERR(fence)) {
				r = PTR_ERR(fence);
				amdgpu_ctx_put(ctx);
				return r;

			} else if (fence) {
				r = amdgpu_sync_fence(adev, &p->job->sync,
						      fence);
				dma_fence_put(fence);
				amdgpu_ctx_put(ctx);
				if (r)
					return r;
			}
		}
	}

	return 0;
}

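/**
 * amdgpu_cs_submit - push the prepared job to the GPU scheduler
 * @p: parser context
 * @cs: CS ioctl data, used to return the fence sequence number
 *
 * Hands the job over to the scheduler entity of the context ring, records
 * the finished fence in the context and returns its sequence number to
 * userspace through cs->out.handle.
 */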
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_ring *ring = p->job->ring;
	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	struct amdgpu_job *job;
	int r;

	job = p->job;
	p->job = NULL;

	r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
	if (r) {
		amdgpu_job_free(job);
		return r;
	}

	job->owner = p->filp;
	job->fence_ctx = entity->fence_context;
	p->fence = dma_fence_get(&job->base.s_fence->finished);
	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
	job->uf_sequence = cs->out.handle;
	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amd_sched_entity_push_job(&job->base);

	return 0;
}

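/**
 * amdgpu_cs_ioctl - the main command submission ioctl
 * @dev: drm device
 * @data: union drm_amdgpu_cs from userspace
 * @filp: file private
 *
 * Drives the whole submission: parser setup, buffer reservation and
 * validation, IB setup, dependency handling, VM updates and finally the push
 * to the scheduler. The parser is torn down again on every exit path.
 */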
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;
	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
			       wait->in.ring, &ring);
	if (r)
		return r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
			       user->ring, &ring);
	if (r)
		return ERR_PTR(r);

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* drop the reference taken by amdgpu_cs_get_fence() */
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;
	/* set return value 0 to indicate success */
	r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = (void __user *)(unsigned long)(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find the bo_va mapping for a VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns the mapping structure when found, NULL
 * otherwise.
 */
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo)
{
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i;

	if (!parser->bo_list)
		return NULL;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < parser->bo_list->num_entries; i++) {
		struct amdgpu_bo_list_entry *lobj;

		lobj = &parser->bo_list->array[i];
		if (!lobj->bo_va)
			continue;

		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
			if (mapping->start > addr ||
			    addr > mapping->last)
				continue;

			*bo = lobj->bo_va->bo;
			return mapping;
		}

		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
			if (mapping->start > addr ||
			    addr > mapping->last)
				continue;

			*bo = lobj->bo_va->bo;
			return mapping;
		}
	}

	return NULL;
}

/**
 * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
 *
 * @parser: command submission parser context
 *
 * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
 */
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
{
	unsigned i;
	int r;

	if (!parser->bo_list)
		return 0;

	for (i = 0; i < parser->bo_list->num_entries; i++) {
		struct amdgpu_bo *bo = parser->bo_list->array[i].robj;

		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
		if (unlikely(r))
			return r;

		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			continue;

		bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (unlikely(r))
			return r;
	}

	return 0;
}