Merge tag 'drm-misc-next-2018-12-06' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

Final changes to drm-misc-next for v4.21:

UAPI Changes:

Core Changes:
- Add dma_fence_get_stub to dma-buf, and use it in drm/syncobj.
- Add and use DRM_MODESET_LOCK_BEGIN/END helpers.
- Small fixes to drm_atomic_helper_resume(), drm_mode_setcrtc() and
  drm_atomic_helper_commit_duplicated_state().
- Fix drm_atomic_state_helper.[hc] extraction.

Driver Changes:
- Small fixes to tinydrm, vkms, meson, rcar-du, virtio, v3d, and pl111.
- vc4: Allow scaling and YUV formats on cursor planes.
- v3d: Enable use of the Texture Formatting Unit, and fix
  prime imports of buffers from other drivers.
- Add support for the AUO G101EVN010 panel.
- sun4i: Enable support for the H6 display engine.

Signed-off-by: Dave Airlie <airlied@redhat.com>
[airlied: added "drm/v3d: fix broken build" to the merge commit]
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/321be9d3-ab75-5f92-8193-e5113662edef@linux.intel.com
Commit e69aa5f9b9 (Dave Airlie, 2018-12-07 10:46:29 +10:00)
46 changed files with 1163 additions and 517 deletions

Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt (new file)

@@ -0,0 +1,12 @@
+AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g101evn010"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.

Documentation/gpu/todo.rst

@@ -241,6 +241,21 @@ struct drm_gem_object_funcs
 GEM objects can now have a function table instead of having the callbacks on the
 DRM driver struct. This is now the preferred way and drivers can be moved over.
 
+Use DRM_MODESET_LOCK_ALL_* helpers instead of boilerplate
+---------------------------------------------------------
+
+For cases where drivers are attempting to grab the modeset locks with a local
+acquire context. Replace the boilerplate code surrounding
+drm_modeset_lock_all_ctx() with DRM_MODESET_LOCK_ALL_BEGIN() and
+DRM_MODESET_LOCK_ALL_END() instead.
+
+This should also be done for all places where drm_modeset_lock_all() is still
+used.
+
+As a reference, take a look at the conversions already completed in drm core.
+
+Contact: Sean Paul, respective driver maintainers
+
 Core refactorings
 =================
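As an illustration of the conversion this TODO entry asks for, here is a minimal before/after sketch, modeled on the drm_atomic_helper_shutdown() change later in this pull; the example_*() names are hypothetical:

/* Before: open-coded acquire context with a manual -EDEADLK backoff loop. */
static void example_locked_op_old(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (!ret)
			ret = example_do_work(dev, &ctx); /* hypothetical */
		if (ret != -EDEADLK)
			break;
		drm_modeset_backoff(&ctx);
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

/* After: the helper macros hide the context setup and the backoff loop. */
static void example_locked_op_new(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
	ret = example_do_work(dev, &ctx); /* hypothetical */
	DRM_MODESET_LOCK_ALL_END(ctx, ret);
}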

drivers/dma-buf/dma-fence.c

@@ -30,13 +30,16 @@
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
 
+static DEFINE_SPINLOCK(dma_fence_stub_lock);
+static struct dma_fence dma_fence_stub;
+
 /*
  * fence context counter: each execution context should have its own
  * fence context, this allows checking if fences belong to the same
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
 
 /**
  * DOC: DMA fences overview
@@ -68,6 +71,37 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
  * &dma_buf.resv pointer.
  */
 
+static const char *dma_fence_stub_get_name(struct dma_fence *fence)
+{
+	return "stub";
+}
+
+static const struct dma_fence_ops dma_fence_stub_ops = {
+	.get_driver_name = dma_fence_stub_get_name,
+	.get_timeline_name = dma_fence_stub_get_name,
+};
+
+/**
+ * dma_fence_get_stub - return a signaled fence
+ *
+ * Return a stub fence which is already signaled.
+ */
+struct dma_fence *dma_fence_get_stub(void)
+{
+	spin_lock(&dma_fence_stub_lock);
+	if (!dma_fence_stub.ops) {
+		dma_fence_init(&dma_fence_stub,
+			       &dma_fence_stub_ops,
+			       &dma_fence_stub_lock,
+			       0, 0);
+		dma_fence_signal_locked(&dma_fence_stub);
+	}
+	spin_unlock(&dma_fence_stub_lock);
+
+	return dma_fence_get(&dma_fence_stub);
+}
+EXPORT_SYMBOL(dma_fence_get_stub);
+
 /**
  * dma_fence_context_alloc - allocate an array of fence contexts
  * @num: amount of contexts to allocate
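For context, a minimal sketch of a caller of the new export; this mirrors the drm/syncobj conversion later in this pull, so only dma_fence_get_stub() itself is new API, and example_make_signaled() is a hypothetical name:

static void example_make_signaled(struct drm_syncobj *syncobj)
{
	/* The stub is a global, lazily initialized, already-signaled
	 * fence; dma_fence_get_stub() returns a new reference to it.
	 */
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
}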

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -1193,7 +1193,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 	int i;
 
 	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
-		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
+		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
 }
 
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,

drivers/gpu/drm/drm_atomic_helper.c

@@ -3131,26 +3131,103 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
 	struct drm_modeset_acquire_ctx ctx;
 	int ret;
 
-	drm_modeset_acquire_init(&ctx, 0);
-	while (1) {
-		ret = drm_modeset_lock_all_ctx(dev, &ctx);
-		if (!ret)
-			ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
-
-		if (ret != -EDEADLK)
-			break;
-
-		drm_modeset_backoff(&ctx);
-	}
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 
+	ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
 	if (ret)
 		DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
 
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
+	DRM_MODESET_LOCK_ALL_END(ctx, ret);
 }
 EXPORT_SYMBOL(drm_atomic_helper_shutdown);
 
+/**
+ * drm_atomic_helper_duplicate_state - duplicate an atomic state object
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Makes a copy of the current atomic state by looping over all objects and
+ * duplicating their respective states. This is used for example by suspend/
+ * resume support code to save the state prior to suspend such that it can
+ * be restored upon resume.
+ *
+ * Note that this treats atomic state as persistent between save and restore.
+ * Drivers must make sure that this is possible and won't result in confusion
+ * or erroneous behaviour.
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * A pointer to the copy of the atomic state object on success or an
+ * ERR_PTR()-encoded error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
+ */
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+				  struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	struct drm_connector *conn;
+	struct drm_connector_list_iter conn_iter;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	int err = 0;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return ERR_PTR(-ENOMEM);
+
+	state->acquire_ctx = ctx;
+
+	drm_for_each_crtc(crtc, dev) {
+		struct drm_crtc_state *crtc_state;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			err = PTR_ERR(crtc_state);
+			goto free;
+		}
+	}
+
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			err = PTR_ERR(plane_state);
+			goto free;
+		}
+	}
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(conn, &conn_iter) {
+		struct drm_connector_state *conn_state;
+
+		conn_state = drm_atomic_get_connector_state(state, conn);
+		if (IS_ERR(conn_state)) {
+			err = PTR_ERR(conn_state);
+			drm_connector_list_iter_end(&conn_iter);
+			goto free;
+		}
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	/* clear the acquire context so that it isn't accidentally reused */
+	state->acquire_ctx = NULL;
+
+free:
+	if (err < 0) {
+		drm_atomic_state_put(state);
+		state = ERR_PTR(err);
+	}
+
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
+
 /**
  * drm_atomic_helper_suspend - subsystem-level suspend helper
  * @dev: DRM device
@@ -3182,14 +3259,10 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
 	struct drm_atomic_state *state;
 	int err;
 
-	drm_modeset_acquire_init(&ctx, 0);
+	/* This can never be returned, but it makes the compiler happy */
+	state = ERR_PTR(-EINVAL);
 
-retry:
-	err = drm_modeset_lock_all_ctx(dev, &ctx);
-	if (err < 0) {
-		state = ERR_PTR(err);
-		goto unlock;
-	}
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
 
 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
 	if (IS_ERR(state))
@@ -3203,13 +3276,10 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
 	}
 
 unlock:
-	if (PTR_ERR(state) == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	}
+	DRM_MODESET_LOCK_ALL_END(ctx, err);
+	if (err)
+		return ERR_PTR(err);
 
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
 	return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_suspend);
@@ -3232,7 +3302,7 @@ EXPORT_SYMBOL(drm_atomic_helper_suspend);
 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 					      struct drm_modeset_acquire_ctx *ctx)
 {
-	int i;
+	int i, ret;
 	struct drm_plane *plane;
 	struct drm_plane_state *new_plane_state;
 	struct drm_connector *connector;
@@ -3251,7 +3321,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 	for_each_new_connector_in_state(state, connector, new_conn_state, i)
 		state->connectors[i].old_state = connector->state;
 
-	return drm_atomic_commit(state);
+	ret = drm_atomic_commit(state);
+
+	state->acquire_ctx = NULL;
+
+	return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
 
@@ -3279,23 +3353,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
 
 	drm_mode_config_reset(dev);
 
-	drm_modeset_acquire_init(&ctx, 0);
-	while (1) {
-		err = drm_modeset_lock_all_ctx(dev, &ctx);
-		if (err)
-			goto out;
-
-		err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
-out:
-		if (err != -EDEADLK)
-			break;
-
-		drm_modeset_backoff(&ctx);
-	}
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
+
+	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
 
+	DRM_MODESET_LOCK_ALL_END(ctx, err);
 	drm_atomic_state_put(state);
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
 
 	return err;
 }
@@ -3434,3 +3497,73 @@ int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
 
 	return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
+
+/**
+ * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @size: size of the tables
+ * @ctx: lock acquire context
+ *
+ * Implements support for legacy gamma correction table for drivers
+ * that support color management through the DEGAMMA_LUT/GAMMA_LUT
+ * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
+ * how the atomic color management and gamma tables work.
+ */
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+				       u16 *red, u16 *green, u16 *blue,
+				       uint32_t size,
+				       struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	struct drm_property_blob *blob = NULL;
+	struct drm_color_lut *blob_data;
+	int i, ret = 0;
+	bool replaced;
+
+	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state)
+		return -ENOMEM;
+
+	blob = drm_property_create_blob(dev,
+					sizeof(struct drm_color_lut) * size,
+					NULL);
+	if (IS_ERR(blob)) {
+		ret = PTR_ERR(blob);
+		blob = NULL;
+		goto fail;
+	}
+
+	/* Prepare GAMMA_LUT with the legacy values. */
+	blob_data = blob->data;
+	for (i = 0; i < size; i++) {
+		blob_data[i].red = red[i];
+		blob_data[i].green = green[i];
+		blob_data[i].blue = blue[i];
+	}
+
+	state->acquire_ctx = ctx;
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
+
+	/* Reset DEGAMMA_LUT and CTM properties. */
+	replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
+	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
+	crtc_state->color_mgmt_changed |= replaced;
+
+	ret = drm_atomic_commit(state);
+
+fail:
+	drm_atomic_state_put(state);
+	drm_property_blob_put(blob);
+	return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
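The suspend kernel-doc above pairs with drm_atomic_helper_resume(); a minimal sketch of the intended driver usage (the example_* names and the saved_state field are hypothetical, the helper calls are real):

static int example_pm_suspend(struct device *dev)
{
	struct example_drm *priv = dev_get_drvdata(dev); /* hypothetical */

	/* Duplicates the current atomic state and disables all outputs. */
	priv->saved_state = drm_atomic_helper_suspend(priv->drm);

	return PTR_ERR_OR_ZERO(priv->saved_state);
}

static int example_pm_resume(struct device *dev)
{
	struct example_drm *priv = dev_get_drvdata(dev);

	/* Consumes the state that was duplicated at suspend time. */
	return drm_atomic_helper_resume(priv->drm, priv->saved_state);
}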

drivers/gpu/drm/drm_atomic_state_helper.c

@@ -393,93 +393,6 @@ drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 
-/**
- * drm_atomic_helper_duplicate_state - duplicate an atomic state object
- * @dev: DRM device
- * @ctx: lock acquisition context
- *
- * Makes a copy of the current atomic state by looping over all objects and
- * duplicating their respective states. This is used for example by suspend/
- * resume support code to save the state prior to suspend such that it can
- * be restored upon resume.
- *
- * Note that this treats atomic state as persistent between save and restore.
- * Drivers must make sure that this is possible and won't result in confusion
- * or erroneous behaviour.
- *
- * Note that if callers haven't already acquired all modeset locks this might
- * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
- *
- * Returns:
- * A pointer to the copy of the atomic state object on success or an
- * ERR_PTR()-encoded error code on failure.
- *
- * See also:
- * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
- */
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
-				  struct drm_modeset_acquire_ctx *ctx)
-{
-	struct drm_atomic_state *state;
-	struct drm_connector *conn;
-	struct drm_connector_list_iter conn_iter;
-	struct drm_plane *plane;
-	struct drm_crtc *crtc;
-	int err = 0;
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state)
-		return ERR_PTR(-ENOMEM);
-
-	state->acquire_ctx = ctx;
-
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_crtc_state *crtc_state;
-
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
-		if (IS_ERR(crtc_state)) {
-			err = PTR_ERR(crtc_state);
-			goto free;
-		}
-	}
-
-	drm_for_each_plane(plane, dev) {
-		struct drm_plane_state *plane_state;
-
-		plane_state = drm_atomic_get_plane_state(state, plane);
-		if (IS_ERR(plane_state)) {
-			err = PTR_ERR(plane_state);
-			goto free;
-		}
-	}
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(conn, &conn_iter) {
-		struct drm_connector_state *conn_state;
-
-		conn_state = drm_atomic_get_connector_state(state, conn);
-		if (IS_ERR(conn_state)) {
-			err = PTR_ERR(conn_state);
-			drm_connector_list_iter_end(&conn_iter);
-			goto free;
-		}
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	/* clear the acquire context so that it isn't accidentally reused */
-	state->acquire_ctx = NULL;
-
-free:
-	if (err < 0) {
-		drm_atomic_state_put(state);
-		state = ERR_PTR(err);
-	}
-
-	return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
-
 /**
  * __drm_atomic_helper_connector_destroy_state - release connector state
  * @state: connector state object to release
@@ -515,76 +428,6 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
 
-/**
- * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
- * @crtc: CRTC object
- * @red: red correction table
- * @green: green correction table
- * @blue: green correction table
- * @size: size of the tables
- * @ctx: lock acquire context
- *
- * Implements support for legacy gamma correction table for drivers
- * that support color management through the DEGAMMA_LUT/GAMMA_LUT
- * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
- * how the atomic color management and gamma tables work.
- */
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
-				       u16 *red, u16 *green, u16 *blue,
-				       uint32_t size,
-				       struct drm_modeset_acquire_ctx *ctx)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_atomic_state *state;
-	struct drm_crtc_state *crtc_state;
-	struct drm_property_blob *blob = NULL;
-	struct drm_color_lut *blob_data;
-	int i, ret = 0;
-	bool replaced;
-
-	state = drm_atomic_state_alloc(crtc->dev);
-	if (!state)
-		return -ENOMEM;
-
-	blob = drm_property_create_blob(dev,
-					sizeof(struct drm_color_lut) * size,
-					NULL);
-	if (IS_ERR(blob)) {
-		ret = PTR_ERR(blob);
-		blob = NULL;
-		goto fail;
-	}
-
-	/* Prepare GAMMA_LUT with the legacy values. */
-	blob_data = blob->data;
-	for (i = 0; i < size; i++) {
-		blob_data[i].red = red[i];
-		blob_data[i].green = green[i];
-		blob_data[i].blue = blue[i];
-	}
-
-	state->acquire_ctx = ctx;
-	crtc_state = drm_atomic_get_crtc_state(state, crtc);
-	if (IS_ERR(crtc_state)) {
-		ret = PTR_ERR(crtc_state);
-		goto fail;
-	}
-
-	/* Reset DEGAMMA_LUT and CTM properties. */
-	replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
-	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
-	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
-	crtc_state->color_mgmt_changed |= replaced;
-
-	ret = drm_atomic_commit(state);
-
-fail:
-	drm_atomic_state_put(state);
-	drm_property_blob_put(blob);
-	return ret;
-}
-EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
-
 /**
  * __drm_atomic_helper_private_duplicate_state - copy atomic private state
  * @obj: CRTC object

drivers/gpu/drm/drm_color_mgmt.c

@@ -255,11 +255,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 	if (crtc_lut->gamma_size != crtc->gamma_size)
 		return -EINVAL;
 
-	drm_modeset_acquire_init(&ctx, 0);
-retry:
-	ret = drm_modeset_lock_all_ctx(dev, &ctx);
-	if (ret)
-		goto out;
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 
 	size = crtc_lut->gamma_size * (sizeof(uint16_t));
 	r_base = crtc->gamma_store;
@@ -284,13 +280,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 				     crtc->gamma_size, &ctx);
 
 out:
-	if (ret == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	}
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-
+	DRM_MODESET_LOCK_ALL_END(ctx, ret);
 	return ret;
 }

drivers/gpu/drm/drm_crtc.c

@@ -572,9 +572,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	struct drm_mode_crtc *crtc_req = data;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
-	struct drm_connector **connector_set, *connector;
-	struct drm_framebuffer *fb;
-	struct drm_display_mode *mode;
+	struct drm_connector **connector_set = NULL, *connector;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_display_mode *mode = NULL;
 	struct drm_mode_set set;
 	uint32_t __user *set_connectors_ptr;
 	struct drm_modeset_acquire_ctx ctx;
@@ -601,15 +601,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	plane = crtc->primary;
 
 	mutex_lock(&crtc->dev->mode_config.mutex);
-	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-retry:
-	connector_set = NULL;
-	fb = NULL;
-	mode = NULL;
-
-	ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
-	if (ret)
-		goto out;
+	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
+				   DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
 
 	if (crtc_req->mode_valid) {
 		/* If we have a mode we need a framebuffer. */
@@ -768,13 +761,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	}
 	kfree(connector_set);
 	drm_mode_destroy(dev, mode);
-	if (ret == -EDEADLK) {
-		ret = drm_modeset_backoff(&ctx);
-		if (!ret)
-			goto retry;
-	}
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
+
+	/* In case we need to retry... */
+	connector_set = NULL;
+	fb = NULL;
+	mode = NULL;
+
+	DRM_MODESET_LOCK_ALL_END(ctx, ret);
 	mutex_unlock(&crtc->dev->mode_config.mutex);
 
 	return ret;

drivers/gpu/drm/drm_modeset_lock.c

@@ -56,6 +56,10 @@
  *     drm_modeset_drop_locks(ctx);
  *     drm_modeset_acquire_fini(ctx);
  *
+ * For convenience this control flow is implemented in
+ * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
+ * where all modeset locks need to be taken through drm_modeset_lock_all_ctx().
+ *
  * If all that is needed is a single modeset lock, then the &struct
  * drm_modeset_acquire_ctx is not needed and the locking can be simplified
  * by passing a NULL instead of ctx in the drm_modeset_lock() call or
@@ -383,6 +387,8 @@ EXPORT_SYMBOL(drm_modeset_unlock);
 * Locks acquired with this function should be released by calling the
 * drm_modeset_drop_locks() function on @ctx.
 *
+ * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
+ *
 * Returns: 0 on success or a negative error-code on failure.
 */
 int drm_modeset_lock_all_ctx(struct drm_device *dev,

drivers/gpu/drm/drm_plane.c

@@ -767,11 +767,8 @@ static int setplane_internal(struct drm_plane *plane,
 	struct drm_modeset_acquire_ctx ctx;
 	int ret;
 
-	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-retry:
-	ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
-	if (ret)
-		goto fail;
+	DRM_MODESET_LOCK_ALL_BEGIN(plane->dev, ctx,
+				   DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
 
 	if (drm_drv_uses_atomic_modeset(plane->dev))
 		ret = __setplane_atomic(plane, crtc, fb,
@@ -782,14 +779,7 @@ static int setplane_internal(struct drm_plane *plane,
 				 crtc_x, crtc_y, crtc_w, crtc_h,
 				 src_x, src_y, src_w, src_h, &ctx);
 
-fail:
-	if (ret == -EDEADLK) {
-		ret = drm_modeset_backoff(&ctx);
-		if (!ret)
-			goto retry;
-	}
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
+	DRM_MODESET_LOCK_ALL_END(ctx, ret);
 
 	return ret;
 }

drivers/gpu/drm/drm_syncobj.c

@@ -56,22 +56,6 @@
 #include "drm_internal.h"
 #include <drm/drm_syncobj.h>
 
-struct drm_syncobj_stub_fence {
-	struct dma_fence base;
-	spinlock_t lock;
-};
-
-static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
-{
-	return "syncobjstub";
-}
-
-static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
-	.get_driver_name = drm_syncobj_stub_fence_get_name,
-	.get_timeline_name = drm_syncobj_stub_fence_get_name,
-};
-
 /**
  * drm_syncobj_find - lookup and reference a sync object.
  * @file_private: drm file private pointer
@@ -156,13 +140,11 @@ void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
 /**
  * drm_syncobj_replace_fence - replace fence in a sync object.
  * @syncobj: Sync object to replace fence in
- * @point: timeline point
  * @fence: fence to install in sync file.
  *
- * This replaces the fence on a sync object, or a timeline point fence.
+ * This replaces the fence on a sync object.
  */
 void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
-			       u64 point,
 			       struct dma_fence *fence)
 {
 	struct dma_fence *old_fence;
@@ -190,23 +172,18 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 }
 EXPORT_SYMBOL(drm_syncobj_replace_fence);
 
-static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+/**
+ * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
+ * @syncobj: sync object to assign the fence on
+ *
+ * Assign an already signaled stub fence to the sync object.
+ */
+static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
 {
-	struct drm_syncobj_stub_fence *fence;
+	struct dma_fence *fence = dma_fence_get_stub();
 
-	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (fence == NULL)
-		return -ENOMEM;
-
-	spin_lock_init(&fence->lock);
-	dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
-		       &fence->lock, 0, 0);
-	dma_fence_signal(&fence->base);
-
-	drm_syncobj_replace_fence(syncobj, 0, &fence->base);
-
-	dma_fence_put(&fence->base);
-
-	return 0;
+	drm_syncobj_replace_fence(syncobj, fence);
+	dma_fence_put(fence);
 }
 
 /**
@@ -254,7 +231,7 @@ void drm_syncobj_free(struct kref *kref)
 	struct drm_syncobj *syncobj = container_of(kref,
 						   struct drm_syncobj,
 						   refcount);
-	drm_syncobj_replace_fence(syncobj, 0, NULL);
+	drm_syncobj_replace_fence(syncobj, NULL);
 	kfree(syncobj);
 }
 EXPORT_SYMBOL(drm_syncobj_free);
@@ -274,7 +251,6 @@ EXPORT_SYMBOL(drm_syncobj_free);
 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 		       struct dma_fence *fence)
 {
-	int ret;
 	struct drm_syncobj *syncobj;
 
 	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
@@ -285,16 +261,11 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 	INIT_LIST_HEAD(&syncobj->cb_list);
 	spin_lock_init(&syncobj->lock);
 
-	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
-		ret = drm_syncobj_assign_null_handle(syncobj);
-		if (ret < 0) {
-			drm_syncobj_put(syncobj);
-			return ret;
-		}
-	}
+	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
+		drm_syncobj_assign_null_handle(syncobj);
 
 	if (fence)
-		drm_syncobj_replace_fence(syncobj, 0, fence);
+		drm_syncobj_replace_fence(syncobj, fence);
 
 	*out_syncobj = syncobj;
 	return 0;
@@ -479,7 +450,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
 		return -ENOENT;
 	}
 
-	drm_syncobj_replace_fence(syncobj, 0, fence);
+	drm_syncobj_replace_fence(syncobj, fence);
 	dma_fence_put(fence);
 	drm_syncobj_put(syncobj);
 	return 0;
@@ -950,7 +921,7 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	for (i = 0; i < args->count_handles; i++)
-		drm_syncobj_replace_fence(syncobjs[i], 0, NULL);
+		drm_syncobj_replace_fence(syncobjs[i], NULL);
 
 	drm_syncobj_array_free(syncobjs, args->count_handles);
 
@@ -982,11 +953,8 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < args->count_handles; i++) {
-		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
-		if (ret < 0)
-			break;
-	}
+	for (i = 0; i < args->count_handles; i++)
+		drm_syncobj_assign_null_handle(syncobjs[i]);
 
 	drm_syncobj_array_free(syncobjs, args->count_handles);

drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -2191,7 +2191,7 @@ signal_fence_array(struct i915_execbuffer *eb,
 		if (!(flags & I915_EXEC_FENCE_SIGNAL))
 			continue;
 
-		drm_syncobj_replace_fence(syncobj, 0, fence);
+		drm_syncobj_replace_fence(syncobj, fence);
 	}
 }

drivers/gpu/drm/meson/meson_plane.c

@@ -80,6 +80,7 @@
 struct meson_plane {
 	struct drm_plane base;
 	struct meson_drm *priv;
+	bool enabled;
 };
 #define to_meson_plane(x) container_of(x, struct meson_plane, base)
 
@@ -304,6 +305,15 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 	priv->viu.osd1_stride = fb->pitches[0];
 	priv->viu.osd1_height = fb->height;
 
+	if (!meson_plane->enabled) {
+		/* Reset OSD1 before enabling it on GXL+ SoCs */
+		if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+		    meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+			meson_viu_osd1_reset(priv);
+
+		meson_plane->enabled = true;
+	}
+
 	spin_unlock_irqrestore(&priv->drm->event_lock, flags);
 }
 
@@ -317,6 +327,8 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
 	writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
 			    priv->io_base + _REG(VPP_MISC));
 
+	meson_plane->enabled = false;
+
 }
 
 static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {

drivers/gpu/drm/meson/meson_viu.c

@@ -296,6 +296,33 @@ static void meson_viu_load_matrix(struct meson_drm *priv)
 			      true);
 }
 
+/* VIU OSD1 Reset as workaround for GXL+ Alpha OSD Bug */
+void meson_viu_osd1_reset(struct meson_drm *priv)
+{
+	uint32_t osd1_fifo_ctrl_stat, osd1_ctrl_stat2;
+
+	/* Save these 2 registers state */
+	osd1_fifo_ctrl_stat = readl_relaxed(
+				priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+	osd1_ctrl_stat2 = readl_relaxed(
+				priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+
+	/* Reset OSD1 */
+	writel_bits_relaxed(BIT(0), BIT(0),
+			    priv->io_base + _REG(VIU_SW_RESET));
+	writel_bits_relaxed(BIT(0), 0,
+			    priv->io_base + _REG(VIU_SW_RESET));
+
+	/* Rewrite these registers state lost in the reset */
+	writel_relaxed(osd1_fifo_ctrl_stat,
+		       priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+	writel_relaxed(osd1_ctrl_stat2,
+		       priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+
+	/* Reload the conversion matrix */
+	meson_viu_load_matrix(priv);
+}
+
 void meson_viu_init(struct meson_drm *priv)
 {
 	uint32_t reg;

drivers/gpu/drm/meson/meson_viu.h

@@ -59,6 +59,7 @@
 #define OSD_REPLACE_EN		BIT(14)
 #define OSD_REPLACE_SHIFT	6
 
+void meson_viu_osd1_reset(struct meson_drm *priv);
 void meson_viu_init(struct meson_drm *priv);
 
 #endif /* __MESON_VIU_H */

drivers/gpu/drm/panel/panel-simple.c

@@ -618,6 +618,30 @@ static const struct panel_desc auo_g070vvn01 = {
 	},
 };
 
+static const struct drm_display_mode auo_g101evn010_mode = {
+	.clock = 68930,
+	.hdisplay = 1280,
+	.hsync_start = 1280 + 82,
+	.hsync_end = 1280 + 82 + 2,
+	.htotal = 1280 + 82 + 2 + 84,
+	.vdisplay = 800,
+	.vsync_start = 800 + 8,
+	.vsync_end = 800 + 8 + 2,
+	.vtotal = 800 + 8 + 2 + 6,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc auo_g101evn010 = {
+	.modes = &auo_g101evn010_mode,
+	.num_modes = 1,
+	.bpc = 6,
+	.size = {
+		.width = 216,
+		.height = 135,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
 static const struct drm_display_mode auo_g104sn02_mode = {
 	.clock = 40000,
 	.hdisplay = 800,
@@ -2493,6 +2517,9 @@ static const struct of_device_id platform_of_match[] = {
 	}, {
 		.compatible = "auo,g070vvn01",
 		.data = &auo_g070vvn01,
+	}, {
+		.compatible = "auo,g101evn010",
+		.data = &auo_g101evn010,
 	}, {
 		.compatible = "auo,g104sn02",
 		.data = &auo_g104sn02,
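For reference, the refresh rate implied by the new mode can be checked from the timings themselves (a sanity check on the numbers, not part of the patch):

\[ h_{\mathrm{total}} = 1280 + 82 + 2 + 84 = 1448, \qquad v_{\mathrm{total}} = 800 + 8 + 2 + 6 = 816 \]
\[ f = \frac{68930 \times 10^{3}\ \mathrm{Hz}}{1448 \times 816} \approx 58.3\ \mathrm{Hz} \]

so the .vrefresh = 60 entry records the panel's nominal rate, slightly above what the listed pixel clock actually produces.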

drivers/gpu/drm/pl111/pl111_vexpress.c

@@ -55,6 +55,8 @@ int pl111_vexpress_clcd_init(struct device *dev,
 		}
 	}
 
+	of_node_put(root);
+
 	/*
 	 * If there is a coretile HDLCD and it has a driver,
 	 * do not mux the CLCD on the motherboard to the DVI.

drivers/gpu/drm/rcar-du/rcar_du_drv.c

@@ -21,6 +21,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 
 #include "rcar_du_drv.h"
@@ -392,19 +393,11 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
  * DRM operations
  */
 
-static void rcar_du_lastclose(struct drm_device *dev)
-{
-	struct rcar_du_device *rcdu = dev->dev_private;
-
-	drm_fbdev_cma_restore_mode(rcdu->fbdev);
-}
-
 DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
 
 static struct drm_driver rcar_du_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
 				| DRIVER_ATOMIC,
-	.lastclose		= rcar_du_lastclose,
 	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops		= &drm_gem_cma_vm_ops,
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
@@ -460,9 +453,6 @@ static int rcar_du_remove(struct platform_device *pdev)
 
 	drm_dev_unregister(ddev);
 
-	if (rcdu->fbdev)
-		drm_fbdev_cma_fini(rcdu->fbdev);
-
 	drm_kms_helper_poll_fini(ddev);
 	drm_mode_config_cleanup(ddev);
 
@@ -522,6 +512,8 @@ static int rcar_du_probe(struct platform_device *pdev)
 
 	DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
 
+	drm_fbdev_generic_setup(ddev, 32);
+
 	return 0;
 
 error:

drivers/gpu/drm/rcar-du/rcar_du_drv.h

@@ -20,7 +20,6 @@
 struct clk;
 struct device;
 struct drm_device;
-struct drm_fbdev_cma;
 struct rcar_du_device;
 
 #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK	BIT(0)	/* Per-CRTC IRQ and clock */
@@ -78,7 +77,6 @@ struct rcar_du_device {
 	void __iomem *mmio;
 
 	struct drm_device *ddev;
-	struct drm_fbdev_cma *fbdev;
 
 	struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
 	unsigned int num_crtcs;

drivers/gpu/drm/rcar-du/rcar_du_kms.c

@@ -255,13 +255,6 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 	return drm_gem_fb_create(dev, file_priv, mode_cmd);
 }
 
-static void rcar_du_output_poll_changed(struct drm_device *dev)
-{
-	struct rcar_du_device *rcdu = dev->dev_private;
-
-	drm_fbdev_cma_hotplug_event(rcdu->fbdev);
-}
-
 /* -----------------------------------------------------------------------------
  * Atomic Check and Update
  */
@@ -308,7 +301,6 @@ static const struct drm_mode_config_helper_funcs rcar_du_mode_config_helper = {
 
 static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
 	.fb_create = rcar_du_fb_create,
-	.output_poll_changed = rcar_du_output_poll_changed,
 	.atomic_check = rcar_du_atomic_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
@@ -543,7 +535,6 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 	struct drm_device *dev = rcdu->ddev;
 	struct drm_encoder *encoder;
-	struct drm_fbdev_cma *fbdev;
 	unsigned int dpad0_sources;
 	unsigned int num_encoders;
 	unsigned int num_groups;
@@ -682,17 +673,5 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 
 	drm_kms_helper_poll_init(dev);
 
-	if (dev->mode_config.num_connector) {
-		fbdev = drm_fbdev_cma_init(dev, 32,
-					   dev->mode_config.num_connector);
-		if (IS_ERR(fbdev))
-			return PTR_ERR(fbdev);
-
-		rcdu->fbdev = fbdev;
-	} else {
-		dev_info(rcdu->dev,
-			 "no connector found, disabling fbdev emulation\n");
-	}
-
 	return 0;
 }

drivers/gpu/drm/sun4i/sun4i_drv.c

@@ -410,6 +410,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
 	{ .compatible = "allwinner,sun8i-v3s-display-engine" },
 	{ .compatible = "allwinner,sun9i-a80-display-engine" },
 	{ .compatible = "allwinner,sun50i-a64-display-engine" },
+	{ .compatible = "allwinner,sun50i-h6-display-engine" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);

drivers/gpu/drm/tinydrm/repaper.c

@@ -108,12 +108,11 @@ static int repaper_spi_transfer(struct spi_device *spi, u8 header,
 
 	/* Stack allocated tx? */
 	if (tx && len <= 32) {
-		txbuf = kmalloc(len, GFP_KERNEL);
+		txbuf = kmemdup(tx, len, GFP_KERNEL);
 		if (!txbuf) {
 			ret = -ENOMEM;
 			goto out_free;
 		}
-		memcpy(txbuf, tx, len);
 	}
 
 	if (rx) {

drivers/gpu/drm/v3d/v3d_bo.c

@@ -293,6 +293,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
 	bo->resv = attach->dmabuf->resv;
 
 	bo->sgt = sgt;
+	obj->import_attach = attach;
 	v3d_bo_get_pages(bo);
 
 	v3d_mmu_insert_ptes(bo);

drivers/gpu/drm/v3d/v3d_drv.c

@@ -112,10 +112,15 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
 		return 0;
 	}
 
-	/* Any params that aren't just register reads would go here. */
-
-	DRM_DEBUG("Unknown parameter %d\n", args->param);
-	return -EINVAL;
+	switch (args->param) {
+	case DRM_V3D_PARAM_SUPPORTS_TFU:
+		args->value = 1;
+		return 0;
+	default:
+		DRM_DEBUG("Unknown parameter %d\n", args->param);
+		return -EINVAL;
+	}
 }
 
 static int
@@ -170,7 +175,8 @@ static const struct file_operations v3d_drm_fops = {
 /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
  * protection between clients. Note that render nodes would be
  * able to submit CLs that could access BOs from clients authenticated
- * with the master node.
+ * with the master node. The TFU doesn't use the GMP, so it would
+ * need to stay DRM_AUTH until we do buffer size/offset validation.
 */
 static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
@@ -179,6 +185,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
 };
 
 static const struct vm_operations_struct v3d_vm_ops = {
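A minimal userspace probe for the new parameter, assuming the usual three-field drm_v3d_get_param layout (param, pad, value) from the v3d uapi header, which this page does not show:

#include <sys/ioctl.h>
#include <drm/v3d_drm.h>

static int v3d_supports_tfu(int fd)
{
	struct drm_v3d_get_param get = {
		.param = DRM_V3D_PARAM_SUPPORTS_TFU,
	};

	/* Older kernels reject unknown params with -EINVAL. */
	if (ioctl(fd, DRM_IOCTL_V3D_GET_PARAM, &get) != 0)
		return 0;

	return get.value == 1;
}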

drivers/gpu/drm/v3d/v3d_drv.h

@@ -7,19 +7,18 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_gem.h>
 #include <drm/gpu_scheduler.h>
+#include "uapi/drm/v3d_drm.h"
 
 #define GMP_GRANULARITY (128 * 1024)
 
-/* Enum for each of the V3D queues. We maintain various queue
- * tracking as an array because at some point we'll want to support
- * the TFU (texture formatting unit) as another queue.
- */
+/* Enum for each of the V3D queues. */
 enum v3d_queue {
 	V3D_BIN,
 	V3D_RENDER,
+	V3D_TFU,
 };
 
-#define V3D_MAX_QUEUES (V3D_RENDER + 1)
+#define V3D_MAX_QUEUES (V3D_TFU + 1)
 
 struct v3d_queue_state {
 	struct drm_gpu_scheduler sched;
@@ -68,6 +67,7 @@ struct v3d_dev {
 
 	struct v3d_exec_info *bin_job;
 	struct v3d_exec_info *render_job;
+	struct v3d_tfu_job *tfu_job;
 
 	struct v3d_queue_state queue[V3D_MAX_QUEUES];
 
@@ -218,6 +218,25 @@ struct v3d_exec_info {
 	u32 qma, qms, qts;
 };
 
+struct v3d_tfu_job {
+	struct drm_sched_job base;
+
+	struct drm_v3d_submit_tfu args;
+
+	/* An optional fence userspace can pass in for the job to depend on. */
+	struct dma_fence *in_fence;
+
+	/* v3d fence to be signaled by IRQ handler when the job is complete. */
+	struct dma_fence *done_fence;
+
+	struct v3d_dev *v3d;
+
+	struct kref refcount;
+
+	/* This is the array of BOs that were looked up at the start of exec. */
+	struct v3d_bo *bo[4];
+};
+
 /**
  * _wait_for - magic (register) wait macro
  *
@@ -281,9 +300,12 @@ int v3d_gem_init(struct drm_device *dev);
 void v3d_gem_destroy(struct drm_device *dev);
 int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
 int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv);
 void v3d_exec_put(struct v3d_exec_info *exec);
+void v3d_tfu_job_put(struct v3d_tfu_job *exec);
 void v3d_reset(struct v3d_dev *v3d);
 void v3d_invalidate_caches(struct v3d_dev *v3d);
 void v3d_flush_caches(struct v3d_dev *v3d);

drivers/gpu/drm/v3d/v3d_fence.c

@@ -29,10 +29,16 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
 {
 	struct v3d_fence *f = to_v3d_fence(fence);
 
-	if (f->queue == V3D_BIN)
+	switch (f->queue) {
+	case V3D_BIN:
 		return "v3d-bin";
-	else
+	case V3D_RENDER:
 		return "v3d-render";
+	case V3D_TFU:
+		return "v3d-tfu";
+	default:
+		return NULL;
+	}
 }
 
 const struct dma_fence_ops v3d_fence_ops = {

drivers/gpu/drm/v3d/v3d_gem.c

@@ -207,26 +207,26 @@ v3d_flush_caches(struct v3d_dev *v3d)
 }
 
 static void
-v3d_attach_object_fences(struct v3d_exec_info *exec)
+v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
+			 struct dma_fence *fence)
 {
-	struct dma_fence *out_fence = exec->render_done_fence;
 	int i;
 
-	for (i = 0; i < exec->bo_count; i++) {
+	for (i = 0; i < bo_count; i++) {
 		/* XXX: Use shared fences for read-only objects. */
-		reservation_object_add_excl_fence(exec->bo[i]->resv, out_fence);
+		reservation_object_add_excl_fence(bos[i]->resv, fence);
 	}
 }
 
 static void
-v3d_unlock_bo_reservations(struct drm_device *dev,
-			   struct v3d_exec_info *exec,
+v3d_unlock_bo_reservations(struct v3d_bo **bos,
+			   int bo_count,
 			   struct ww_acquire_ctx *acquire_ctx)
 {
 	int i;
 
-	for (i = 0; i < exec->bo_count; i++)
-		ww_mutex_unlock(&exec->bo[i]->resv->lock);
+	for (i = 0; i < bo_count; i++)
+		ww_mutex_unlock(&bos[i]->resv->lock);
 
 	ww_acquire_fini(acquire_ctx);
 }
@@ -239,8 +239,8 @@ v3d_unlock_bo_reservations(struct drm_device *dev,
  * to v3d, so we don't attach dma-buf fences to them.
  */
 static int
-v3d_lock_bo_reservations(struct drm_device *dev,
-			 struct v3d_exec_info *exec,
+v3d_lock_bo_reservations(struct v3d_bo **bos,
+			 int bo_count,
 			 struct ww_acquire_ctx *acquire_ctx)
 {
 	int contended_lock = -1;
@@ -250,7 +250,7 @@ v3d_lock_bo_reservations(struct drm_device *dev,
 retry:
 	if (contended_lock != -1) {
-		struct v3d_bo *bo = exec->bo[contended_lock];
+		struct v3d_bo *bo = bos[contended_lock];
 
 		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
 						       acquire_ctx);
@@ -260,20 +260,20 @@ v3d_lock_bo_reservations(struct drm_device *dev,
 		}
 	}
 
-	for (i = 0; i < exec->bo_count; i++) {
+	for (i = 0; i < bo_count; i++) {
 		if (i == contended_lock)
 			continue;
 
-		ret = ww_mutex_lock_interruptible(&exec->bo[i]->resv->lock,
+		ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
 						  acquire_ctx);
 		if (ret) {
 			int j;
 
 			for (j = 0; j < i; j++)
-				ww_mutex_unlock(&exec->bo[j]->resv->lock);
+				ww_mutex_unlock(&bos[j]->resv->lock);
 
 			if (contended_lock != -1 && contended_lock >= i) {
-				struct v3d_bo *bo = exec->bo[contended_lock];
+				struct v3d_bo *bo = bos[contended_lock];
 
 				ww_mutex_unlock(&bo->resv->lock);
 			}
@@ -293,10 +293,11 @@ v3d_lock_bo_reservations(struct drm_device *dev,
 	/* Reserve space for our shared (read-only) fence references,
 	 * before we commit the CL to the hardware.
 	 */
-	for (i = 0; i < exec->bo_count; i++) {
-		ret = reservation_object_reserve_shared(exec->bo[i]->resv, 1);
+	for (i = 0; i < bo_count; i++) {
+		ret = reservation_object_reserve_shared(bos[i]->resv, 1);
 		if (ret) {
-			v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
+			v3d_unlock_bo_reservations(bos, bo_count,
+						   acquire_ctx);
 			return ret;
 		}
 	}
@@ -419,6 +420,33 @@ void v3d_exec_put(struct v3d_exec_info *exec)
 	kref_put(&exec->refcount, v3d_exec_cleanup);
 }
 
+static void
+v3d_tfu_job_cleanup(struct kref *ref)
+{
+	struct v3d_tfu_job *job = container_of(ref, struct v3d_tfu_job,
+					       refcount);
+	struct v3d_dev *v3d = job->v3d;
+	unsigned int i;
+
+	dma_fence_put(job->in_fence);
+	dma_fence_put(job->done_fence);
+
+	for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
+		if (job->bo[i])
+			drm_gem_object_put_unlocked(&job->bo[i]->base);
+	}
+
+	pm_runtime_mark_last_busy(v3d->dev);
+	pm_runtime_put_autosuspend(v3d->dev);
+
+	kfree(job);
+}
+
+void v3d_tfu_job_put(struct v3d_tfu_job *job)
+{
+	kref_put(&job->refcount, v3d_tfu_job_cleanup);
+}
+
 int
 v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
@@ -493,6 +521,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	struct drm_syncobj *sync_out;
 	int ret = 0;
 
+	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
 	if (args->pad != 0) {
 		DRM_INFO("pad must be zero: %d\n", args->pad);
 		return -EINVAL;
@@ -536,7 +566,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto fail;
 
-	ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
+	ret = v3d_lock_bo_reservations(exec->bo, exec->bo_count,
+				       &acquire_ctx);
 	if (ret)
 		goto fail;
 
@@ -570,15 +601,15 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 				   &v3d_priv->sched_entity[V3D_RENDER]);
 	mutex_unlock(&v3d->sched_lock);
 
-	v3d_attach_object_fences(exec);
+	v3d_attach_object_fences(exec->bo, exec->bo_count,
+				 exec->render_done_fence);
 
-	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+	v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
 
 	/* Update the return sync object for the */
 	sync_out = drm_syncobj_find(file_priv, args->out_sync);
 	if (sync_out) {
-		drm_syncobj_replace_fence(sync_out, 0,
-					  exec->render_done_fence);
+		drm_syncobj_replace_fence(sync_out, exec->render_done_fence);
 		drm_syncobj_put(sync_out);
 	}
 
@@ -588,13 +619,121 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
 fail_unreserve:
 	mutex_unlock(&v3d->sched_lock);
-	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+	v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
 fail:
 	v3d_exec_put(exec);
 
 	return ret;
 }
 
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	struct drm_v3d_submit_tfu *args = data;
+	struct v3d_tfu_job *job;
+	struct ww_acquire_ctx acquire_ctx;
+	struct drm_syncobj *sync_out;
+	struct dma_fence *sched_done_fence;
+	int ret = 0;
+	int bo_count;
+
+	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
+	if (!job)
+		return -ENOMEM;
+
+	ret = pm_runtime_get_sync(v3d->dev);
+	if (ret < 0) {
+		kfree(job);
+		return ret;
+	}
+
+	kref_init(&job->refcount);
+
+	ret = drm_syncobj_find_fence(file_priv, args->in_sync,
+				     0, 0, &job->in_fence);
+	if (ret == -EINVAL)
+		goto fail;
+
+	job->args = *args;
+	job->v3d = v3d;
+
+	spin_lock(&file_priv->table_lock);
+	for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) {
+		struct drm_gem_object *bo;
+
+		if (!args->bo_handles[bo_count])
+			break;
+
+		bo = idr_find(&file_priv->object_idr,
+			      args->bo_handles[bo_count]);
+		if (!bo) {
+			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+				  bo_count, args->bo_handles[bo_count]);
+			ret = -ENOENT;
+			spin_unlock(&file_priv->table_lock);
+			goto fail;
+		}
+		drm_gem_object_get(bo);
+		job->bo[bo_count] = to_v3d_bo(bo);
+	}
+	spin_unlock(&file_priv->table_lock);
+
+	ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	mutex_lock(&v3d->sched_lock);
+	ret = drm_sched_job_init(&job->base,
+				 &v3d_priv->sched_entity[V3D_TFU],
+				 v3d_priv);
+	if (ret)
+		goto fail_unreserve;
+
+	sched_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+	kref_get(&job->refcount); /* put by scheduler job completion */
+	drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[V3D_TFU]);
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_object_fences(job->bo, bo_count, sched_done_fence);
+
+	v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+
+	/* Update the return sync object */
+	sync_out = drm_syncobj_find(file_priv, args->out_sync);
+	if (sync_out) {
+		drm_syncobj_replace_fence(sync_out, sched_done_fence);
+		drm_syncobj_put(sync_out);
+	}
+	dma_fence_put(sched_done_fence);
+
+	v3d_tfu_job_put(job);
+
+	return 0;
+
+fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
+	v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+fail:
+	v3d_tfu_job_put(job);
+
+	return ret;
+}
+
 int
 v3d_gem_init(struct drm_device *dev)
 {
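A hedged userspace sketch of the new submit path: the iia/iis/ioa/ios/icfg names and the bo_handles/in_sync/out_sync fields are taken from the ioctl code above and the V3D_TFU_* registers below, but the exact drm_v3d_submit_tfu layout lives in v3d_drm.h, which this page does not show:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/v3d_drm.h>

static int example_submit_tfu(int fd, uint32_t src_bo, uint32_t dst_bo,
			      uint32_t icfg, uint32_t src_addr,
			      uint32_t src_stride, uint32_t dst_addr,
			      uint32_t out_size)
{
	struct drm_v3d_submit_tfu job = {
		.icfg = icfg,		/* raw V3D_TFU_ICFG image, incl. IOC */
		.iia = src_addr,	/* input image address (V3D_TFU_IIA) */
		.iis = src_stride,	/* input image stride (V3D_TFU_IIS) */
		.ioa = dst_addr,	/* output image address (V3D_TFU_IOA) */
		.ios = out_size,	/* image output size (V3D_TFU_IOS) */
		.bo_handles = { src_bo, dst_bo }, /* unused slots stay 0 */
		.in_sync = 0,		/* no syncobj dependency */
		.out_sync = 0,		/* no completion syncobj requested */
	};

	return ioctl(fd, DRM_IOCTL_V3D_SUBMIT_TFU, &job);
}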

drivers/gpu/drm/v3d/v3d_irq.c

@@ -4,8 +4,8 @@
 /**
  * DOC: Interrupt management for the V3D engine
  *
- * When we take a binning or rendering flush done interrupt, we need
- * to signal the fence for that job so that the scheduler can queue up
+ * When we take a bin, render, or TFU done interrupt, we need to
+ * signal the fence for that job so that the scheduler can queue up
  * the next one and unblock any waiters.
  *
  * When we take the binner out of memory interrupt, we need to
@@ -15,6 +15,7 @@
 
 #include "v3d_drv.h"
 #include "v3d_regs.h"
+#include "v3d_trace.h"
 
 #define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
 			     V3D_INT_FLDONE |	\
@@ -23,7 +24,8 @@
 
 #define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
 			    V3D_HUB_INT_MMU_PTI |	\
-			    V3D_HUB_INT_MMU_CAP))
+			    V3D_HUB_INT_MMU_CAP |	\
+			    V3D_HUB_INT_TFUC))
 
 static void
 v3d_overflow_mem_work(struct work_struct *work)
@@ -87,12 +89,20 @@ v3d_irq(int irq, void *arg)
 	}
 
 	if (intsts & V3D_INT_FLDONE) {
-		dma_fence_signal(v3d->bin_job->bin.done_fence);
+		struct v3d_fence *fence =
+			to_v3d_fence(v3d->bin_job->bin.done_fence);
+
+		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+		dma_fence_signal(&fence->base);
 		status = IRQ_HANDLED;
 	}
 
 	if (intsts & V3D_INT_FRDONE) {
-		dma_fence_signal(v3d->render_job->render.done_fence);
+		struct v3d_fence *fence =
+			to_v3d_fence(v3d->render_job->render.done_fence);
+
+		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+		dma_fence_signal(&fence->base);
 		status = IRQ_HANDLED;
 	}
 
@@ -117,6 +127,15 @@ v3d_hub_irq(int irq, void *arg)
 	/* Acknowledge the interrupts we're handling here. */
 	V3D_WRITE(V3D_HUB_INT_CLR, intsts);
 
+	if (intsts & V3D_HUB_INT_TFUC) {
+		struct v3d_fence *fence =
+			to_v3d_fence(v3d->tfu_job->done_fence);
+
+		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+		dma_fence_signal(&fence->base);
+		status = IRQ_HANDLED;
+	}
+
 	if (intsts & (V3D_HUB_INT_MMU_WRV |
 		      V3D_HUB_INT_MMU_PTI |
 		      V3D_HUB_INT_MMU_CAP)) {

drivers/gpu/drm/v3d/v3d_regs.h

@@ -86,6 +86,55 @@
 # define V3D_TOP_GR_BRIDGE_SW_INIT_1                   0x0000c
 # define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0)
 
+#define V3D_TFU_CS                                     0x00400
+/* Stops current job, empties input fifo. */
+# define V3D_TFU_CS_TFURST                             BIT(31)
+# define V3D_TFU_CS_CVTCT_MASK                         V3D_MASK(23, 16)
+# define V3D_TFU_CS_CVTCT_SHIFT                        16
+# define V3D_TFU_CS_NFREE_MASK                         V3D_MASK(13, 8)
+# define V3D_TFU_CS_NFREE_SHIFT                        8
+# define V3D_TFU_CS_BUSY                               BIT(0)
+
+#define V3D_TFU_SU                                     0x00404
+/* Interrupt when FINTTHR input slots are free (0 = disabled) */
+# define V3D_TFU_SU_FINTTHR_MASK                       V3D_MASK(13, 8)
+# define V3D_TFU_SU_FINTTHR_SHIFT                      8
+/* Skips resetting the CRC at the start of CRC generation. */
+# define V3D_TFU_SU_CRCCHAIN                           BIT(4)
+/* skips writes, computes CRC of the image.  miplevels must be 0. */
+# define V3D_TFU_SU_CRC                                BIT(3)
+# define V3D_TFU_SU_THROTTLE_MASK                      V3D_MASK(1, 0)
+# define V3D_TFU_SU_THROTTLE_SHIFT                     0
+
+#define V3D_TFU_ICFG                                   0x00408
+/* Interrupt when the conversion is complete. */
+# define V3D_TFU_ICFG_IOC                              BIT(0)
+
+/* Input Image Address */
+#define V3D_TFU_IIA                                    0x0040c
+/* Input Chroma Address */
+#define V3D_TFU_ICA                                    0x00410
+/* Input Image Stride */
+#define V3D_TFU_IIS                                    0x00414
+/* Input Image U-Plane Address */
+#define V3D_TFU_IUA                                    0x00418
+/* Output Image Address */
+#define V3D_TFU_IOA                                    0x0041c
+/* Image Output Size */
+#define V3D_TFU_IOS                                    0x00420
+/* TFU YUV Coefficient 0 */
+#define V3D_TFU_COEF0                                  0x00424
+/* Use these regs instead of the defaults. */
+# define V3D_TFU_COEF0_USECOEF                         BIT(31)
+/* TFU YUV Coefficient 1 */
+#define V3D_TFU_COEF1                                  0x00428
+/* TFU YUV Coefficient 2 */
+#define V3D_TFU_COEF2                                  0x0042c
+/* TFU YUV Coefficient 3 */
+#define V3D_TFU_COEF3                                  0x00430
+
+#define V3D_TFU_CRC                                    0x00434
+
 /* Per-MMU registers. */
 
 #define V3D_MMUC_CONTROL                               0x01000
@@ -30,6 +30,12 @@ to_v3d_job(struct drm_sched_job *sched_job)
 	return container_of(sched_job, struct v3d_job, base);
 }
 
+static struct v3d_tfu_job *
+to_tfu_job(struct drm_sched_job *sched_job)
+{
+	return container_of(sched_job, struct v3d_tfu_job, base);
+}
+
 static void
 v3d_job_free(struct drm_sched_job *sched_job)
 {
@@ -40,6 +46,16 @@ v3d_job_free(struct drm_sched_job *sched_job)
 
 	v3d_exec_put(job->exec);
 }
 
+static void
+v3d_tfu_job_free(struct drm_sched_job *sched_job)
+{
+	struct v3d_tfu_job *job = to_tfu_job(sched_job);
+
+	drm_sched_job_cleanup(sched_job);
+
+	v3d_tfu_job_put(job);
+}
+
 /**
  * Returns the fences that the bin or render job depends on, one by one.
  * v3d_job_run() won't be called until all of them have been signaled.
@@ -78,6 +94,27 @@ v3d_job_dependency(struct drm_sched_job *sched_job,
 
 	return fence;
 }
 
+/**
+ * Returns the fences that the TFU job depends on, one by one.
+ * v3d_tfu_job_run() won't be called until all of them have been
+ * signaled.
+ */
+static struct dma_fence *
+v3d_tfu_job_dependency(struct drm_sched_job *sched_job,
+		       struct drm_sched_entity *s_entity)
+{
+	struct v3d_tfu_job *job = to_tfu_job(sched_job);
+	struct dma_fence *fence;
+
+	fence = job->in_fence;
+	if (fence) {
+		job->in_fence = NULL;
+		return fence;
+	}
+
+	return NULL;
+}
+
 static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
 {
 	struct v3d_job *job = to_v3d_job(sched_job);
@@ -149,28 +186,47 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
 	return fence;
 }
 
-static void
-v3d_job_timedout(struct drm_sched_job *sched_job)
+static struct dma_fence *
+v3d_tfu_job_run(struct drm_sched_job *sched_job)
 {
-	struct v3d_job *job = to_v3d_job(sched_job);
-	struct v3d_exec_info *exec = job->exec;
-	struct v3d_dev *v3d = exec->v3d;
-	enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
-	enum v3d_queue q;
-	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
-	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
-
-	/* If the current address or return address have changed, then
-	 * the GPU has probably made progress and we should delay the
-	 * reset.  This could fail if the GPU got in an infinite loop
-	 * in the CL, but that is pretty unlikely outside of an i-g-t
-	 * testcase.
-	 */
-	if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
-		job->timedout_ctca = ctca;
-		job->timedout_ctra = ctra;
-		return;
+	struct v3d_tfu_job *job = to_tfu_job(sched_job);
+	struct v3d_dev *v3d = job->v3d;
+	struct drm_device *dev = &v3d->drm;
+	struct dma_fence *fence;
+
+	fence = v3d_fence_create(v3d, V3D_TFU);
+	if (IS_ERR(fence))
+		return NULL;
+
+	v3d->tfu_job = job;
+	if (job->done_fence)
+		dma_fence_put(job->done_fence);
+	job->done_fence = dma_fence_get(fence);
+
+	trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
+
+	V3D_WRITE(V3D_TFU_IIA, job->args.iia);
+	V3D_WRITE(V3D_TFU_IIS, job->args.iis);
+	V3D_WRITE(V3D_TFU_ICA, job->args.ica);
+	V3D_WRITE(V3D_TFU_IUA, job->args.iua);
+	V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
+	V3D_WRITE(V3D_TFU_IOS, job->args.ios);
+	V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
+	if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
+		V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
+		V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
+		V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
 	}
+	/* ICFG kicks off the job. */
+	V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
+
+	return fence;
+}
+
+static void
+v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
+{
+	enum v3d_queue q;
 
 	mutex_lock(&v3d->reset_lock);
@@ -195,6 +251,39 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
 	mutex_unlock(&v3d->reset_lock);
 }
 
+static void
+v3d_job_timedout(struct drm_sched_job *sched_job)
+{
+	struct v3d_job *job = to_v3d_job(sched_job);
+	struct v3d_exec_info *exec = job->exec;
+	struct v3d_dev *v3d = exec->v3d;
+	enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
+	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
+	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+
+	/* If the current address or return address have changed, then
+	 * the GPU has probably made progress and we should delay the
+	 * reset.  This could fail if the GPU got in an infinite loop
+	 * in the CL, but that is pretty unlikely outside of an i-g-t
+	 * testcase.
+	 */
+	if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
+		job->timedout_ctca = ctca;
+		job->timedout_ctra = ctra;
+		return;
+	}
+
+	v3d_gpu_reset_for_timeout(v3d, sched_job);
+}
+
+static void
+v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
+{
+	struct v3d_tfu_job *job = to_tfu_job(sched_job);
+
+	v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+}
+
 static const struct drm_sched_backend_ops v3d_sched_ops = {
 	.dependency = v3d_job_dependency,
 	.run_job = v3d_job_run,
@@ -202,6 +291,13 @@ static const struct drm_sched_backend_ops v3d_sched_ops = {
 	.free_job = v3d_job_free
 };
 
+static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
+	.dependency = v3d_tfu_job_dependency,
+	.run_job = v3d_tfu_job_run,
+	.timedout_job = v3d_tfu_job_timedout,
+	.free_job = v3d_tfu_job_free
+};
+
 int
 v3d_sched_init(struct v3d_dev *v3d)
 {
@@ -232,6 +328,19 @@ v3d_sched_init(struct v3d_dev *v3d)
 		return ret;
 	}
 
+	ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
+			     &v3d_tfu_sched_ops,
+			     hw_jobs_limit, job_hang_limit,
+			     msecs_to_jiffies(hang_limit_ms),
+			     "v3d_tfu");
+	if (ret) {
+		dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
+			ret);
+		drm_sched_fini(&v3d->queue[V3D_RENDER].sched);
+		drm_sched_fini(&v3d->queue[V3D_BIN].sched);
+		return ret;
+	}
+
 	return 0;
 }
@@ -12,6 +12,28 @@
 #define TRACE_SYSTEM v3d
 #define TRACE_INCLUDE_FILE v3d_trace
 
+TRACE_EVENT(v3d_submit_cl_ioctl,
+	    TP_PROTO(struct drm_device *dev, u32 ct1qba, u32 ct1qea),
+	    TP_ARGS(dev, ct1qba, ct1qea),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ct1qba)
+			     __field(u32, ct1qea)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->ct1qba = ct1qba;
+			   __entry->ct1qea = ct1qea;
+			   ),
+
+	    TP_printk("dev=%u, RCL 0x%08x..0x%08x",
+		      __entry->dev,
+		      __entry->ct1qba,
+		      __entry->ct1qea)
+);
+
 TRACE_EVENT(v3d_submit_cl,
 	    TP_PROTO(struct drm_device *dev, bool is_render,
 		     uint64_t seqno,
@@ -42,6 +64,105 @@ TRACE_EVENT(v3d_submit_cl,
 		      __entry->ctnqea)
 );
 
+TRACE_EVENT(v3d_bcl_irq,
+	    TP_PROTO(struct drm_device *dev,
+		     uint64_t seqno),
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u64, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%llu",
+		      __entry->dev,
+		      __entry->seqno)
+);
+
+TRACE_EVENT(v3d_rcl_irq,
+	    TP_PROTO(struct drm_device *dev,
+		     uint64_t seqno),
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u64, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%llu",
+		      __entry->dev,
+		      __entry->seqno)
+);
+
+TRACE_EVENT(v3d_tfu_irq,
+	    TP_PROTO(struct drm_device *dev,
+		     uint64_t seqno),
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u64, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%llu",
+		      __entry->dev,
+		      __entry->seqno)
+);
+
+TRACE_EVENT(v3d_submit_tfu_ioctl,
+	    TP_PROTO(struct drm_device *dev, u32 iia),
+	    TP_ARGS(dev, iia),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, iia)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->iia = iia;
+			   ),
+
+	    TP_printk("dev=%u, IIA 0x%08x",
+		      __entry->dev,
+		      __entry->iia)
+);
+
+TRACE_EVENT(v3d_submit_tfu,
+	    TP_PROTO(struct drm_device *dev,
+		     uint64_t seqno),
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u64, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%llu",
+		      __entry->dev,
+		      __entry->seqno)
+);
+
 TRACE_EVENT(v3d_reset_begin,
 	    TP_PROTO(struct drm_device *dev),
 	    TP_ARGS(dev),
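These new tracepoints live under the v3d trace system; assuming tracefs is mounted in the usual location, each can be enabled individually (e.g. via /sys/kernel/debug/tracing/events/v3d/v3d_submit_tfu/enable) to correlate TFU submissions with their completion interrupts by seqno.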
@@ -338,6 +338,7 @@ struct vc4_plane_state {
 	u32 pos0_offset;
 	u32 pos2_offset;
 	u32 ptr0_offset;
+	u32 lbm_offset;
 
 	/* Offset where the plane's dlist was last stored in the
 	 * hardware at vc4_crtc_atomic_flush() time.
@@ -369,6 +370,11 @@ struct vc4_plane_state {
 	 * to enable background color fill.
 	 */
 	bool needs_bg_fill;
+
+	/* Mark the dlist as initialized. Useful to avoid initializing it twice
+	 * when async update is not possible.
+	 */
+	bool dlist_initialized;
 };
 
 static inline struct vc4_plane_state *
@@ -681,7 +681,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
 	exec->fence = &fence->base;
 
 	if (out_sync)
-		drm_syncobj_replace_fence(out_sync, 0, exec->fence);
+		drm_syncobj_replace_fence(out_sync, exec->fence);
 
 	vc4_update_bo_seqnos(exec, seqno);
@@ -154,6 +154,7 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
 		return NULL;
 
 	memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
+	vc4_state->dlist_initialized = 0;
 
 	__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
 
@@ -259,14 +260,12 @@ static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
 
 static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 {
-	struct drm_plane *plane = state->plane;
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
 	struct drm_framebuffer *fb = state->fb;
 	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
 	u32 subpixel_src_mask = (1 << 16) - 1;
 	u32 format = fb->format->format;
 	int num_planes = fb->format->num_planes;
-	int min_scale = 1, max_scale = INT_MAX;
 	struct drm_crtc_state *crtc_state;
 	u32 h_subsample, v_subsample;
 	int i, ret;
@@ -278,21 +277,8 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 		return -EINVAL;
 	}
 
-	/* No configuring scaling on the cursor plane, since it gets
-	 * non-vblank-synced updates, and scaling requires LBM changes which
-	 * have to be vblank-synced.
-	 */
-	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
-		min_scale = DRM_PLANE_HELPER_NO_SCALING;
-		max_scale = DRM_PLANE_HELPER_NO_SCALING;
-	} else {
-		min_scale = 1;
-		max_scale = INT_MAX;
-	}
-
-	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
-						  min_scale, max_scale,
-						  true, true);
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1,
+						  INT_MAX, true, true);
 	if (ret)
 		return ret;
@@ -395,10 +381,13 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
 	u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
 	u32 lbm;
 
+	/* LBM is not needed when there's no vertical scaling. */
+	if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+	    vc4_state->y_scaling[1] == VC4_SCALING_NONE)
+		return 0;
+
 	if (!vc4_state->is_yuv) {
-		if (vc4_state->is_unity)
-			return 0;
-		else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
+		if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
 			lbm = pix_per_line * 8;
 		else {
 			/* In special cases, this multiplier might be 12. */
@@ -449,6 +438,43 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state,
 	}
 }
 
+static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+	unsigned long irqflags;
+	u32 lbm_size;
+
+	lbm_size = vc4_lbm_size(state);
+	if (!lbm_size)
+		return 0;
+
+	if (WARN_ON(!vc4_state->lbm_offset))
+		return -EINVAL;
+
+	/* Allocate the LBM memory that the HVS will use for temporary
+	 * storage due to our scaling/format conversion.
+	 */
+	if (!vc4_state->lbm.allocated) {
+		int ret;
+
+		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+		ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
+						 &vc4_state->lbm,
+						 lbm_size, 32, 0, 0);
+		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+
+		if (ret)
+			return ret;
+	} else {
+		WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
+	}
+
+	vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;
+
+	return 0;
+}
+
 /* Writes out a full display list for an active plane to the plane's
  * private dlist state.
  */
@@ -466,31 +492,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	bool mix_plane_alpha;
 	bool covers_screen;
 	u32 scl0, scl1, pitch0;
-	u32 lbm_size, tiling;
-	unsigned long irqflags;
+	u32 tiling;
 	u32 hvs_format = format->hvs;
 	int ret, i;
 
+	if (vc4_state->dlist_initialized)
+		return 0;
+
 	ret = vc4_plane_setup_clipping_and_scaling(state);
-	if (ret)
-		return ret;
-
-	/* Allocate the LBM memory that the HVS will use for temporary
-	 * storage due to our scaling/format conversion.
-	 */
-	lbm_size = vc4_lbm_size(state);
-	if (lbm_size) {
-		if (!vc4_state->lbm.allocated) {
-			spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
-			ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
-							 &vc4_state->lbm,
-							 lbm_size, 32, 0, 0);
-			spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
-		} else {
-			WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
-		}
-	}
 	if (ret)
 		return ret;
@@ -714,15 +723,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 		vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
 	}
 
+	vc4_state->lbm_offset = 0;
+
 	if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
 	    vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
 	    vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
 	    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
-		/* LBM Base Address. */
+		/* Reserve a slot for the LBM Base Address. The real value will
+		 * be set when calling vc4_plane_allocate_lbm().
+		 */
 		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
-		    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
-			vc4_dlist_write(vc4_state, vc4_state->lbm.start);
-		}
+		    vc4_state->y_scaling[1] != VC4_SCALING_NONE)
+			vc4_state->lbm_offset = vc4_state->dlist_count++;
 
 		if (num_planes > 1) {
 			/* Emit Cb/Cr as channel 0 and Y as channel
@@ -768,6 +780,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
 				   state->alpha != DRM_BLEND_ALPHA_OPAQUE;
 
+	/* Flag the dlist as initialized to avoid checking it twice in case
+	 * the async update check already called vc4_plane_mode_set() and
+	 * decided to fallback to sync update because async update was not
+	 * possible.
+	 */
+	vc4_state->dlist_initialized = 1;
+
 	return 0;
 }
@@ -782,13 +801,18 @@ static int vc4_plane_atomic_check(struct drm_plane *plane,
 				  struct drm_plane_state *state)
 {
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+	int ret;
 
 	vc4_state->dlist_count = 0;
 
-	if (plane_enabled(state))
-		return vc4_plane_mode_set(plane, state);
-	else
+	if (!plane_enabled(state))
 		return 0;
+
+	ret = vc4_plane_mode_set(plane, state);
+	if (ret)
+		return ret;
+
+	return vc4_plane_allocate_lbm(state);
 }
 
 static void vc4_plane_atomic_update(struct drm_plane *plane,
@@ -856,30 +880,50 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
 {
 	struct vc4_plane_state *vc4_state, *new_vc4_state;
 
-	if (plane->state->fb != state->fb) {
-		vc4_plane_async_set_fb(plane, state->fb);
-		drm_atomic_set_fb_for_plane(plane->state, state->fb);
-	}
-
-	/* Set the cursor's position on the screen.  This is the
-	 * expected change from the drm_mode_cursor_universal()
-	 * helper.
-	 */
+	drm_atomic_set_fb_for_plane(plane->state, state->fb);
 	plane->state->crtc_x = state->crtc_x;
 	plane->state->crtc_y = state->crtc_y;
-
-	/* Allow changing the start position within the cursor BO, if
-	 * that matters.
-	 */
+	plane->state->crtc_w = state->crtc_w;
+	plane->state->crtc_h = state->crtc_h;
 	plane->state->src_x = state->src_x;
 	plane->state->src_y = state->src_y;
-
-	/* Update the display list based on the new crtc_x/y. */
-	vc4_plane_atomic_check(plane, state);
+	plane->state->src_w = state->src_w;
+	plane->state->src_h = state->src_h;
+	plane->state->alpha = state->alpha;
+	plane->state->pixel_blend_mode = state->pixel_blend_mode;
+	plane->state->rotation = state->rotation;
+	plane->state->zpos = state->zpos;
+	plane->state->normalized_zpos = state->normalized_zpos;
+	plane->state->color_encoding = state->color_encoding;
+	plane->state->color_range = state->color_range;
+	plane->state->src = state->src;
+	plane->state->dst = state->dst;
+	plane->state->visible = state->visible;
 
 	new_vc4_state = to_vc4_plane_state(state);
 	vc4_state = to_vc4_plane_state(plane->state);
+
+	vc4_state->crtc_x = new_vc4_state->crtc_x;
+	vc4_state->crtc_y = new_vc4_state->crtc_y;
+	vc4_state->crtc_h = new_vc4_state->crtc_h;
+	vc4_state->crtc_w = new_vc4_state->crtc_w;
+	vc4_state->src_x = new_vc4_state->src_x;
+	vc4_state->src_y = new_vc4_state->src_y;
+	memcpy(vc4_state->src_w, new_vc4_state->src_w,
+	       sizeof(vc4_state->src_w));
+	memcpy(vc4_state->src_h, new_vc4_state->src_h,
+	       sizeof(vc4_state->src_h));
+	memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling,
+	       sizeof(vc4_state->x_scaling));
+	memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling,
+	       sizeof(vc4_state->y_scaling));
+	vc4_state->is_unity = new_vc4_state->is_unity;
+	vc4_state->is_yuv = new_vc4_state->is_yuv;
+	memcpy(vc4_state->offsets, new_vc4_state->offsets,
+	       sizeof(vc4_state->offsets));
+	vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;
 
 	/* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
 	vc4_state->dlist[vc4_state->pos0_offset] =
 		new_vc4_state->dlist[vc4_state->pos0_offset];
@@ -903,13 +947,38 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
 static int vc4_plane_atomic_async_check(struct drm_plane *plane,
 					struct drm_plane_state *state)
 {
-	/* No configuring new scaling in the fast path. */
-	if (plane->state->crtc_w != state->crtc_w ||
-	    plane->state->crtc_h != state->crtc_h ||
-	    plane->state->src_w != state->src_w ||
-	    plane->state->src_h != state->src_h)
+	struct vc4_plane_state *old_vc4_state, *new_vc4_state;
+	int ret;
+	u32 i;
+
+	ret = vc4_plane_mode_set(plane, state);
+	if (ret)
+		return ret;
+
+	old_vc4_state = to_vc4_plane_state(plane->state);
+	new_vc4_state = to_vc4_plane_state(state);
+	if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
+	    old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
+	    old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
+	    old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
+	    vc4_lbm_size(plane->state) != vc4_lbm_size(state))
 		return -EINVAL;
 
+	/* Only pos0, pos2 and ptr0 DWORDS can be updated in an async update
+	 * if anything else has changed, fallback to a sync update.
+	 */
+	for (i = 0; i < new_vc4_state->dlist_count; i++) {
+		if (i == new_vc4_state->pos0_offset ||
+		    i == new_vc4_state->pos2_offset ||
+		    i == new_vc4_state->ptr0_offset ||
+		    (new_vc4_state->lbm_offset &&
+		     i == new_vc4_state->lbm_offset))
+			continue;
+
+		if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i])
+			return -EINVAL;
+	}
+
 	return 0;
 }
@@ -1026,7 +1095,6 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
 	struct drm_plane *plane = NULL;
 	struct vc4_plane *vc4_plane;
 	u32 formats[ARRAY_SIZE(hvs_formats)];
-	u32 num_formats = 0;
 	int ret = 0;
 	unsigned i;
 	static const uint64_t modifiers[] = {
@@ -1043,20 +1111,13 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
 	if (!vc4_plane)
 		return ERR_PTR(-ENOMEM);
 
-	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
-		/* Don't allow YUV in cursor planes, since that means
-		 * tuning on the scaler, which we don't allow for the
-		 * cursor.
-		 */
-		if (type != DRM_PLANE_TYPE_CURSOR ||
-		    hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
-			formats[num_formats++] = hvs_formats[i].drm;
-		}
-	}
+	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
+		formats[i] = hvs_formats[i].drm;
+
 	plane = &vc4_plane->base;
 	ret = drm_universal_plane_init(dev, plane, 0,
 				       &vc4_plane_funcs,
-				       formats, num_formats,
+				       formats, ARRAY_SIZE(formats),
 				       modifiers, type, NULL);
 
 	drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
@@ -270,7 +270,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
-					struct virtio_gpu_fence **fence);
+					struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id,
 				   uint32_t x, uint32_t y,
@@ -281,7 +281,7 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 				uint32_t x, uint32_t y);
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence **fence);
+			     struct virtio_gpu_fence *fence);
 void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_object *obj);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
@@ -306,23 +306,22 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 					    uint32_t resource_id);
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence **fence);
+			   uint32_t ctx_id, struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t resource_id, uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
 					  struct virtio_gpu_box *box,
-					  struct virtio_gpu_fence **fence);
+					  struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					struct virtio_gpu_object *bo,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
-					struct virtio_gpu_fence **fence);
+					struct virtio_gpu_fence *fence);
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 				  struct virtio_gpu_object *bo,
-				  struct virtio_gpu_resource_create_3d *rc_3d,
-				  struct virtio_gpu_fence **fence);
+				  struct virtio_gpu_resource_create_3d *rc_3d);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
 void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -355,7 +354,7 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(
 void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
-			  struct virtio_gpu_fence **fence);
+			  struct virtio_gpu_fence *fence);
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
 				    u64 last_seq);
@@ -91,19 +91,19 @@ void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
 
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
-			  struct virtio_gpu_fence **fence)
+			  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&drv->lock, irq_flags);
-	(*fence)->seq = ++drv->sync_seq;
-	dma_fence_get(&(*fence)->f);
-	list_add_tail(&(*fence)->node, &drv->fences);
+	fence->seq = ++drv->sync_seq;
+	dma_fence_get(&fence->f);
+	list_add_tail(&fence->node, &drv->fences);
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
 
 	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
-	cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
+	cmd_hdr->fence_id = cpu_to_le64(fence->seq);
 	return 0;
 }
@@ -221,7 +221,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	}
 
 	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
-			      vfpriv->ctx_id, &out_fence);
+			      vfpriv->ctx_id, out_fence);
 
 	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
 
@@ -348,8 +348,8 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		goto fail_backoff;
 	}
 
-	virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL);
-	ret = virtio_gpu_object_attach(vgdev, qobj, &fence);
+	virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
+	ret = virtio_gpu_object_attach(vgdev, qobj, fence);
 	if (ret) {
 		virtio_gpu_fence_cleanup(fence);
 		goto fail_backoff;
@@ -450,7 +450,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	virtio_gpu_cmd_transfer_from_host_3d
 		(vgdev, qobj->hw_res_handle,
 		 vfpriv->ctx_id, offset, args->level,
-		 &box, &fence);
+		 &box, fence);
 	reservation_object_add_excl_fence(qobj->tbo.resv,
 					  &fence->f);
 
@@ -504,7 +504,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 		virtio_gpu_cmd_transfer_to_host_3d
 			(vgdev, qobj,
 			 vfpriv ? vfpriv->ctx_id : 0, offset,
-			 args->level, &box, &fence);
+			 args->level, &box, fence);
 		reservation_object_add_excl_fence(qobj->tbo.resv,
 						  &fence->f);
 		dma_fence_put(&fence->f);
@@ -204,7 +204,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			(vgdev, bo, 0,
 			 cpu_to_le32(plane->state->crtc_w),
 			 cpu_to_le32(plane->state->crtc_h),
-			 0, 0, &vgfb->fence);
+			 0, 0, vgfb->fence);
 		ret = virtio_gpu_object_reserve(bo, false);
 		if (!ret) {
 			reservation_object_add_excl_fence(bo->tbo.resv,
@@ -298,7 +298,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					       struct virtio_gpu_vbuffer *vbuf,
 					       struct virtio_gpu_ctrl_hdr *hdr,
-					       struct virtio_gpu_fence **fence)
+					       struct virtio_gpu_fence *fence)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
 	int rc;
@@ -405,7 +405,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 
 static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
 						  uint32_t resource_id,
-						  struct virtio_gpu_fence **fence)
+						  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_detach_backing *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -467,7 +467,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
-					struct virtio_gpu_fence **fence)
+					struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -497,7 +497,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 				       uint32_t resource_id,
 				       struct virtio_gpu_mem_entry *ents,
 				       uint32_t nents,
-				       struct virtio_gpu_fence **fence)
+				       struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_attach_backing *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -820,8 +820,7 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 				  struct virtio_gpu_object *bo,
-				  struct virtio_gpu_resource_create_3d *rc_3d,
-				  struct virtio_gpu_fence **fence)
+				  struct virtio_gpu_resource_create_3d *rc_3d)
 {
 	struct virtio_gpu_resource_create_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -833,7 +832,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
 	cmd_p->hdr.flags = 0;
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 	bo->created = true;
 }
 
@@ -842,7 +841,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
-					struct virtio_gpu_fence **fence)
+					struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -870,7 +869,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t resource_id, uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
 					  struct virtio_gpu_box *box,
-					  struct virtio_gpu_fence **fence)
+					  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -890,7 +889,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
+			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_cmd_submit *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -910,7 +909,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence **fence)
+			     struct virtio_gpu_fence *fence)
 {
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_mem_entry *ents;
@@ -967,7 +966,7 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 	if (use_dma_api && obj->mapped) {
 		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
 		/* detach backing and wait for the host process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
 		dma_fence_wait(&fence->f, true);
 		dma_fence_put(&fence->f);
@@ -23,8 +23,11 @@ vkms_plane_duplicate_state(struct drm_plane *plane)
 		return NULL;
 
 	crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
-	if (WARN_ON(!crc_data))
-		DRM_INFO("Couldn't allocate crc_data");
+	if (!crc_data) {
+		DRM_DEBUG_KMS("Couldn't allocate crc_data\n");
+		kfree(vkms_state);
+		return NULL;
+	}
 
 	vkms_state->crc_data = crc_data;
 
@@ -138,14 +141,12 @@ static int vkms_prepare_fb(struct drm_plane *plane,
 			   struct drm_plane_state *state)
 {
 	struct drm_gem_object *gem_obj;
-	struct vkms_gem_object *vkms_obj;
 	int ret;
 
 	if (!state->fb)
 		return 0;
 
 	gem_obj = drm_gem_fb_get_obj(state->fb, 0);
-	vkms_obj = drm_gem_to_vkms_gem(gem_obj);
 	ret = vkms_gem_vmap(gem_obj);
 	if (ret)
 		DRM_ERROR("vmap failed: %d\n", ret);
@@ -127,6 +127,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
 int drm_atomic_helper_disable_all(struct drm_device *dev,
 				  struct drm_modeset_acquire_ctx *ctx);
 void drm_atomic_helper_shutdown(struct drm_device *dev);
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+				  struct drm_modeset_acquire_ctx *ctx);
 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 					      struct drm_modeset_acquire_ctx *ctx);
@@ -145,6 +148,10 @@ int drm_atomic_helper_page_flip_target(
 				uint32_t flags,
 				uint32_t target,
 				struct drm_modeset_acquire_ctx *ctx);
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+				       u16 *red, u16 *green, u16 *blue,
+				       uint32_t size,
+				       struct drm_modeset_acquire_ctx *ctx);
 
 /**
  * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
@@ -65,16 +65,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
 					      struct drm_connector_state *state);
 struct drm_connector_state *
 drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
-				  struct drm_modeset_acquire_ctx *ctx);
 void
 __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
 void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
 					       struct drm_connector_state *state);
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
-				       u16 *red, u16 *green, u16 *blue,
-				       uint32_t size,
-				       struct drm_modeset_acquire_ctx *ctx);
 void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
 						     struct drm_private_state *state);
@@ -130,4 +130,63 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
 int drm_modeset_lock_all_ctx(struct drm_device *dev,
 			     struct drm_modeset_acquire_ctx *ctx);
 
+/**
+ * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks
+ * @dev: drm device
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init()
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * Use these macros to simplify grabbing all modeset locks using a local
+ * context. This has the advantage of reducing boilerplate, but also properly
+ * checking return values where appropriate.
+ *
+ * Any code run between BEGIN and END will be holding the modeset locks.
+ *
+ * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and
+ * forth between the labels on deadlock and error conditions.
+ *
+ * Drivers can acquire additional modeset locks. If any lock acquisition
+ * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with
+ * the @ret parameter containing the return value of drm_modeset_lock().
+ *
+ * Returns:
+ * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN()
+ * is 0, so no error checking is necessary
+ */
+#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret)	\
+	drm_modeset_acquire_init(&ctx, flags);			\
+modeset_lock_retry:						\
+	ret = drm_modeset_lock_all_ctx(dev, &ctx);		\
+	if (ret)						\
+		goto modeset_lock_fail;
+
+/**
+ * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN
+ * if ret is -EDEADLK.
+ *
+ * It's important that you use the same ret variable for begin and end so
+ * deadlock conditions are properly handled.
+ *
+ * Returns:
+ * ret will be untouched unless it is -EDEADLK on entry. That means that if you
+ * successfully acquire the locks, ret will be whatever your code sets it to. If
+ * there is a deadlock or other failure with acquire or backoff, ret will be set
+ * to that failure. In both of these cases the code between BEGIN/END will not
+ * be run, so the failure will reflect the inability to grab the locks.
+ */
+#define DRM_MODESET_LOCK_ALL_END(ctx, ret)			\
+modeset_lock_fail:						\
+	if (ret == -EDEADLK) {					\
+		ret = drm_modeset_backoff(&ctx);		\
+		if (!ret)					\
+			goto modeset_lock_retry;		\
+	}							\
+	drm_modeset_drop_locks(&ctx);				\
+	drm_modeset_acquire_fini(&ctx);
+
 #endif /* DRM_MODESET_LOCK_H_ */
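As a minimal sketch of how a driver-side caller might use the new pair of helpers; the surrounding function and the example_update_crtc() callee are illustrative assumptions, not part of this series:

static int example_set_property(struct drm_crtc *crtc, u64 val)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);

	/* ret is guaranteed to be 0 here and all modeset locks are held. */
	ret = example_update_crtc(crtc, val, &ctx);

	DRM_MODESET_LOCK_ALL_END(ctx, ret);

	return ret;
}

Note that the same ret variable is passed to both macros, so the -EDEADLK backoff-and-retry loop works as documented above.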
@@ -131,7 +131,7 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
 
 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
 				     u32 handle);
-void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point,
+void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 			       struct dma_fence *fence);
 int drm_syncobj_find_fence(struct drm_file *file_private,
 			   u32 handle, u64 point, u64 flags,
@@ -541,6 +541,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
 	return ret < 0 ? ret : 0;
 }
 
+struct dma_fence *dma_fence_get_stub(void);
 u64 dma_fence_context_alloc(unsigned num);
 
 #define DMA_FENCE_TRACE(f, fmt, args...) \
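For context, dma_fence_get_stub() returns a reference to a global fence that is already signaled, which lets drm/syncobj hand back a valid fence where it previously had to special-case a NULL one. A minimal sketch of the pattern this enables; the surrounding code is illustrative, only the two API calls are from the kernel:

	/* "Signal" a syncobj immediately by installing the signaled
	 * stub fence, instead of special-casing a NULL fence at every
	 * wait site.
	 */
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);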
@@ -36,6 +36,7 @@ extern "C" {
 #define DRM_V3D_MMAP_BO                           0x03
 #define DRM_V3D_GET_PARAM                         0x04
 #define DRM_V3D_GET_BO_OFFSET                     0x05
+#define DRM_V3D_SUBMIT_TFU                        0x06
 
 #define DRM_IOCTL_V3D_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
 #define DRM_IOCTL_V3D_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -43,6 +44,7 @@ extern "C" {
 #define DRM_IOCTL_V3D_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
 #define DRM_IOCTL_V3D_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
 #define DRM_IOCTL_V3D_GET_BO_OFFSET       DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
+#define DRM_IOCTL_V3D_SUBMIT_TFU          DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
 
 /**
  * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -179,6 +181,7 @@ enum drm_v3d_param {
 	DRM_V3D_PARAM_V3D_CORE0_IDENT0,
 	DRM_V3D_PARAM_V3D_CORE0_IDENT1,
 	DRM_V3D_PARAM_V3D_CORE0_IDENT2,
+	DRM_V3D_PARAM_SUPPORTS_TFU,
 };
 
 struct drm_v3d_get_param {
@@ -197,6 +200,28 @@ struct drm_v3d_get_bo_offset {
 	__u32 offset;
 };
 
+struct drm_v3d_submit_tfu {
+	__u32 icfg;
+	__u32 iia;
+	__u32 iis;
+	__u32 ica;
+	__u32 iua;
+	__u32 ioa;
+	__u32 ios;
+	__u32 coef[4];
+	/* First handle is the output BO, following are other inputs.
+	 * 0 for unused.
+	 */
+	__u32 bo_handles[4];
+	/* sync object to block on before running the TFU job.  Each TFU
+	 * job will execute in the order submitted to its FD.  Synchronization
+	 * against rendering jobs requires using sync objects.
+	 */
+	__u32 in_sync;
+	/* Sync object to signal when the TFU job is done. */
+	__u32 out_sync;
+};
+
 #if defined(__cplusplus)
 }
 #endif
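To close the loop on the new UAPI, here is a hedged userspace sketch of a TFU submission. A real user would first confirm DRM_V3D_PARAM_SUPPORTS_TFU through DRM_IOCTL_V3D_GET_PARAM; the helper below, its argument values, the BO handles, and the include path are illustrative assumptions, not part of this series:

#include <string.h>
#include <sys/ioctl.h>
#include "v3d_drm.h"	/* path depends on the libdrm install */

/* Queue one TFU conversion job; dst_bo and src_bo are GEM handles the
 * caller created earlier, out_syncobj is signaled when the job is done.
 */
static int v3d_submit_tfu_example(int fd, __u32 dst_bo, __u32 src_bo,
				  __u32 icfg, __u32 iia, __u32 iis,
				  __u32 ioa, __u32 ios, __u32 out_syncobj)
{
	struct drm_v3d_submit_tfu args;

	memset(&args, 0, sizeof(args));
	args.icfg = icfg;		/* conversion configuration */
	args.iia = iia;			/* input image address in src_bo */
	args.iis = iis;			/* input image stride */
	args.ioa = ioa;			/* output image address in dst_bo */
	args.ios = ios;			/* output image size */
	args.bo_handles[0] = dst_bo;	/* output BO is always first */
	args.bo_handles[1] = src_bo;	/* remaining handles are inputs */
	args.out_sync = out_syncobj;

	return ioctl(fd, DRM_IOCTL_V3D_SUBMIT_TFU, &args);
}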