linux_dsm_epyc7002/drivers/gpu/drm/drm_mode_config.c

/*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <drm/drm_encoder.h>
#include <drm/drm_mode_config.h>
#include <drm/drmP.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
int drm_modeset_register_all(struct drm_device *dev)
{
int ret;
ret = drm_plane_register_all(dev);
if (ret)
goto err_plane;
ret = drm_crtc_register_all(dev);
if (ret)
goto err_crtc;
ret = drm_encoder_register_all(dev);
if (ret)
goto err_encoder;
ret = drm_connector_register_all(dev);
if (ret)
goto err_connector;
return 0;
err_connector:
drm_encoder_unregister_all(dev);
err_encoder:
drm_crtc_unregister_all(dev);
err_crtc:
drm_plane_unregister_all(dev);
err_plane:
return ret;
}
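/* Unregister all modeset objects, in reverse registration order. */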
void drm_modeset_unregister_all(struct drm_device *dev)
{
drm_connector_unregister_all(dev);
drm_encoder_unregister_all(dev);
drm_crtc_unregister_all(dev);
drm_plane_unregister_all(dev);
}
/**
* drm_mode_getresources - get graphics configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Construct a set of configuration description structures and return
* them to the user, including CRTC, connector and framebuffer configuration.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_getresources(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_card_res *card_res = data;
struct drm_framebuffer *fb;
struct drm_connector *connector;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
int count, ret = 0;
uint32_t __user *fb_id;
uint32_t __user *crtc_id;
uint32_t __user *connector_id;
uint32_t __user *encoder_id;
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
struct drm_connector_list_iter conn_iter;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
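	/*
	 * Each object list below follows the usual two-pass ioctl convention:
	 * userspace first calls with the count fields set to zero to learn how
	 * many IDs exist, then calls again with arrays of that size. IDs are
	 * only copied out while they still fit into the user-supplied array,
	 * but the returned count always reflects the current total.
	 */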
mutex_lock(&file_priv->fbs_lock);
count = 0;
fb_id = u64_to_user_ptr(card_res->fb_id_ptr);
list_for_each_entry(fb, &file_priv->fbs, filp_head) {
if (count < card_res->count_fbs &&
put_user(fb->base.id, fb_id + count)) {
mutex_unlock(&file_priv->fbs_lock);
return -EFAULT;
}
count++;
}
card_res->count_fbs = count;
mutex_unlock(&file_priv->fbs_lock);
card_res->max_height = dev->mode_config.max_height;
card_res->min_height = dev->mode_config.min_height;
card_res->max_width = dev->mode_config.max_width;
card_res->min_width = dev->mode_config.min_width;
count = 0;
crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr);
drm_for_each_crtc(crtc, dev) {
if (count < card_res->count_crtcs &&
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
put_user(crtc->base.id, crtc_id + count))
return -EFAULT;
count++;
}
card_res->count_crtcs = count;
count = 0;
encoder_id = u64_to_user_ptr(card_res->encoder_id_ptr);
drm_for_each_encoder(encoder, dev) {
if (count < card_res->count_encoders &&
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
put_user(encoder->base.id, encoder_id + count))
return -EFAULT;
count++;
}
card_res->count_encoders = count;
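	/*
	 * Unlike CRTCs and encoders, connectors can be hotplugged at runtime,
	 * so the connector list is walked with the reference-counting iterator.
	 */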
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
drm_connector_list_iter_get(dev, &conn_iter);
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
drm_for_each_connector_iter(connector, &conn_iter) {
if (count < card_res->count_connectors &&
put_user(connector->base.id, connector_id + count)) {
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
drm_connector_list_iter_put(&conn_iter);
return -EFAULT;
}
count++;
}
card_res->count_connectors = count;
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
drm_connector_list_iter_put(&conn_iter);
return ret;
}
/**
* drm_mode_config_reset - call ->reset callbacks
* @dev: drm device
*
* This function calls the ->reset callback of all the device's planes, CRTCs,
* encoders and connectors. Drivers can use this in e.g. their driver load or
* resume code to reset hardware and software state.
*/
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
struct drm_encoder *encoder;
struct drm_connector *connector;
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
struct drm_connector_list_iter conn_iter;
drm_for_each_plane(plane, dev)
if (plane->funcs->reset)
plane->funcs->reset(plane);
drm_for_each_crtc(crtc, dev)
if (crtc->funcs->reset)
crtc->funcs->reset(crtc);
drm_for_each_encoder(encoder, dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
drm_connector_list_iter_get(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
if (connector->funcs->reset)
connector->funcs->reset(connector);
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
drm_connector_list_iter_put(&conn_iter);
}
EXPORT_SYMBOL(drm_mode_config_reset);
/*
* Global properties
*/
static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
{ DRM_PLANE_TYPE_CURSOR, "Cursor" },
};
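/*
 * Create the standard properties shared by all drivers; they are created once
 * per device here and attached to the individual objects (planes, CRTCs,
 * connectors) when those are initialized.
 */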
static int drm_mode_create_standard_properties(struct drm_device *dev)
{
struct drm_property *prop;
int ret;
ret = drm_connector_create_standard_properties(dev);
if (ret)
return ret;
prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"type", drm_plane_type_enum_list,
ARRAY_SIZE(drm_plane_type_enum_list));
if (!prop)
return -ENOMEM;
dev->mode_config.plane_type_property = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_X", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_x = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_Y", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_y = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_W", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_w = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_H", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_h = prop;
prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_X", INT_MIN, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_x = prop;
prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_Y", INT_MIN, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_y = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_W", 0, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_w = prop;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_H", 0, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_h = prop;
prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
"FB_ID", DRM_MODE_OBJECT_FB);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_fb_id = prop;
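	/*
	 * IN_FENCE_FD accepts a sync_file fd used for explicit fencing of a
	 * plane update; -1 means no fence is attached.
	 */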
prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
"IN_FENCE_FD", -1, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_in_fence_fd = prop;
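	/*
	 * OUT_FENCE_PTR takes a userspace pointer through which a sync_file fd
	 * for the CRTC's atomic commit is returned to userspace.
	 */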
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"OUT_FENCE_PTR", 0, U64_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_out_fence_ptr = prop;
prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_ID", DRM_MODE_OBJECT_CRTC);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_id = prop;
prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
"ACTIVE");
if (!prop)
return -ENOMEM;
dev->mode_config.prop_active = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
"MODE_ID", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_mode_id = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"DEGAMMA_LUT", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.degamma_lut_property = prop;
prop = drm_property_create_range(dev,
DRM_MODE_PROP_IMMUTABLE,
"DEGAMMA_LUT_SIZE", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.degamma_lut_size_property = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"CTM", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.ctm_property = prop;
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"GAMMA_LUT", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.gamma_lut_property = prop;
prop = drm_property_create_range(dev,
DRM_MODE_PROP_IMMUTABLE,
"GAMMA_LUT_SIZE", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.gamma_lut_size_property = prop;
return 0;
}
/**
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
* Initialize @dev's mode_config structure, used for tracking the graphics
* configuration of @dev.
*
* Since this initializes the modeset locks, no locking is possible here. That is
* no problem, since this should happen single-threaded at init time. It is the
* driver's job to ensure this guarantee.
*
*/
void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
mutex_init(&dev->mode_config.fb_lock);
mutex_init(&dev->mode_config.blob_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
idr_init(&dev->mode_config.tile_idr);
ida_init(&dev->mode_config.connector_ida);
drm: locking&new iterators for connector_list The requirements for connector_list locking are a bit tricky: - We need to be able to jump over zombie conectors (i.e. with refcount == 0, but not yet removed from the list). If instead we require that there's no zombies on the list then the final kref_put must happen under the list protection lock, which means that locking context leaks all over the place. Not pretty - better to deal with zombies and wrap the locking just around the list_del in the destructor. - When we walk the list we must _not_ hold the connector list lock. We walk the connector list at an absolutely massive amounts of places, if all those places can't ever call drm_connector_unreference the code would get unecessarily complicated. - connector_list needs it own lock, again too many places that walk it that we could reuse e.g. mode_config.mutex without resulting in inversions. - Lots of code uses these loops to look-up a connector, i.e. they want to be able to call drm_connector_reference. But on the other hand we want connectors to stay on that list until they're dead (i.e. connector_list can't hold a full reference), which means despite the "can't hold lock for the loop body" rule we need to make sure a connector doesn't suddenly become a zombie. At first Dave&I discussed various horror-show approaches using srcu, but turns out it's fairly easy: - For the loop body we always hold an additional reference to the current connector. That means it can't zombify, and it also means it'll stay on the list, which means we can use it as our iterator to find the next connector. - When we try to find the next connector we only have to jump over zombies. To make sure we don't chase bad pointers that entire loop is protected with the new connect_list_lock spinlock. And because we know that we're starting out with a non-zombie (need to drop our reference for the old connector only after we have our new one), we're guranteed to still be on the connector_list and either find the next non-zombie or complete the iteration. - Only downside is that we need to make sure that the temporary reference for the loop body doesn't leak. iter_get/put() functions + lockdep make sure that's the case. - To avoid a flag day the new iterator macro has an _iter postfix. We can rename it back once all the users of the unsafe version are gone (there's about 100 list walkers for the connector_list). For now this patch only converts all the list walking in the core, leaving helpers and drivers for later patches. The nice thing is that we can now finally remove 2 FIXME comments from the register/unregister functions. v2: - use irqsafe spinlocks, so that we can use this in drm_state_dump too. - nuke drm_modeset_lock_all from drm_connector_init, now entirely cargo-culted nonsense. v3: - do {} while (!kref_get_unless_zero), makes for a tidier loop (Dave). - pretty kerneldoc - add EXPORT_SYMBOL, helpers&drivers are supposed to use this. v4: Change lockdep annotations to only check whether we release the iter fake lock again (i.e. make sure that iter_put is called), but not check any locking dependecies itself. That seams to require a recursive read lock in trylock mode. Cc: Dave Airlie <airlied@gmail.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Sean Paul <seanpaul@chromium.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161213230814.19598-6-daniel.vetter@ffwll.ch
2016-12-14 06:08:06 +07:00
spin_lock_init(&dev->mode_config.connector_list_lock);
drm_mode_create_standard_properties(dev);
/* Just to be sure */
dev->mode_config.num_fb = 0;
dev->mode_config.num_connector = 0;
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
dev->mode_config.num_overlay_plane = 0;
dev->mode_config.num_total_plane = 0;
}
EXPORT_SYMBOL(drm_mode_config_init);
/**
* drm_mode_config_cleanup - free up DRM mode_config info
* @dev: DRM device
*
* Free up all the connectors and CRTCs associated with this DRM device, then
* free up the framebuffers and associated buffer objects.
*
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
*
* FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_crtc *crtc, *ct;
struct drm_encoder *encoder, *enct;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
struct drm_property_blob *blob, *bt;
struct drm_plane *plane, *plt;
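	/*
	 * Teardown order: encoders and connectors first, then properties,
	 * planes, CRTCs, blobs and finally any leaked framebuffers.
	 */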
list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
head) {
encoder->funcs->destroy(encoder);
}
drm_connector_list_iter_get(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
/* drm_connector_list_iter holds a full reference to the
* current connector itself, which means it is inherently safe
* against unreferencing the current connector - but not against
* deleting it right away. */
drm_connector_unreference(connector);
}
drm_connector_list_iter_put(&conn_iter);
if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
drm_connector_list_iter_get(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
DRM_ERROR("connector %s leaked!\n", connector->name);
drm_connector_list_iter_put(&conn_iter);
}
list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
head) {
drm_property_destroy(dev, property);
}
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
head) {
plane->funcs->destroy(plane);
}
list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
crtc->funcs->destroy(crtc);
}
list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
head_global) {
drm_property_unreference_blob(blob);
}
/*
* Single-threaded teardown context, so it's not required to grab the
* fb_lock to protect against concurrent fb_list access. On the contrary,
* doing so would actually deadlock with the drm_framebuffer_cleanup function.
*
* Also, if there are any framebuffers left, that's a driver leak now,
* so politely WARN about this.
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
drm_framebuffer_free(&fb->base.refcount);
}
ida_destroy(&dev->mode_config.connector_ida);
idr_destroy(&dev->mode_config.tile_idr);
idr_destroy(&dev->mode_config.crtc_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);