ttm: Make parts of a struct ttm_bo_device global.

Common resources, such as memory accounting and swap lists, should be
global and not per device. Introduce a struct ttm_bo_global to
accommodate this, and register it with sysfs. Add a small sysfs interface
to return the number of active buffer objects.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@linux.ie>
Authored by Thomas Hellstrom on 2009-08-18 16:51:56 +02:00; committed by Dave Airlie
commit a987fcaa80
parent 5fd9cbad3a
7 changed files with 296 additions and 141 deletions
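For reference, the driver-facing sequence this change implies, condensed from
the radeon hunks below (error handling trimmed; the radeon_ttm_mem_global_*
callbacks are the driver's existing accounting hooks):

	struct ttm_global_reference *global_ref;

	/* 1) Reference (and on first use, create) the memory accounting object. */
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	ttm_global_item_ref(global_ref);

	/* 2) Reference the new bo global, which itself needs the mem global. */
	rdev->mman.bo_global_ref.mem_glob = rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	ttm_global_item_ref(global_ref);

	/* 3) ttm_bo_device_init() now takes the bo global, not the mem global. */
	ttm_bo_device_init(&rdev->mman.bdev,
			   rdev->mman.bo_global_ref.ref.object,
			   &radeon_bo_driver, DRM_FILE_PAGE_OFFSET);

Teardown unreferences in the reverse order, as radeon_ttm_global_fini() below
shows.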

drivers/gpu/drm/radeon/radeon.h

@ -37,6 +37,7 @@
* TTM.
*/
struct radeon_mman {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_global_reference mem_global_ref;
bool mem_global_referenced;
struct ttm_bo_device bdev;

drivers/gpu/drm/radeon/radeon_ttm.c

@ -77,9 +77,25 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
global_ref->release = &radeon_ttm_mem_global_release;
r = ttm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed referencing a global TTM memory object.\n");
DRM_ERROR("Failed setting up TTM memory accounting "
"subsystem.\n");
return r;
}
rdev->mman.bo_global_ref.mem_glob =
rdev->mman.mem_global_ref.object;
global_ref = &rdev->mman.bo_global_ref.ref;
global_ref->global_type = TTM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
r = ttm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
ttm_global_item_unref(&rdev->mman.mem_global_ref);
return r;
}
rdev->mman.mem_global_referenced = true;
return 0;
}
@ -87,6 +103,7 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
if (rdev->mman.mem_global_referenced) {
ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
ttm_global_item_unref(&rdev->mman.mem_global_ref);
rdev->mman.mem_global_referenced = false;
}
@ -286,9 +303,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
spin_lock(&rdev->mman.bdev.lru_lock);
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
spin_lock(&glob->lru_lock);
drm_mm_put_block(tmp_mem.mm_node);
spin_unlock(&rdev->mman.bdev.lru_lock);
spin_unlock(&glob->lru_lock);
return r;
}
return r;
@ -323,9 +342,11 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
}
out_cleanup:
if (tmp_mem.mm_node) {
spin_lock(&rdev->mman.bdev.lru_lock);
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
spin_lock(&glob->lru_lock);
drm_mm_put_block(tmp_mem.mm_node);
spin_unlock(&rdev->mman.bdev.lru_lock);
spin_unlock(&glob->lru_lock);
return r;
}
return r;
@ -441,7 +462,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
/* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&rdev->mman.bdev,
rdev->mman.mem_global_ref.object,
rdev->mman.bo_global_ref.ref.object,
&radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);

drivers/gpu/drm/ttm/ttm_bo.c

@ -45,6 +45,39 @@
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
static struct attribute ttm_bo_count = {
.name = "bo_count",
.mode = S_IRUGO
};
static ssize_t ttm_bo_global_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
{
struct ttm_bo_global *glob =
container_of(kobj, struct ttm_bo_global, kobj);
return snprintf(buffer, PAGE_SIZE, "%lu\n",
(unsigned long) atomic_read(&glob->bo_count));
}
static struct attribute *ttm_bo_global_attrs[] = {
&ttm_bo_count,
NULL
};
static struct sysfs_ops ttm_bo_global_ops = {
.show = &ttm_bo_global_show
};
static struct kobj_type ttm_bo_glob_kobj_type = {
.release = &ttm_bo_global_kobj_release,
.sysfs_ops = &ttm_bo_global_ops,
.default_attrs = ttm_bo_global_attrs
};
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
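The bo_count attribute above surfaces in sysfs once ttm_bo_global_init()
(later in this file) adds the global kobject as "buffer_objects" under
ttm_get_kobj(). A hypothetical userspace reader -- the /sys prefix below is
an assumption, since the real location depends on where ttm_get_kobj()
anchors the TTM kobject on a given kernel build:

	/* Hypothetical check of the new counter; path prefix is assumed. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long count;
		FILE *f = fopen("/sys/kernel/ttm/buffer_objects/bo_count", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%lu", &count) == 1)
			printf("active TTM buffer objects: %lu\n", count);
		fclose(f);
		return 0;
	}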
@ -67,10 +100,11 @@ static void ttm_bo_release_list(struct kref *list_kref)
if (bo->ttm)
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
if (bo->destroy)
bo->destroy(bo);
else {
ttm_mem_global_free(bdev->mem_glob, bo->acc_size);
ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
kfree(bo);
}
}
@ -107,7 +141,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
kref_get(&bo->list_kref);
if (bo->ttm != NULL) {
list_add_tail(&bo->swap, &bdev->swap_lru);
list_add_tail(&bo->swap, &bo->glob->swap_lru);
kref_get(&bo->list_kref);
}
}
@ -142,7 +176,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
int ret;
while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
@ -154,9 +188,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
if (no_wait)
return -EBUSY;
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
ret = ttm_bo_wait_unreserved(bo, interruptible);
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
if (unlikely(ret))
return ret;
@ -182,16 +216,16 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
int put_count = 0;
int ret;
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
sequence);
if (likely(ret == 0))
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
@ -201,13 +235,13 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
ttm_bo_add_to_lru(bo);
atomic_set(&bo->reserved, 0);
wake_up_all(&bo->event_queue);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
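Most of the remaining hunks in this file repeat the same mechanical
substitution: LRU handling now serializes on the lock in the shared
struct ttm_bo_global rather than on the per-device one. A minimal sketch,
for any bo whose ->glob pointer was set up by ttm_buffer_object_init():

	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	/* ... manipulate the per-device LRUs, bdev->ddestroy, or
	 * glob->swap_lru; all are now protected by this one lock ... */
	spin_unlock(&glob->lru_lock);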
@ -218,6 +252,7 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
uint32_t page_flags = 0;
@ -230,14 +265,14 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
case ttm_bo_type_kernel:
bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
page_flags, bdev->dummy_read_page);
page_flags, glob->dummy_read_page);
if (unlikely(bo->ttm == NULL))
ret = -ENOMEM;
break;
case ttm_bo_type_user:
bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
page_flags | TTM_PAGE_FLAG_USER,
bdev->dummy_read_page);
glob->dummy_read_page);
if (unlikely(bo->ttm == NULL))
ret = -ENOMEM;
break;
@ -355,6 +390,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_bo_driver *driver = bdev->driver;
int ret;
@ -366,7 +402,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
spin_unlock(&bo->lock);
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
BUG_ON(ret);
if (bo->ttm)
@ -381,7 +417,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
bo->mem.mm_node = NULL;
}
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
atomic_set(&bo->reserved, 0);
@ -391,14 +427,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
return 0;
}
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
if (list_empty(&bo->ddestroy)) {
void *sync_obj = bo->sync_obj;
void *sync_obj_arg = bo->sync_obj_arg;
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
spin_unlock(&bo->lock);
if (sync_obj)
@ -408,7 +444,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
ret = 0;
} else {
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
spin_unlock(&bo->lock);
ret = -EBUSY;
}
@ -423,11 +459,12 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_buffer_object *entry, *nentry;
struct list_head *list, *next;
int ret;
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
list_for_each_safe(list, next, &bdev->ddestroy) {
entry = list_entry(list, struct ttm_buffer_object, ddestroy);
nentry = NULL;
@ -444,16 +481,16 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
}
kref_get(&entry->list_kref);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
ret = ttm_bo_cleanup_refs(entry, remove_all);
kref_put(&entry->list_kref, ttm_bo_release_list);
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
if (nentry) {
bool next_onlist = !list_empty(next);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
kref_put(&nentry->list_kref, ttm_bo_release_list);
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
/*
* Someone might have raced us and removed the
* next entry from the list. We don't bother restarting
@ -467,7 +504,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
break;
}
ret = !list_empty(&bdev->ddestroy);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
return ret;
}
@ -517,6 +554,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
{
int ret = 0;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_reg evict_mem;
uint32_t proposed_placement;
@ -565,12 +603,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
goto out;
}
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
if (evict_mem.mm_node) {
drm_mm_put_block(evict_mem.mm_node);
evict_mem.mm_node = NULL;
}
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
bo->evicted = true;
out:
return ret;
@ -585,6 +623,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
uint32_t mem_type,
bool interruptible, bool no_wait)
{
struct ttm_bo_global *glob = bdev->glob;
struct drm_mm_node *node;
struct ttm_buffer_object *entry;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@ -598,7 +637,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
return ret;
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
do {
node = drm_mm_search_free(&man->manager, num_pages,
mem->page_alignment, 1);
@ -619,7 +658,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
if (likely(ret == 0))
put_count = ttm_bo_del_from_lru(entry);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
if (unlikely(ret != 0))
return ret;
@ -635,21 +674,21 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
if (ret)
return ret;
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
} while (1);
if (!node) {
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
return -ENOMEM;
}
node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
if (unlikely(!node)) {
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
goto retry_pre_get;
}
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
mem->mm_node = node;
mem->mem_type = mem_type;
return 0;
@ -697,6 +736,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_type_manager *man;
uint32_t num_prios = bdev->driver->num_mem_type_prio;
@ -733,20 +773,20 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (unlikely(ret))
return ret;
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
node = drm_mm_search_free(&man->manager,
mem->num_pages,
mem->page_alignment,
1);
if (unlikely(!node)) {
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
break;
}
node = drm_mm_get_block_atomic(node,
mem->num_pages,
mem->page_alignment);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
} while (!node);
}
if (node)
@ -816,7 +856,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
uint32_t proposed_placement,
bool interruptible, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
struct ttm_mem_reg mem;
@ -852,9 +892,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
drm_mm_put_block(mem.mm_node);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
}
return ret;
}
@ -990,6 +1030,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
bo->num_pages = num_pages;
bo->mem.mem_type = TTM_PL_SYSTEM;
@ -1002,6 +1043,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
bo->seq_valid = false;
bo->persistant_swap_storage = persistant_swap_storage;
bo->acc_size = acc_size;
atomic_inc(&bo->glob->bo_count);
ret = ttm_bo_check_placement(bo, flags, 0ULL);
if (unlikely(ret != 0))
@ -1040,13 +1082,13 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_buffer_object_init);
static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
unsigned long num_pages)
{
size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
PAGE_MASK;
return bdev->ttm_bo_size + 2 * page_array_size;
return glob->ttm_bo_size + 2 * page_array_size;
}
int ttm_buffer_object_create(struct ttm_bo_device *bdev,
@ -1061,10 +1103,10 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
{
struct ttm_buffer_object *bo;
int ret;
struct ttm_mem_global *mem_glob = bdev->mem_glob;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
size_t acc_size =
ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (unlikely(ret != 0))
return ret;
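Worked example of the charge ttm_bo_size() computes here, assuming a 64-bit
build with 4 KiB pages and a 1 MiB buffer:

	/*
	 * num_pages       = 1 MiB / 4 KiB = 256
	 * page_array_size = (256 * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK
	 *                 = 4096
	 * acc_size        = glob->ttm_bo_size + 2 * 4096
	 *
	 * ttm_mem_global_alloc() must grant acc_size before the object is
	 * created; ttm_bo_release_list() returns the same amount through
	 * glob->mem_glob when the last reference goes away.
	 */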
@ -1118,6 +1160,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
struct list_head *head,
unsigned mem_type, bool allow_errors)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_buffer_object *entry;
int ret;
int put_count;
@ -1126,30 +1169,31 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
* Can't use standard list traversal since we're unlocking.
*/
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
while (!list_empty(head)) {
entry = list_first_entry(head, struct ttm_buffer_object, lru);
kref_get(&entry->list_kref);
ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
put_count = ttm_bo_del_from_lru(entry);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
while (put_count--)
kref_put(&entry->list_kref, ttm_bo_ref_bug);
BUG_ON(ret);
ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
ttm_bo_unreserve(entry);
kref_put(&entry->list_kref, ttm_bo_release_list);
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
}
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
return 0;
}
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
int ret = -EINVAL;
@ -1171,13 +1215,13 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
if (mem_type > 0) {
ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
if (drm_mm_clean(&man->manager))
drm_mm_takedown(&man->manager);
else
ret = -EBUSY;
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
}
return ret;
@ -1251,11 +1295,83 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
}
EXPORT_SYMBOL(ttm_bo_init_mm);
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
struct ttm_bo_global *glob =
container_of(kobj, struct ttm_bo_global, kobj);
printk(KERN_INFO TTM_PFX "Freeing bo global.\n");
ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
__free_page(glob->dummy_read_page);
kfree(glob);
}
void ttm_bo_global_release(struct ttm_global_reference *ref)
{
struct ttm_bo_global *glob = ref->object;
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);
int ttm_bo_global_init(struct ttm_global_reference *ref)
{
struct ttm_bo_global_ref *bo_ref =
container_of(ref, struct ttm_bo_global_ref, ref);
struct ttm_bo_global *glob = ref->object;
int ret;
mutex_init(&glob->device_list_mutex);
spin_lock_init(&glob->lru_lock);
glob->mem_glob = bo_ref->mem_glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
ret = -ENOMEM;
goto out_no_drp;
}
INIT_LIST_HEAD(&glob->swap_lru);
INIT_LIST_HEAD(&glob->device_list);
ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
if (unlikely(ret != 0)) {
printk(KERN_ERR TTM_PFX
"Could not register buffer object swapout.\n");
goto out_no_shrink;
}
glob->ttm_bo_extra_size =
ttm_round_pot(sizeof(struct ttm_tt)) +
ttm_round_pot(sizeof(struct ttm_backend));
glob->ttm_bo_size = glob->ttm_bo_extra_size +
ttm_round_pot(sizeof(struct ttm_buffer_object));
atomic_set(&glob->bo_count, 0);
kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
if (unlikely(ret != 0))
kobject_put(&glob->kobj);
return ret;
out_no_shrink:
__free_page(glob->dummy_read_page);
out_no_drp:
kfree(glob);
return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
int ret = 0;
unsigned i = TTM_NUM_MEM_TYPES;
struct ttm_mem_type_manager *man;
struct ttm_bo_global *glob = bdev->glob;
while (i--) {
man = &bdev->man[i];
@ -1271,98 +1387,74 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
}
}
mutex_lock(&glob->device_list_mutex);
list_del(&bdev->device_list);
mutex_unlock(&glob->device_list_mutex);
if (!cancel_delayed_work(&bdev->wq))
flush_scheduled_work();
while (ttm_bo_delayed_delete(bdev, true))
;
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
if (list_empty(&bdev->ddestroy))
TTM_DEBUG("Delayed destroy list was clean\n");
if (list_empty(&bdev->man[0].lru))
TTM_DEBUG("Swap list was clean\n");
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
write_lock(&bdev->vm_lock);
drm_mm_takedown(&bdev->addr_space_mm);
write_unlock(&bdev->vm_lock);
__free_page(bdev->dummy_read_page);
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);
/*
* This function is intended to be called on drm driver load.
* If you decide to call it from firstopen, you must protect the call
* from a potentially racing ttm_bo_driver_finish in lastclose.
* (This may happen on X server restart).
*/
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_mem_global *mem_glob,
struct ttm_bo_driver *driver, uint64_t file_page_offset)
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
uint64_t file_page_offset)
{
int ret = -EINVAL;
bdev->dummy_read_page = NULL;
rwlock_init(&bdev->vm_lock);
spin_lock_init(&bdev->lru_lock);
spin_lock_init(&glob->lru_lock);
bdev->driver = driver;
bdev->mem_glob = mem_glob;
memset(bdev->man, 0, sizeof(bdev->man));
bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(bdev->dummy_read_page == NULL)) {
ret = -ENOMEM;
goto out_err0;
}
/*
* Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized.
*/
ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
if (unlikely(ret != 0))
goto out_err1;
goto out_no_sys;
bdev->addr_space_rb = RB_ROOT;
ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
if (unlikely(ret != 0))
goto out_err2;
goto out_no_addr_mm;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
bdev->nice_mode = true;
INIT_LIST_HEAD(&bdev->ddestroy);
INIT_LIST_HEAD(&bdev->swap_lru);
bdev->dev_mapping = NULL;
ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
if (unlikely(ret != 0)) {
printk(KERN_ERR TTM_PFX
"Could not register buffer object swapout.\n");
goto out_err2;
}
bdev->glob = glob;
bdev->ttm_bo_extra_size =
ttm_round_pot(sizeof(struct ttm_tt)) +
ttm_round_pot(sizeof(struct ttm_backend));
bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
ttm_round_pot(sizeof(struct ttm_buffer_object));
mutex_lock(&glob->device_list_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&glob->device_list_mutex);
return 0;
out_err2:
out_no_addr_mm:
ttm_bo_clean_mm(bdev, 0);
out_err1:
__free_page(bdev->dummy_read_page);
out_err0:
out_no_sys:
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
@ -1607,21 +1699,21 @@ void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
struct ttm_bo_device *bdev =
container_of(shrink, struct ttm_bo_device, shrink);
struct ttm_bo_global *glob =
container_of(shrink, struct ttm_bo_global, shrink);
struct ttm_buffer_object *bo;
int ret = -EBUSY;
int put_count;
uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
while (ret == -EBUSY) {
if (unlikely(list_empty(&bdev->swap_lru))) {
spin_unlock(&bdev->lru_lock);
if (unlikely(list_empty(&glob->swap_lru))) {
spin_unlock(&glob->lru_lock);
return -EBUSY;
}
bo = list_first_entry(&bdev->swap_lru,
bo = list_first_entry(&glob->swap_lru,
struct ttm_buffer_object, swap);
kref_get(&bo->list_kref);
@ -1633,16 +1725,16 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
if (unlikely(ret == -EBUSY)) {
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
ttm_bo_wait_unreserved(bo, false);
kref_put(&bo->list_kref, ttm_bo_release_list);
spin_lock(&bdev->lru_lock);
spin_lock(&glob->lru_lock);
}
}
BUG_ON(ret != 0);
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&bdev->lru_lock);
spin_unlock(&glob->lru_lock);
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
@ -1696,6 +1788,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
while (ttm_bo_swapout(&bdev->shrink) == 0)
while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
;
}

drivers/gpu/drm/ttm/ttm_bo_util.c

@ -41,9 +41,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
struct ttm_mem_reg *old_mem = &bo->mem;
if (old_mem->mm_node) {
spin_lock(&bo->bdev->lru_lock);
spin_lock(&bo->glob->lru_lock);
drm_mm_put_block(old_mem->mm_node);
spin_unlock(&bo->bdev->lru_lock);
spin_unlock(&bo->glob->lru_lock);
}
old_mem->mm_node = NULL;
}

drivers/gpu/drm/ttm/ttm_tt.c

@ -166,7 +166,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
set_page_dirty_lock(page);
ttm->pages[i] = NULL;
ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE);
ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
put_page(page);
}
ttm->state = tt_unpopulated;
@ -177,8 +177,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
struct page *p;
struct ttm_bo_device *bdev = ttm->bdev;
struct ttm_mem_global *mem_glob = bdev->mem_glob;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
int ret;
while (NULL == (p = ttm->pages[index])) {
@ -348,7 +347,7 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
printk(KERN_ERR TTM_PFX
"Erroneous page count. "
"Leaking pages.\n");
ttm_mem_global_free_page(ttm->bdev->mem_glob,
ttm_mem_global_free_page(ttm->glob->mem_glob,
cur_page);
__free_page(cur_page);
}
@ -394,7 +393,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
struct mm_struct *mm = tsk->mm;
int ret;
int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
BUG_ON(num_pages != ttm->num_pages);
BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
@ -439,8 +438,7 @@ struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
if (!ttm)
return NULL;
ttm->bdev = bdev;
ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;

include/drm/ttm/ttm_bo_api.h

@ -155,6 +155,7 @@ struct ttm_buffer_object {
* Members constant at init.
*/
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
unsigned long buffer_start;
enum ttm_bo_type type;

include/drm/ttm/ttm_bo_driver.h

@ -32,6 +32,7 @@
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "drm_mm.h"
#include "linux/workqueue.h"
#include "linux/fs.h"
@ -160,7 +161,7 @@ struct ttm_tt {
long last_lomem_page;
uint32_t page_flags;
unsigned long num_pages;
struct ttm_bo_device *bdev;
struct ttm_bo_global *glob;
struct ttm_backend *be;
struct task_struct *tsk;
unsigned long start;
@ -355,6 +356,64 @@ struct ttm_bo_driver {
void *(*sync_obj_ref) (void *sync_obj);
};
/**
* struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
*/
struct ttm_bo_global_ref {
struct ttm_global_reference ref;
struct ttm_mem_global *mem_glob;
};
/**
* struct ttm_bo_global - Buffer object driver global data.
*
* @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
* @dummy_read_page: Pointer to a dummy page used for mapping requests
* of unpopulated pages.
* @shrink: A shrink callback object used for buffer object swap.
* @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
* used by a buffer object. This is excluding page arrays and backing pages.
* @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
* @device_list_mutex: Mutex protecting the device list.
* This mutex is held while traversing the device list for pm options.
* @lru_lock: Spinlock protecting the bo subsystem lru lists.
* @device_list: List of buffer object devices.
* @swap_lru: Lru list of buffer objects used for swapping.
*/
struct ttm_bo_global {
/**
* Constant after init.
*/
struct kobject kobj;
struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
struct ttm_mem_shrink shrink;
size_t ttm_bo_extra_size;
size_t ttm_bo_size;
struct mutex device_list_mutex;
spinlock_t lru_lock;
/**
* Protected by device_list_mutex.
*/
struct list_head device_list;
/**
* Protected by the lru_lock.
*/
struct list_head swap_lru;
/**
* Internal protection.
*/
atomic_t bo_count;
};
#define TTM_NUM_MEM_TYPES 8
#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
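For orientation, a sketch of how the three objects relate after this patch,
drawn from the definitions above:

	/*
	 *   struct ttm_mem_global   -- system-wide memory accounting
	 *            ^
	 *            | glob->mem_glob
	 *   struct ttm_bo_global    -- system-wide: swap_lru, lru_lock,
	 *            ^                 shrink, bo_count, device_list
	 *            | bdev->glob
	 *   struct ttm_bo_device    -- per device: man[], ddestroy,
	 *                              address space, driver
	 */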
@ -363,16 +422,7 @@ struct ttm_bo_driver {
/**
* struct ttm_bo_device - Buffer object driver device-specific data.
*
* @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
* @count: Current number of buffer object.
* @pages: Current number of pinned pages.
* @dummy_read_page: Pointer to a dummy page used for mapping requests
* of unpopulated pages.
* @shrink: A shrink callback object used for buffer object swap.
* @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
* used by a buffer object. This is excluding page arrays and backing pages.
* @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
* @man: An array of mem_type_managers.
* @addr_space_mm: Range manager for the device address space.
* lru_lock: Spinlock that protects the buffer+device lru lists and
@ -390,32 +440,21 @@ struct ttm_bo_device {
/*
* Constant after bo device init / atomic.
*/
struct ttm_mem_global *mem_glob;
struct list_head device_list;
struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
struct page *dummy_read_page;
struct ttm_mem_shrink shrink;
size_t ttm_bo_extra_size;
size_t ttm_bo_size;
rwlock_t vm_lock;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
/*
* Protected by the vm lock.
*/
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
struct rb_root addr_space_rb;
struct drm_mm addr_space_mm;
/*
* Might want to change this to one lock per manager.
*/
spinlock_t lru_lock;
/*
* Protected by the lru lock.
* Protected by the global:lru lock.
*/
struct list_head ddestroy;
struct list_head swap_lru;
/*
* Protected by load / firstopen / lastclose /unload sync.
@ -629,6 +668,9 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
unsigned long *bus_offset,
unsigned long *bus_size);
extern void ttm_bo_global_release(struct ttm_global_reference *ref);
extern int ttm_bo_global_init(struct ttm_global_reference *ref);
extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
@ -646,7 +688,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
* !0: Failure.
*/
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_mem_global *mem_glob,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
uint64_t file_page_offset);