diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1c636723823c..2680bdc97c1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -157,12 +157,12 @@ static int
 nouveau_ttm_init_vram(struct nouveau_drm *drm)
 {
 	struct nvif_mmu *mmu = &drm->client.mmu;
-
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
-
 		/* Some BARs do not support being ioremapped WC */
 		const u8 type = mmu->type[drm->ttm.type_vram].type;
+		struct ttm_mem_type_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);
+		if (!man)
+			return -ENOMEM;
 
 		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
@@ -174,8 +174,10 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm)
 
 		man->func = &nouveau_vram_manager;
 		man->use_io_reserve_lru = true;
+
 		ttm_mem_type_manager_init(&drm->ttm.bdev, man,
 					  drm->gem.vram_available >> PAGE_SHIFT);
+		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
 		ttm_mem_type_manager_set_used(man, true);
 		return 0;
 	} else {
@@ -195,6 +197,8 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm)
 		ttm_mem_type_manager_disable(man);
 		ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man);
 		ttm_mem_type_manager_cleanup(man);
+		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
+		kfree(man);
 	} else
 		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
 }
@@ -202,30 +206,43 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm)
 static int
 nouveau_ttm_init_gtt(struct nouveau_drm *drm)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT);
+	struct ttm_mem_type_manager *man;
 	unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
-	man->use_tt = true;
+	unsigned available_caching, default_caching;
+	const struct ttm_mem_type_manager_func *func = NULL;
 
 	if (drm->agp.bridge) {
-		man->available_caching = TTM_PL_FLAG_UNCACHED |
+		available_caching = TTM_PL_FLAG_UNCACHED |
 			TTM_PL_FLAG_WC;
-		man->default_caching = TTM_PL_FLAG_WC;
+		default_caching = TTM_PL_FLAG_WC;
 	} else {
-		man->available_caching = TTM_PL_MASK_CACHING;
-		man->default_caching = TTM_PL_FLAG_CACHED;
+		available_caching = TTM_PL_MASK_CACHING;
+		default_caching = TTM_PL_FLAG_CACHED;
 	}
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
-		man->func = &nouveau_gart_manager;
+		func = &nouveau_gart_manager;
 	else if (!drm->agp.bridge)
-		man->func = &nv04_gart_manager;
+		func = &nv04_gart_manager;
 	else
 		return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT,
-					  TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
-					  TTM_PL_FLAG_WC, true,
+					  available_caching, default_caching,
+					  true,
 					  size_pages);
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (!man)
+		return -ENOMEM;
+
+	man->func = func;
+	man->available_caching = available_caching;
+	man->default_caching = default_caching;
+	man->use_tt = true;
 	ttm_mem_type_manager_init(&drm->ttm.bdev, man, size_pages);
+
+	ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
 	ttm_mem_type_manager_set_used(man, true);
+
 	return 0;
 }
 
@@ -241,6 +258,8 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
 		ttm_mem_type_manager_disable(man);
 		ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man);
 		ttm_mem_type_manager_cleanup(man);
+		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
+		kfree(man);
 	}
 }
 
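Reviewer aid, not part of the patch: the sketch below shows the manager lifecycle this change converts both TTM_PL_VRAM and TTM_PL_TT to, in isolation. The driver now allocates its own struct ttm_mem_type_manager, initialises it, publishes it with ttm_set_driver_manager(), and on teardown unpublishes and frees it. The TTM calls are the ones visible in the diff above (the transitional ttm_mem_type_manager_* interfaces from this series); mydrv_init_manager(), mydrv_fini_manager(), mydrv_manager_func and the bdev/size_pages parameters are hypothetical stand-ins for the nouveau-specific plumbing, the header choices are a best guess, and the snippet is not compile-tested.

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical per-driver manager ops; nouveau uses nouveau_gart_manager etc. */
extern const struct ttm_mem_type_manager_func mydrv_manager_func;

static int mydrv_init_manager(struct ttm_bo_device *bdev, unsigned long size_pages)
{
	/* Driver owns the allocation instead of using a bdev-embedded manager. */
	struct ttm_mem_type_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);

	if (!man)
		return -ENOMEM;

	man->func = &mydrv_manager_func;
	man->available_caching = TTM_PL_MASK_CACHING;
	man->default_caching = TTM_PL_FLAG_CACHED;
	man->use_tt = true;

	ttm_mem_type_manager_init(bdev, man, size_pages);

	/* Publish the manager so ttm_manager_type(bdev, TTM_PL_TT) finds it. */
	ttm_set_driver_manager(bdev, TTM_PL_TT, man);
	ttm_mem_type_manager_set_used(man, true);
	return 0;
}

static void mydrv_fini_manager(struct ttm_bo_device *bdev)
{
	struct ttm_mem_type_manager *man = ttm_manager_type(bdev, TTM_PL_TT);

	/* Teardown mirrors init: disable, drain, clean up, unpublish, free. */
	ttm_mem_type_manager_disable(man);
	ttm_mem_type_manager_force_list_clean(bdev, man);
	ttm_mem_type_manager_cleanup(man);
	ttm_set_driver_manager(bdev, TTM_PL_TT, NULL);
	kfree(man);
}

In the sketch, re-registering NULL before kfree() keeps ttm_manager_type() from handing out a dangling pointer after teardown, which matches the ordering the patch uses in nouveau_ttm_fini_vram() and nouveau_ttm_fini_gtt().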