linux_dsm_epyc7002/drivers/gpu/drm/ttm/ttm_bo_util.c
Last commit: 9f68e3655a "drm pull for 5.6-rc1" (Linus Torvalds)

Merge tag 'drm-next-2020-01-30' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "This is the main pull request for graphics for 5.6. Usual selection of
  changes all over.

  I've got one outstanding vmwgfx pull that touches mm, so I've kept it
  separate until after all of this lands. I'll try to get it to you soon
  after this, but it might be early next week (nothing wrong with the
  code, my schedule is just messy).

  This also hits a lot of fbdev drivers with some cleanups.

  Other notables:
   - vulkan timeline semaphore support added to syncobjs
   - nouveau turing secureboot/graphics support
   - Displayport MST display stream compression support

  Detailed summary:

  uapi:
   - dma-buf heaps added (and fixed)
   - command line: add support for panel orientation
   - command line: allow overriding penguin count

  drm:
   - mipi dsi definition updates
   - lockdep annotations for dma_resv
   - remove dma-buf kmap/kunmap support
   - constify fb_ops in all fbdev drivers
   - MST fix for daisy-chained hotplug
   - CTA-861-G modes with VIC >= 193 added
   - fix drm_panel_of_backlight export
   - LVDS decoder support
   - more device based logging support
   - scanline alignment for dumb buffers
   - MST DSC helpers

  scheduler:
   - documentation fixes
   - job distribution improvements

  panel:
   - Logic PD type 28 panel support
   - Jimax8729d MIPI-DSI
   - Ingenic JZ4770
   - generic DSI devicetree bindings
   - Sony ACX424AKP panel
   - Leadtek LTK500HD1829
   - Xinpeng XPP055C272
   - AUO B116XAK01
   - GiantPlus GPM940B0
   - BOE NV140FHM-N49
   - Satoz SAT050AT40H12R2
   - Sharp LS020B1DD01D panels.

  ttm:
   - use blocking WW lock

  i915:
   - hw/uapi state separation
   - Lock annotation improvements
   - selftest improvements
   - ICL/TGL DSI VDSC support
   - VBT parsing improvements
   - Display refactoring
   - DSI updates + fixes
   - HDCP 2.2 for CFL
   - CML PCI ID fixes
   - GLK+ fbc fix
   - PSR fixes
   - GEN/GT refactor improvements
   - DP MST fixes
   - switch context id alloc to xarray
   - workaround updates
   - LMEM debugfs support
   - tiled monitor fixes
   - ICL+ clock gating programming removed
   - DP MST disable sequence fixed
   - LMEM discontiguous object maps
   - prefaulting for discontiguous objects
   - use LMEM for dumb buffers if possible
   - add LMEM mmap support

  amdgpu:
   - enable sync object timelines for vulkan
   - MST atomic routines
   - enable MST DSC support
   - add DMCUB display microengine support
   - DC OEM i2c support
   - Renoir DC fixes
   - Initial HDCP 2.x support
   - BACO support for Arcturus
   - Use BACO for runtime PM power save
   - gfxoff on navi10
   - gfx10 golden updates and fixes
   - DCN support on POWER
   - GFXOFF for raven1 refresh
   - MM engine idle handlers cleanup
   - 10bpc EDP panel fixes
   - renoir watermark fixes
   - SR-IOV fixes
   - Arcturus VCN fixes
   - GDDR6 training fixes
   - freesync fixes
   - Pollock support

  amdkfd:
   - unify more codepath with amdgpu
   - use KIQ to setup HIQ rather than MMIO

  radeon:
   - fix vma fault handler race
   - PPC DMA fix
   - register check fixes for r100/r200

  nouveau:
   - mmap_sem vs dma_resv fix
   - rewrite the ACR secure boot code for Turing
   - TU10x graphics engine support (TU11x pending)
   - Page kind mapping for Turing
   - 10-bit LUT support
   - GP10B Tegra fixes
   - HD audio regression fix

  hisilicon/hibmc:
   - use generic fbdev code and helpers

  rockchip:
   - dsi/px30 support

  virtio:
   - fb damage support
   - make some functions static

  vc4:
   - use dma_resv lock wrappers

  msm:
   - use dma_resv lock wrappers
   - sc7180 display + DSI support
   - a618 support
   - UBWC support improvements

  vmwgfx:
   - updates + new logging uapi

  exynos:
   - enable/disable callback cleanups

  etnaviv:
   - use dma_resv lock wrappers

  atmel-hlcdc:
   - clock fixes

  mediatek:
   - cmdq support
   - non-smooth cursor fixes
   - ctm property support

  sun4i:
   - suspend support
   - A64 mipi dsi support

  rcar-du:
   - Color management module support
   - LVDS encoder dual-link support
   - R8A77980 support

  analogix:
   - add support for anx6345

  ast:
   - atomic modeset support
   - primary plane garbage fix

  arcpgu:
   - fixes for fourcc handling

  tegra:
   - minor fixes and improvements

  mcde:
   - vblank support

  meson:
   - OSD1 plane AFBC commit

  gma500:
   - add pageflip support
   - remove global drm_dev

  komeda:
   - tweak debugfs output
   - d32 support
   - runtime PM support

  udl:
   - use generic shmem helpers
   - cleanup and fixes"

* tag 'drm-next-2020-01-30' of git://anongit.freedesktop.org/drm/drm: (1998 commits)
  drm/nouveau/fb/gp102-: allow module to load even when scrubber binary is missing
  drm/nouveau/acr: return error when registering LSF if ACR not supported
  drm/nouveau/disp/gv100-: not all channel types support reporting error codes
  drm/nouveau/disp/nv50-: prevent oops when no channel method map provided
  drm/nouveau: support synchronous pushbuf submission
  drm/nouveau: signal pending fences when channel has been killed
  drm/nouveau: reject attempts to submit to dead channels
  drm/nouveau: zero vma pointer even if we only unreference it rather than free
  drm/nouveau: Add HD-audio component notifier support
  drm/nouveau: fix build error without CONFIG_IOMMU_API
  drm/nouveau/kms/nv04: remove set but not used variable 'width'
  drm/nouveau/kms/nv50: remove set but not unused variable 'nv_connector'
  drm/nouveau/mmu: fix comptag memory leak
  drm/nouveau/gr/gp10b: Use gp100_grctx and gp100_gr_zbc
  drm/nouveau/pmu/gm20b,gp10b: Fix Falcon bootstrapping
  drm/exynos: Rename Exynos to lowercase
  drm/exynos: change callback names
  drm/mst: Don't do atomic checks over disabled managers
  drm/amdgpu: add the lost mutex_init back
  drm/amd/display: skip opp blank or unblank if test pattern enabled
  ...


/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
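/*
 * "Ghost" buffer object created by ttm_buffer_object_transfer(): it takes
 * over the old backing store of a moving BO and keeps a reference on the
 * original object until that store can be released.
 */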
struct ttm_transfer_obj {
struct ttm_buffer_object base;
struct ttm_buffer_object *bo;
};
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
ttm_bo_mem_put(bo, &bo->mem);
}
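/*
 * Move a buffer along the TTM (page-based) path: if the old placement is
 * not system memory, wait for the GPU, unbind the ttm_tt and free the old
 * node; then adjust the caching attributes and, if the new placement is
 * not system memory, bind the ttm_tt there.
 */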
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
pr_err("Failed to expire sync object before unbinding TTM\n");
return ret;
}
ttm_tt_unbind(ttm);
ttm_bo_free_old_node(bo);
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
TTM_PL_MASK_MEM);
old_mem->mem_type = TTM_PL_SYSTEM;
}
ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
if (unlikely(ret != 0))
return ret;
if (new_mem->mem_type != TTM_PL_SYSTEM) {
ret = ttm_tt_bind(ttm, new_mem, ctx);
if (unlikely(ret != 0))
return ret;
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
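/*
 * io_reserve locking helpers: drivers whose io_mem_reserve()/io_mem_free()
 * need serialization take io_reserve_mutex here; managers that advertise
 * io_reserve_fastpath skip the lock entirely.
 */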
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
if (likely(man->io_reserve_fastpath))
return 0;
if (interruptible)
return mutex_lock_interruptible(&man->io_reserve_mutex);
mutex_lock(&man->io_reserve_mutex);
return 0;
}
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
if (likely(man->io_reserve_fastpath))
return;
mutex_unlock(&man->io_reserve_mutex);
}
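/*
 * Evict the least recently used entry from the manager's io_reserve LRU by
 * unmapping it, so its aperture space can be reused by a new reservation.
 * Returns -EAGAIN when there is nothing left to evict.
 */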
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
struct ttm_buffer_object *bo;
if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
return -EAGAIN;
bo = list_first_entry(&man->io_reserve_lru,
struct ttm_buffer_object,
io_reserve_lru);
list_del_init(&bo->io_reserve_lru);
ttm_bo_unmap_virtual_locked(bo);
return 0;
}
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret = 0;
if (!bdev->driver->io_mem_reserve)
return 0;
if (likely(man->io_reserve_fastpath))
return bdev->driver->io_mem_reserve(bdev, mem);
if (bdev->driver->io_mem_reserve &&
mem->bus.io_reserved_count++ == 0) {
retry:
ret = bdev->driver->io_mem_reserve(bdev, mem);
if (ret == -EAGAIN) {
ret = ttm_mem_io_evict(man);
if (ret == 0)
goto retry;
}
}
return ret;
}
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
if (likely(man->io_reserve_fastpath))
return;
if (bdev->driver->io_mem_reserve &&
--mem->bus.io_reserved_count == 0 &&
bdev->driver->io_mem_free)
bdev->driver->io_mem_free(bdev, mem);
}
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
struct ttm_mem_reg *mem = &bo->mem;
int ret;
if (!mem->bus.io_reserved_vm) {
struct ttm_mem_type_manager *man =
&bo->bdev->man[mem->mem_type];
ret = ttm_mem_io_reserve(bo->bdev, mem);
if (unlikely(ret != 0))
return ret;
mem->bus.io_reserved_vm = true;
if (man->use_io_reserve_lru)
list_add_tail(&bo->io_reserve_lru,
&man->io_reserve_lru);
}
return 0;
}
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
struct ttm_mem_reg *mem = &bo->mem;
if (mem->bus.io_reserved_vm) {
mem->bus.io_reserved_vm = false;
list_del_init(&bo->io_reserve_lru);
ttm_mem_io_free(bo->bdev, mem);
}
}
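/*
 * Map the whole I/O region backing @mem into kernel address space, using a
 * write-combined mapping when the placement asks for it. For placements
 * that are not iomem, *virtual is left NULL and 0 is returned.
 */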
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
void *addr;
*virtual = NULL;
(void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bdev, mem);
ttm_mem_io_unlock(man);
if (ret || !mem->bus.is_iomem)
return ret;
if (mem->bus.addr) {
addr = mem->bus.addr;
} else {
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
return -ENOMEM;
}
}
*virtual = addr;
return 0;
}
static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void *virtual)
{
struct ttm_mem_type_manager *man;
man = &bdev->man[mem->mem_type];
if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
}
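/* Copy a single page between two ioremapped regions with 32-bit MMIO accessors. */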
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
uint32_t *dstP =
(uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
uint32_t *srcP =
(uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
int i;
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
iowrite32(ioread32(srcP++), dstP++);
return 0;
}
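/*
 * On x86 we can use kmap_atomic_prot() directly; other architectures fall
 * back to a single-page vmap() with the requested protection.
 */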
#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif
/**
* ttm_kmap_atomic_prot - Efficient kernel map of a single page with
* specified page protection.
*
* @page: The page to map.
* @prot: The page protection.
*
* This function maps a TTM page using the kmap_atomic api if available,
* otherwise falls back to vmap. The user must make sure that the
* specified page does not have an aliased mapping with a different caching
* policy unless the architecture explicitly allows it. Also mapping and
* unmapping using this api must be correctly nested. Unmapping should
* occur in the reverse order of mapping.
*/
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
return kmap_atomic(page);
else
return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);
/**
* ttm_kunmap_atomic_prot - Unmap a page that was mapped using
* ttm_kmap_atomic_prot.
*
* @addr: The virtual address from the map.
* @prot: The page protection.
*/
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
kunmap_atomic(addr);
else
__ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
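/*
 * Illustrative sketch (hypothetical caller, not taken from this file) of the
 * pair above copying one BO page out to an I/O area; the copy helpers below
 * do essentially the same thing:
 *
 *	void *va = ttm_kmap_atomic_prot(page, prot);
 *
 *	if (va) {
 *		memcpy_toio(io_dst, va, PAGE_SIZE);
 *		ttm_kunmap_atomic_prot(va, prot);
 *	}
 */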
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
{
struct page *d = ttm->pages[page];
void *dst;
if (!d)
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
dst = ttm_kmap_atomic_prot(d, prot);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
ttm_kunmap_atomic_prot(dst, prot);
return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
unsigned long page,
pgprot_t prot)
{
struct page *s = ttm->pages[page];
void *src;
if (!s)
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
src = ttm_kmap_atomic_prot(s, prot);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
ttm_kunmap_atomic_prot(src, prot);
return 0;
}
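/*
 * CPU fallback for a buffer move: map both placements and copy page by
 * page, walking backwards when source and destination overlap within the
 * same memory type.
 */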
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
unsigned long i;
unsigned long page;
unsigned long add = 0;
int dir;
ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (ret)
return ret;
ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
if (ret)
return ret;
ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
if (ret)
goto out;
/*
* Single TTM move. NOP.
*/
if (old_iomap == NULL && new_iomap == NULL)
goto out2;
/*
* Don't move nonexistent data. Clear destination instead.
*/
if (old_iomap == NULL &&
(ttm == NULL || (ttm->state == tt_unpopulated &&
!(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
}
/*
* TTM might be null for moves within the same region.
*/
if (ttm) {
ret = ttm_tt_populate(ttm, ctx);
if (ret)
goto out1;
}
add = 0;
dir = 1;
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->start < old_mem->start + old_mem->size)) {
dir = -1;
add = new_mem->num_pages - 1;
}
for (i = 0; i < new_mem->num_pages; ++i) {
page = i * dir + add;
if (old_iomap == NULL) {
pgprot_t prot = ttm_io_prot(old_mem->placement,
PAGE_KERNEL);
ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
prot);
} else if (new_iomap == NULL) {
pgprot_t prot = ttm_io_prot(new_mem->placement,
PAGE_KERNEL);
ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
prot);
} else {
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
}
if (ret)
goto out1;
}
mb();
out2:
old_copy = *old_mem;
*old_mem = *new_mem;
new_mem->mm_node = NULL;
if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(ttm);
bo->ttm = NULL;
}
out1:
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
/*
* On error, keep the mm node!
*/
if (!ret)
ttm_bo_mem_put(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
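/* Destructor for the ghost objects created by ttm_buffer_object_transfer(). */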
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
struct ttm_transfer_obj *fbo;
fbo = container_of(bo, struct ttm_transfer_obj, base);
ttm_bo_put(fbo->bo);
kfree(fbo);
}
/**
* ttm_buffer_object_transfer
*
* @bo: A pointer to a struct ttm_buffer_object.
* @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
* holding the data of @bo with the old placement.
*
* This is a utility function that may be called after an accelerated move
* has been scheduled. A new buffer object is created as a placeholder for
* the old data while it's being copied. When that buffer object is idle,
* it can be destroyed, releasing the space of the old placement.
* Returns:
* !0: Failure.
*/
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object **new_obj)
{
struct ttm_transfer_obj *fbo;
int ret;
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
return -ENOMEM;
fbo->base = *bo;
fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
ttm_bo_get(bo);
fbo->bo = bo;
/**
* Fix up members that we shouldn't copy directly:
* TODO: Explicit member copy would probably be better here.
*/
atomic_inc(&ttm_bo_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
if (bo->base.resv == &bo->base._resv)
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(&fbo->base.base._resv);
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
*new_obj = &fbo->base;
return 0;
}
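/*
 * Translate a placement's caching flags into the page protection used for
 * kernel and user-space mappings: cached placements keep the protection
 * unchanged, while write-combined and uncached placements get the
 * architecture-specific adjustments below.
 */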
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
/* Cached mappings need no adjustment */
if (caching_flags & TTM_PL_FLAG_CACHED)
return tmp;
#if defined(__i386__) || defined(__x86_64__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__mips__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
tmp = pgprot_noncached(tmp);
#endif
return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long offset,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_reg *mem = &bo->mem;
if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->placement & TTM_PL_FLAG_WC)
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long start_page,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_reg *mem = &bo->mem;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
struct ttm_tt *ttm = bo->ttm;
pgprot_t prot;
int ret;
BUG_ON(!ttm);
ret = ttm_tt_populate(ttm, &ctx);
if (ret)
return ret;
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
*/
map->bo_kmap_type = ttm_bo_map_kmap;
map->page = ttm->pages[start_page];
map->virtual = kmap(map->page);
} else {
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
}
return (!map->virtual) ? -ENOMEM : 0;
}
int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_type_manager *man =
&bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
map->virtual = NULL;
map->bo = bo;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
return -EINVAL;
(void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
ttm_mem_io_unlock(man);
if (ret)
return ret;
if (!bo->mem.bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
offset = start_page << PAGE_SHIFT;
size = num_pages << PAGE_SHIFT;
return ttm_bo_ioremap(bo, offset, size, map);
}
}
EXPORT_SYMBOL(ttm_bo_kmap);
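/*
 * Illustrative sketch of mapping a whole BO for CPU access (hypothetical
 * caller; real users live in the individual drivers). ttm_kmap_obj_virtual()
 * is the accessor from ttm_bo_api.h:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		... access ptr, using the iomem accessors when is_iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 */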
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
struct ttm_buffer_object *bo = map->bo;
struct ttm_mem_type_manager *man =
&bo->bdev->man[bo->mem.mem_type];
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
break;
case ttm_bo_map_kmap:
kunmap(map->page);
break;
case ttm_bo_map_premapped:
break;
default:
BUG();
}
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
ttm_mem_io_unlock(man);
map->virtual = NULL;
map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
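/*
 * Finish an accelerated (GPU) move: attach @fence to the BO's reservation
 * object, then either wait for it (evictions) or hand the old backing
 * store to a ghost object so that it is only released once the copy has
 * completed.
 */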
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct dma_fence *fence,
bool evict,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
struct ttm_buffer_object *ghost_obj;
dma_resv_add_excl_fence(bo->base.resv, fence);
if (evict) {
ret = ttm_bo_wait(bo, false, false);
if (ret)
return ret;
if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
ttm_bo_free_old_node(bo);
} else {
/**
* This should help pipeline ordinary buffer moves.
*
* Hang old buffer memory on a new buffer object,
* and leave it to be released when the GPU
* operation has completed.
*/
dma_fence_put(bo->moving);
bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
* needs to stay alive. Otherwise hang it on the ghost
* bo to be unbound and destroyed.
*/
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
ghost_obj->ttm = NULL;
else
bo->ttm = NULL;
dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
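/*
 * Fully pipelined version of ttm_bo_move_accel_cleanup(): evictions from
 * fixed memory do not wait either, the old node is instead tied to the
 * source manager's move fence; waiting remains only as a last resort.
 */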
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
int ret;
dma_resv_add_excl_fence(bo->base.resv, fence);
if (!evict) {
struct ttm_buffer_object *ghost_obj;
/**
* This should help pipeline ordinary buffer moves.
*
* Hang old buffer memory on a new buffer object,
* and leave it to be released when the GPU
* operation has completed.
*/
dma_fence_put(bo->moving);
bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
* needs to stay alive. Otherwise hang it on the ghost
* bo to be unbound and destroyed.
*/
if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
ghost_obj->ttm = NULL;
else
bo->ttm = NULL;
dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
/**
* BO doesn't have a TTM we need to bind/unbind. Just remember
* this eviction and free up the allocation
*/
spin_lock(&from->move_lock);
if (!from->move || dma_fence_is_later(fence, from->move)) {
dma_fence_put(from->move);
from->move = dma_fence_get(fence);
}
spin_unlock(&from->move_lock);
ttm_bo_free_old_node(bo);
dma_fence_put(bo->moving);
bo->moving = dma_fence_get(fence);
} else {
/**
* Last resort, wait for the move to be completed.
*
* Should never happen in practice.
*/
ret = ttm_bo_wait(bo, false, false);
if (ret)
return ret;
if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
ttm_bo_free_old_node(bo);
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);
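/*
 * Drop a BO's backing store without waiting: the current fences and the old
 * memory are handed to a ghost object, and the BO itself is left as an
 * empty system-memory object.
 */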
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
struct ttm_buffer_object *ghost;
int ret;
ret = ttm_buffer_object_transfer(bo, &ghost);
if (ret)
return ret;
ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
memset(&bo->mem, 0, sizeof(bo->mem));
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->ttm = NULL;
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
return 0;
}