linux_dsm_epyc7002/arch/powerpc/platforms/powernv/pci-ioda-tce.c
Alexey Kardashevskiy a68bd1267b powerpc/powernv/ioda: Allocate indirect TCE levels on demand
At the moment we allocate the entire TCE table, twice (the hardware part and
the userspace translation cache). This normally works because we normally
have contiguous memory and the guest will map the entire RAM for 64bit DMA.

However if we have sparse RAM (one example is a memory device), then
we will allocate TCEs which will never be used as the guest only maps
actual memory for DMA. If it is a single level TCE table, there is nothing
we can really do, but if it is a multilevel table, we can skip allocating
TCEs we know we won't need.

This adds the ability to allocate only the first level, saving memory.

This changes iommu_table::free() to avoid allocating an extra level;
iommu_table::set() will do this when needed.

This adds an @alloc parameter to iommu_table::exchange() to tell the callback
whether it may allocate an extra level; the flag is set to "false" for
the realmode KVM handlers of H_PUT_TCE hcalls, in which case the callback
returns H_TOO_HARD.
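
As a minimal sketch of the intended call pattern (these call sites are
illustrative only, not part of this patch; tbl and index are assumed to
be valid): the virtual-mode path passes "true" and may allocate a missing
level, while the realmode path passes "false" and bounces the hcall back:

	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	int ret;

	/* virtual mode: allowed to allocate a missing indirect level */
	ret = pnv_tce_xchg(tbl, index, &hpa, &dir, true);

	/*
	 * real mode: must not allocate; H_TOO_HARD tells KVM to retry
	 * the hcall in virtual mode
	 */
	ret = pnv_tce_xchg(tbl, index, &hpa, &dir, false);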

This still requires the entire table to be counted in mm::locked_vm.

To be conservative, this only does on-demand allocation when
the userspace cache table is requested, which is the case for VFIO.

The example math for a system replicating a powernv setup with NVLink2
in a guest:
16GB RAM mapped at 0x0
128GB GPU RAM window (16GB of actual RAM) mapped at 0x244000000000

the table to cover it all with 64K pages takes:
(((0x244000000000 + 0x2000000000) >> 16)*8)>>20 = 4656MB

If we allocate only the necessary TCE levels, we will only need:
(((0x400000000 + 0x400000000) >> 16)*8)>>20 = 4MB (plus some for indirect
levels).

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2018-07-16 22:53:11 +10:00

// SPDX-License-Identifier: GPL-2.0+
/*
 * TCE helpers for IODA PCI/PCIe on PowerNV platforms
 *
 * Copyright 2018 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/iommu.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include "pci.h"

void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
		void *tce_mem, u64 tce_size,
		u64 dma_offset, unsigned int page_shift)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}

static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
{
	struct page *tce_mem = NULL;
	__be64 *addr;

	tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
	if (!tce_mem) {
		pr_err("Failed to allocate a TCE memory, level shift=%d\n",
				shift);
		return NULL;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, 1UL << shift);

	return addr;
}
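
/*
 * Walks the multilevel TCE table down to the entry for @idx and returns
 * its address, or NULL when an intermediate level is missing and @alloc
 * does not permit allocating it on demand. @user selects the userspace
 * view (it_userspace) instead of the hardware table at it_base.
 */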
static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
{
	__be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
	int level = tbl->it_indirect_levels;
	const long shift = ilog2(tbl->it_level_size);
	unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

	while (level) {
		int n = (idx & mask) >> (level * shift);
		unsigned long tce;

		if (tmp[n] == 0) {
			__be64 *tmp2;

			if (!alloc)
				return NULL;

			tmp2 = pnv_alloc_tce_level(tbl->it_nid,
					ilog2(tbl->it_level_size) + 3);
			if (!tmp2)
				return NULL;

			tmp[n] = cpu_to_be64(__pa(tmp2) |
					TCE_PCI_READ | TCE_PCI_WRITE);
		}
		tce = be64_to_cpu(tmp[n]);

		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
		idx &= ~mask;
		mask >>= shift;
		--level;
	}

	return tmp + idx;
}

int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		unsigned long attrs)
{
	u64 proto_tce = iommu_direction_to_tce_perm(direction);
	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
	long i;

	if (proto_tce & TCE_PCI_WRITE)
		proto_tce |= TCE_PCI_READ;

	for (i = 0; i < npages; i++) {
		unsigned long newtce = proto_tce |
			((rpn + i) << tbl->it_page_shift);
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, false, idx, true)) = cpu_to_be64(newtce);
	}

	return 0;
}

#ifdef CONFIG_IOMMU_API
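/*
 * Atomically exchanges the TCE at @index with a value built from @hpa
 * and @direction, returning the old HPA and direction. With alloc=false
 * (the realmode H_PUT_TCE path) a missing indirect level is not
 * allocated and H_TOO_HARD is returned so KVM retries the hcall in
 * virtual mode; with alloc=true an allocation failure is H_HARDWARE.
 */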
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction,
		bool alloc)
{
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *hpa | proto_tce, oldtce;
	unsigned long idx = index - tbl->it_offset;
	__be64 *ptce = NULL;

	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	if (*direction == DMA_NONE) {
		ptce = pnv_tce(tbl, false, idx, false);
		if (!ptce) {
			*hpa = 0;
			return 0;
		}
	}

	if (!ptce) {
		ptce = pnv_tce(tbl, false, idx, alloc);
		if (!ptce)
			return alloc ? H_HARDWARE : H_TOO_HARD;
	}

	if (newtce & TCE_PCI_WRITE)
		newtce |= TCE_PCI_READ;

	oldtce = be64_to_cpu(xchg(ptce, cpu_to_be64(newtce)));
	*hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(oldtce);

	return 0;
}

__be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index, bool alloc)
{
	if (WARN_ON_ONCE(!tbl->it_userspace))
		return NULL;

	return pnv_tce(tbl, true, index - tbl->it_offset, alloc);
}
#endif

void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long idx = index - tbl->it_offset + i;
		__be64 *ptce = pnv_tce(tbl, false, idx, false);

		if (ptce)
			*ptce = cpu_to_be64(0);
	}
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	__be64 *ptce = pnv_tce(tbl, false, index - tbl->it_offset, false);

	if (!ptce)
		return 0;

	return be64_to_cpu(*ptce);
}
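
/*
 * Recursively frees one level of a TCE table: entries with the
 * read/write bits set point at a lower level which is freed first;
 * entries of on-demand levels which were never allocated are skipped.
 */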
static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
		unsigned long size, unsigned int levels)
{
	const unsigned long addr_ul = (unsigned long) addr &
			~(TCE_PCI_READ | TCE_PCI_WRITE);

	if (levels) {
		long i;
		u64 *tmp = (u64 *) addr_ul;

		for (i = 0; i < size; ++i) {
			unsigned long hpa = be64_to_cpu(tmp[i]);

			if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
				continue;

			pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
					levels - 1);
		}
	}

	free_pages(addr_ul, get_order(size << 3));
}

void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
{
	const unsigned long size = tbl->it_indirect_levels ?
			tbl->it_level_size : tbl->it_size;

	if (!tbl->it_size)
		return;

	pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
			tbl->it_indirect_levels);
	if (tbl->it_userspace) {
		pnv_pci_ioda2_table_do_free_pages(tbl->it_userspace, size,
				tbl->it_indirect_levels);
	}
}
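
/*
 * Recursively allocates @levels levels of a TCE table. @current_offset
 * accumulates the bytes allocated for the lowest level; once it reaches
 * @limit (the useful table size), no further entries of the current
 * level are populated. @total_allocated counts pages of every level.
 */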
static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned int shift,
		unsigned int levels, unsigned long limit,
		unsigned long *current_offset, unsigned long *total_allocated)
{
	__be64 *addr, *tmp;
	unsigned long allocated = 1UL << shift;
	unsigned int entries = 1UL << (shift - 3);
	long i;

	addr = pnv_alloc_tce_level(nid, shift);
	*total_allocated += allocated;

	--levels;
	if (!levels) {
		*current_offset += allocated;
		return addr;
	}

	for (i = 0; i < entries; ++i) {
		tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
				levels, limit, current_offset, total_allocated);
		if (!tmp)
			break;

		addr[i] = cpu_to_be64(__pa(tmp) |
				TCE_PCI_READ | TCE_PCI_WRITE);

		if (*current_offset >= limit)
			break;
	}

	return addr;
}
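
/*
 * Creates a TCE table for @window_size mapped at @bus_offset. When a
 * userspace view is requested (the VFIO case) and the window is bigger
 * than 4GB, only the first level is allocated here (tmplevels = 1) and
 * the missing indirect levels are added on demand from pnv_tce().
 */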
long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
		__u32 page_shift, __u64 window_size, __u32 levels,
		bool alloc_userspace_copy, struct iommu_table *tbl)
{
	void *addr, *uas = NULL;
	unsigned long offset = 0, level_shift, total_allocated = 0;
	unsigned long total_allocated_uas = 0;
	const unsigned int window_shift = ilog2(window_size);
	unsigned int entries_shift = window_shift - page_shift;
	unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
			PAGE_SHIFT);
	const unsigned long tce_table_size = 1UL << table_shift;
	unsigned int tmplevels = levels;

	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
		return -EINVAL;

	if (!is_power_of_2(window_size))
		return -EINVAL;

	if (alloc_userspace_copy && (window_size > (1ULL << 32)))
		tmplevels = 1;

	/* Adjust direct table size from window_size and levels */
	entries_shift = (entries_shift + levels - 1) / levels;
	level_shift = entries_shift + 3;
	level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);

	if ((level_shift - 3) * levels + page_shift >= 60)
		return -EINVAL;

	/* Allocate TCE table */
	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
			tmplevels, tce_table_size, &offset, &total_allocated);

	/* addr==NULL means that the first level allocation failed */
	if (!addr)
		return -ENOMEM;

	/*
	 * The first level was allocated but some lower level failed and
	 * we did not allocate as much as we wanted;
	 * release the partially allocated table.
	 */
	if (tmplevels == levels && offset < tce_table_size)
		goto free_tces_exit;

	/* Allocate userspace view of the TCE table */
	if (alloc_userspace_copy) {
		offset = 0;
		uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
				tmplevels, tce_table_size, &offset,
				&total_allocated_uas);
		if (!uas)
			goto free_tces_exit;
		if (tmplevels == levels && (offset < tce_table_size ||
				total_allocated_uas != total_allocated))
			goto free_uas_exit;
	}

	/* Setup linux iommu table */
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
			page_shift);
	tbl->it_level_size = 1ULL << (level_shift - 3);
	tbl->it_indirect_levels = levels - 1;
	tbl->it_allocated_size = total_allocated;
	tbl->it_userspace = uas;
	tbl->it_nid = nid;

	pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
			window_size, tce_table_size, bus_offset, tbl->it_base,
			tbl->it_userspace, tmplevels, levels);

	return 0;

free_uas_exit:
	pnv_pci_ioda2_table_do_free_pages(uas,
			1ULL << (level_shift - 3), levels - 1);
free_tces_exit:
	pnv_pci_ioda2_table_do_free_pages(addr,
			1ULL << (level_shift - 3), levels - 1);

	return -ENOMEM;
}
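
/*
 * Table/group links are added with pnv_pci_link_table_and_group() and
 * removed via RCU so that lockless readers walking it_group_list
 * remain safe.
 */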
static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
	struct iommu_table_group_link *tgl = container_of(head,
			struct iommu_table_group_link, rcu);

	kfree(tgl);
}

void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	long i;
	bool found;
	struct iommu_table_group_link *tgl;

	if (!tbl || !table_group)
		return;

	/* Remove link to a group from table's list of attached groups */
	found = false;
	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		if (tgl->table_group == table_group) {
			list_del_rcu(&tgl->next);
			call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
			found = true;
			break;
		}
	}
	if (WARN_ON(!found))
		return;

	/* Clean a pointer to iommu_table in iommu_table_group::tables[] */
	found = false;
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (table_group->tables[i] == tbl) {
			table_group->tables[i] = NULL;
			found = true;
			break;
		}
	}
	WARN_ON(!found);
}

long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	struct iommu_table_group_link *tgl = NULL;

	if (WARN_ON(!tbl || !table_group))
		return -EINVAL;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			node);
	if (!tgl)
		return -ENOMEM;

	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[num] = tbl;

	return 0;
}