mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-04 09:36:52 +07:00
0fb5fe874c
Systems may contain heterogeneous IOMMUs supporting differing minimum page sizes, which may also not be common with the CPU page size. Thus it is practical to have an explicit notion of IOVA granularity to simplify handling of mapping and allocation constraints. As an initial step, move the IOVA page granularity from an implicit compile-time constant to a per-domain property so we can make use of it in IOVA domain context at runtime. To keep the abstraction tidy, extend the little API of inline iova_* helpers to parallel some of the equivalent PAGE_* macros. Signed-off-by: Robin Murphy <robin.murphy@arm.com> Signed-off-by: Joerg Roedel <jroedel@suse.de>
92 lines
2.6 KiB
C
92 lines
2.6 KiB
C
/*
|
|
* Copyright (c) 2006, Intel Corporation.
|
|
*
|
|
* This file is released under the GPLv2.
|
|
*
|
|
* Copyright (C) 2006-2008 Intel Corporation
|
|
* Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
|
|
*
|
|
*/
|
|
|
|
#ifndef _IOVA_H_
|
|
#define _IOVA_H_
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/rbtree.h>
|
|
#include <linux/dma-mapping.h>
|
|
|
|
/*
 * One allocated range of IOVA space, kept as a node in the owning
 * domain's rbtree. The range is inclusive, [pfn_lo, pfn_hi], expressed
 * in IOVA page frames (units of the domain's granule).
 */
struct iova {
	struct rb_node node;		/* linkage into iova_domain::rbroot */
	unsigned long pfn_hi;	/* highest pfn of the range (inclusive) */
	unsigned long pfn_lo;	/* lowest pfn of the range (inclusive) */
};
|
|
|
|
/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t iova_rbtree_lock;	/* Lock to protect update of rbtree */
	struct rb_root rbroot;		/* iova domain rbtree root */
	struct rb_node *cached32_node;	/* Save last alloced node */
	unsigned long granule;		/* pfn granularity for this domain
					 * (IOVA page size in bytes; used by
					 * iova_shift()/iova_mask() below, so
					 * presumably a power of two — set via
					 * init_iova_domain()) */
	unsigned long start_pfn;	/* Lower limit for this domain */
	unsigned long dma_32bit_pfn;	/* highest pfn reachable by 32-bit DMA */
};
|
|
|
|
static inline unsigned long iova_size(struct iova *iova)
|
|
{
|
|
return iova->pfn_hi - iova->pfn_lo + 1;
|
|
}
|
|
|
|
static inline unsigned long iova_shift(struct iova_domain *iovad)
|
|
{
|
|
return __ffs(iovad->granule);
|
|
}
|
|
|
|
static inline unsigned long iova_mask(struct iova_domain *iovad)
|
|
{
|
|
return iovad->granule - 1;
|
|
}
|
|
|
|
/* Byte offset of @iova within its IOVA page. */
static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	dma_addr_t mask = iova_mask(iovad);

	return iova & mask;
}
|
|
|
|
/* Round @size up to a whole number of IOVA pages for this domain. */
static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	unsigned long granule = iovad->granule;

	return ALIGN(size, granule);
}
|
|
|
|
static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
|
|
{
|
|
return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
|
|
}
|
|
|
|
/* Convert a DMA address into the domain's pfn units (granule frames). */
static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	unsigned long shift = iova_shift(iovad);

	return iova >> shift;
}
|
|
|
|
/* Set up / tear down the global kmem cache backing struct iova allocations. */
int iommu_iova_cache_init(void);
void iommu_iova_cache_destroy(void);

/* Raw struct iova allocation/free (no rbtree bookkeeping). */
struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
/* Look up the iova containing @pfn and release it (locked variant below). */
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
/*
 * Allocate @size pfns below @limit_pfn; when @size_aligned, the range is
 * aligned to its size. Returns NULL on exhaustion — TODO confirm against
 * the implementation in iova.c.
 */
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned);
/* Mark [pfn_lo, pfn_hi] as reserved so the allocator will not hand it out. */
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
	unsigned long pfn_hi);
/* Replicate @from's reserved ranges into @to. */
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
/*
 * Initialise a domain with the given pfn granularity (@granule, the IOVA
 * page size in bytes), lower allocation bound and 32-bit DMA boundary.
 */
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit);
/* Find the allocated iova containing @pfn, or NULL. */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
/* Release all iovas held by the domain. */
void put_iova_domain(struct iova_domain *iovad);
/* Split @iova so that [pfn_lo, pfn_hi] can be removed from it. */
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
|
|
|
|
#endif
|