sparc64: rename iommu_num_pages function to iommu_nr_pages
This is a preparation patch for introducing a generic iommu_num_pages function.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bdab0ba3d9
commit a7375762a5
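For context, the generic helper this rename makes room for would answer one question: how many IOMMU pages does a buffer of a given length span, starting from a possibly unaligned address? Below is a minimal sketch of such a helper; the three-argument form with an explicit io_page_size parameter is an assumption for illustration and is not part of this patch.

/* Sketch only: a generic page-count helper of the kind this rename prepares
 * for. The explicit io_page_size argument is an assumption, not taken from
 * this patch. */
static unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
				     unsigned long io_page_size)
{
	/* Bytes from the start of the first IO page to the end of the buffer. */
	unsigned long size = (addr & (io_page_size - 1)) + len;

	/* Round up to whole IO pages. */
	return (size + io_page_size - 1) / io_page_size;
}

With the arch-private sparc64 helper renamed to iommu_nr_pages, a generic iommu_num_pages like the sketch above can later be introduced without a symbol clash.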
@@ -575,7 +575,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 		}
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
-		npages = iommu_num_pages(paddr, slen);
+		npages = iommu_nr_pages(paddr, slen);
 		entry = iommu_range_alloc(dev, iommu, npages, &handle);
 
 		/* Handle failure */
@@ -647,7 +647,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 			iopte_t *base;
 
 			vaddr = s->dma_address & IO_PAGE_MASK;
-			npages = iommu_num_pages(s->dma_address, s->dma_length);
+			npages = iommu_nr_pages(s->dma_address, s->dma_length);
 			iommu_range_free(iommu, vaddr, npages);
 
 			entry = (vaddr - iommu->page_table_map_base)
@@ -715,7 +715,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 		if (!len)
 			break;
-		npages = iommu_num_pages(dma_handle, len);
+		npages = iommu_nr_pages(dma_handle, len);
 		iommu_range_free(iommu, dma_handle, npages);
 
 		entry = ((dma_handle - iommu->page_table_map_base)

@@ -35,7 +35,7 @@
 
 #define SG_ENT_PHYS_ADDRESS(SG)	(__pa(sg_virt((SG))))
 
-static inline unsigned long iommu_num_pages(unsigned long vaddr,
+static inline unsigned long iommu_nr_pages(unsigned long vaddr,
 					    unsigned long slen)
 {
 	unsigned long npages;
@@ -53,7 +53,7 @@ static inline int is_span_boundary(unsigned long entry,
 				   struct scatterlist *sg)
 {
 	unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs);
-	int nr = iommu_num_pages(paddr, outs->dma_length + sg->length);
+	int nr = iommu_nr_pages(paddr, outs->dma_length + sg->length);
 
 	return iommu_is_span_boundary(entry, nr, shift, boundary_size);
 }

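The two hunks above only catch the helper's signature and the first line of its body; the rest falls outside the diff context. As a rough sketch (assuming the IO_PAGE_ALIGN, IO_PAGE_MASK and IO_PAGE_SHIFT macros that the surrounding code already uses), the renamed helper rounds the [vaddr, vaddr + slen) span out to whole IO pages and returns that count:

/* Sketch of the renamed helper's body; the exact lines are outside the
 * diff context shown above, so treat this as illustrative only. */
static inline unsigned long iommu_nr_pages(unsigned long vaddr,
					   unsigned long slen)
{
	unsigned long npages;

	/* Round the end up and the start down to IO page boundaries,
	 * then convert the byte span into a page count. */
	npages = IO_PAGE_ALIGN(vaddr + slen) - (vaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	return npages;
}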
@@ -384,7 +384,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		}
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
-		npages = iommu_num_pages(paddr, slen);
+		npages = iommu_nr_pages(paddr, slen);
 		entry = iommu_range_alloc(dev, iommu, npages, &handle);
 
 		/* Handle failure */
@@ -461,7 +461,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			unsigned long vaddr, npages;
 
 			vaddr = s->dma_address & IO_PAGE_MASK;
-			npages = iommu_num_pages(s->dma_address, s->dma_length);
+			npages = iommu_nr_pages(s->dma_address, s->dma_length);
 			iommu_range_free(iommu, vaddr, npages);
 			/* XXX demap? XXX */
 			s->dma_address = DMA_ERROR_CODE;
@@ -500,7 +500,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 		if (!len)
 			break;
-		npages = iommu_num_pages(dma_handle, len);
+		npages = iommu_nr_pages(dma_handle, len);
 		iommu_range_free(iommu, dma_handle, npages);
 
 		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);