Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Synced 2024-11-25 12:20:52 +07:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
  sparc64: remove unused calc_npages() in iommu_common.h
  sparc64: add the segment boundary checking to IOMMUs while merging SG entries
  [SPARC64]: Don't open-code {get,put}_cpu_var() in flush_tlb_pending().
commit 4c61f72c72
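The segment-boundary change below makes both sparc64 map_sg paths refuse to merge two scatterlist entries when the merged mapping would cross the device's DMA segment boundary. The new is_span_boundary() helper (added to iommu_common.h) delegates to iommu_is_span_boundary(). The stand-alone sketch below mirrors the arithmetic that helper is expected to perform (paraphrased from lib/iommu-helper.c of that era); the function and variable names here are illustrative, not the kernel's:

	/* Minimal sketch of the boundary test: a mapping starting at IOMMU
	 * entry `entry` (offset by `shift`) and spanning `nr` pages must not
	 * cross a `boundary_size`-page boundary.  boundary_size is assumed to
	 * be a power of two.
	 */
	#include <stdio.h>

	static int crosses_boundary(unsigned long entry, unsigned long shift,
				    unsigned long nr, unsigned long boundary_size)
	{
		unsigned long offset = (shift + entry) & (boundary_size - 1);

		return offset + nr > boundary_size;
	}

	int main(void)
	{
		/* A 3-page mapping placed 2 pages below a 4-page boundary crosses it. */
		printf("%d\n", crosses_boundary(2, 0, 3, 4));	/* prints 1 */
		/* The same mapping placed right at the boundary does not. */
		printf("%d\n", crosses_boundary(4, 0, 3, 4));	/* prints 0 */
		return 0;
	}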
arch/sparc64/kernel/iommu.c

@@ -516,9 +516,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long flags, handle, prot, ctx;
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned int max_seg_size;
+	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct strbuf *strbuf;
 	struct iommu *iommu;
+	unsigned long base_shift;
 
 	BUG_ON(direction == DMA_NONE);
 
@@ -549,8 +551,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	outs->dma_length = 0;
 
 	max_seg_size = dma_get_max_seg_size(dev);
+	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
-		unsigned long paddr, npages, entry, slen;
+		unsigned long paddr, npages, entry, out_entry = 0, slen;
 		iopte_t *base;
 
 		slen = s->length;
@@ -593,7 +598,9 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 			 * - allocated dma_addr isn't contiguous to previous allocation
 			 */
 			if ((dma_addr != dma_next) ||
-			    (outs->dma_length + s->length > max_seg_size)) {
+			    (outs->dma_length + s->length > max_seg_size) ||
+			    (is_span_boundary(out_entry, base_shift,
+					      seg_boundary_size, outs, s))) {
 				/* Can't merge: create a new segment */
 				segstart = s;
 				outcount++;
@@ -607,6 +614,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 			/* This is a new segment, fill entries */
 			outs->dma_address = dma_addr;
 			outs->dma_length = slen;
+			out_entry = entry;
 		}
 
 		/* Calculate next page pointer for contiguous check */
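For a concrete feel for the new seg_boundary_size value, the sketch below works the ALIGN()/shift arithmetic with assumed sparc64-style values (8 KiB IOMMU pages, i.e. IO_PAGE_SHIFT of 13, and a default 4 GiB boundary mask returned by dma_get_seg_boundary()); ALIGN() is re-implemented locally so the example runs on its own:

	#include <stdio.h>

	#define IO_PAGE_SHIFT	13UL			/* assumed: 8 KiB IOMMU pages */
	#define IO_PAGE_SIZE	(1UL << IO_PAGE_SHIFT)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long seg_boundary = 0xffffffffUL;	/* assumed default boundary mask */
		unsigned long seg_boundary_size =
			ALIGN(seg_boundary + 1, IO_PAGE_SIZE) >> IO_PAGE_SHIFT;

		/* 4 GiB boundary / 8 KiB pages = 524288 IOMMU pages */
		printf("%lu\n", seg_boundary_size);
		return 0;
	}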
arch/sparc64/kernel/iommu_common.h

@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/device.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/iommu.h>
 #include <asm/scatterlist.h>
@@ -45,17 +46,16 @@ static inline unsigned long iommu_num_pages(unsigned long vaddr,
 	return npages;
 }
 
-static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
+static inline int is_span_boundary(unsigned long entry,
+				   unsigned long shift,
+				   unsigned long boundary_size,
+				   struct scatterlist *outs,
+				   struct scatterlist *sg)
 {
-	unsigned long i, npages = 0;
-	struct scatterlist *sg;
+	unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs);
+	int nr = iommu_num_pages(paddr, outs->dma_length + sg->length);
 
-	for_each_sg(sglist, sg, nelems, i) {
-		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
-		npages += iommu_num_pages(paddr, sg->length);
-	}
-
-	return npages;
+	return iommu_is_span_boundary(entry, nr, shift, boundary_size);
 }
 
 extern unsigned long iommu_range_alloc(struct device *dev,
arch/sparc64/kernel/pci_sun4v.c

@@ -335,8 +335,10 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long flags, handle, prot;
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned int max_seg_size;
+	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct iommu *iommu;
+	unsigned long base_shift;
 	long err;
 
 	BUG_ON(direction == DMA_NONE);
@@ -362,8 +364,11 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	iommu_batch_start(dev, prot, ~0UL);
 
 	max_seg_size = dma_get_max_seg_size(dev);
+	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
-		unsigned long paddr, npages, entry, slen;
+		unsigned long paddr, npages, entry, out_entry = 0, slen;
 
 		slen = s->length;
 		/* Sanity check */
@@ -406,7 +411,9 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			 * - allocated dma_addr isn't contiguous to previous allocation
 			 */
 			if ((dma_addr != dma_next) ||
-			    (outs->dma_length + s->length > max_seg_size)) {
+			    (outs->dma_length + s->length > max_seg_size) ||
+			    (is_span_boundary(out_entry, base_shift,
+					      seg_boundary_size, outs, s))) {
 				/* Can't merge: create a new segment */
 				segstart = s;
 				outcount++;
@@ -420,6 +427,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			/* This is a new segment, fill entries */
 			outs->dma_address = dma_addr;
 			outs->dma_length = slen;
+			out_entry = entry;
 		}
 
 		/* Calculate next page pointer for contiguous check */
arch/sparc64/mm/tlb.c

@@ -23,11 +23,8 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp;
+	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
 
-	preempt_disable();
-
-	mp = &__get_cpu_var(mmu_gathers);
 	if (mp->tlb_nr) {
 		flush_tsb_user(mp);
 
@@ -43,7 +40,7 @@ void flush_tlb_pending(void)
 		mp->tlb_nr = 0;
 	}
 
-	preempt_enable();
+	put_cpu_var(mmu_gathers);
 }
 
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
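The flush_tlb_pending() change relies on get_cpu_var()/put_cpu_var() bundling the preemption handling that the old code open-coded. The toy program below models that equivalence with a single "CPU" so it can run on its own; the macro bodies are paraphrased assumptions (GCC statement expressions, kernel style), not the kernel's definitions:

	#include <stdio.h>

	static int preempt_count;
	static int mmu_gathers;			/* stand-in for the per-cpu batch */

	#define preempt_disable()	(preempt_count++)
	#define preempt_enable()	(preempt_count--)
	#define __get_cpu_var(v)	(v)	/* raw access, no preemption handling */
	#define get_cpu_var(v)		(*({ preempt_disable(); &(v); }))
	#define put_cpu_var(v)		preempt_enable()

	static void flush_old(void)		/* old open-coded form */
	{
		int *mp;

		preempt_disable();
		mp = &__get_cpu_var(mmu_gathers);
		*mp = 0;
		preempt_enable();
	}

	static void flush_new(void)		/* new form using the helpers */
	{
		int *mp = &get_cpu_var(mmu_gathers);

		*mp = 0;
		put_cpu_var(mmu_gathers);
	}

	int main(void)
	{
		flush_old();
		flush_new();
		printf("%d\n", preempt_count);	/* balanced either way: prints 0 */
		return 0;
	}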