Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-27 01:10:52 +07:00)
s390: implement ->mapping_error
s390 can also use noop_dma_ops, and while that currently does not return errors, it will do so in the future. Implementing the mapping_error method is the proper way to have per-ops error conditions.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Parent: a869572c31
Commit: 81cac18e11
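For context, here is a minimal driver-side sketch, not part of this commit, showing why a per-ops error check matters: dma_mapping_error() dispatches to the ops' ->mapping_error callback when one is implemented, so callers never need to know about an arch-private sentinel such as S390_MAPPING_ERROR. The function and parameter names below are hypothetical.

#include <linux/dma-mapping.h>

/* Hypothetical example: map a buffer for device DMA and check the result portably. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
                              dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* On s390 PCI this ends up in s390_mapping_error() via the ops table. */
        if (dma_mapping_error(dev, *handle))
                return -EIO;    /* mapping failed; *handle must not be used */

        return 0;
}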
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -8,8 +8,6 @@
 #include <linux/dma-debug.h>
 #include <linux/io.h>
 
-#define DMA_ERROR_CODE		(~(dma_addr_t) 0x0)
-
 extern const struct dma_map_ops s390_pci_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -14,6 +14,8 @@
 #include <linux/pci.h>
 #include <asm/pci_dma.h>
 
+#define S390_MAPPING_ERROR	(~(dma_addr_t) 0x0)
+
 static struct kmem_cache *dma_region_table_cache;
 static struct kmem_cache *dma_page_table_cache;
 static int s390_iommu_strict;
@@ -281,7 +283,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
 
 out_error:
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-	return DMA_ERROR_CODE;
+	return S390_MAPPING_ERROR;
 }
 
 static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
@@ -329,7 +331,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
 	dma_addr = dma_alloc_address(dev, nr_pages);
-	if (dma_addr == DMA_ERROR_CODE) {
+	if (dma_addr == S390_MAPPING_ERROR) {
 		ret = -ENOSPC;
 		goto out_err;
 	}
@@ -352,7 +354,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 out_err:
 	zpci_err("map error:\n");
 	zpci_err_dma(ret, pa);
-	return DMA_ERROR_CODE;
+	return S390_MAPPING_ERROR;
 }
 
 static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
@@ -429,7 +431,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int ret;
 
 	dma_addr_base = dma_alloc_address(dev, nr_pages);
-	if (dma_addr_base == DMA_ERROR_CODE)
+	if (dma_addr_base == S390_MAPPING_ERROR)
 		return -ENOMEM;
 
 	dma_addr = dma_addr_base;
@@ -476,7 +478,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	for (i = 1; i < nr_elements; i++) {
 		s = sg_next(s);
 
-		s->dma_address = DMA_ERROR_CODE;
+		s->dma_address = S390_MAPPING_ERROR;
 		s->dma_length = 0;
 
 		if (s->offset || (size & ~PAGE_MASK) ||
@@ -525,6 +527,11 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 		s->dma_length = 0;
 	}
 }
 
+static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == S390_MAPPING_ERROR;
+}
+
 int zpci_dma_init_device(struct zpci_dev *zdev)
 {
@@ -657,6 +664,7 @@ const struct dma_map_ops s390_pci_dma_ops = {
 	.unmap_sg	= s390_dma_unmap_sg,
 	.map_page	= s390_dma_map_pages,
 	.unmap_page	= s390_dma_unmap_pages,
+	.mapping_error	= s390_mapping_error,
 	/* if we support direct DMA this must be conditional */
 	.is_phys	= 0,
 	/* dma_supported is unconditionally true without a callback */