Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-19 01:17:23 +07:00), commit af7ddd8a62
Merge tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping

Pull DMA mapping updates from Christoph Hellwig:
 "A huge update this time, but a lot of that is just consolidating or
  removing code:

  - provide a common DMA_MAPPING_ERROR definition and avoid indirect
    calls for dma_map_* error checking

  - use direct calls for the DMA direct mapping case, avoiding huge
    retpoline overhead for high performance workloads

  - merge the swiotlb dma_map_ops into dma-direct

  - provide a generic remapping DMA consistent allocator for
    architectures that have devices that perform DMA that is not cache
    coherent. Based on the existing arm64 implementation and also used
    for csky now.

  - improve the dma-debug infrastructure, including dynamic allocation
    of entries (Robin Murphy)

  - default to providing chaining scatterlist everywhere, with opt-outs
    for the few architectures (alpha, parisc, most arm32 variants) that
    can't cope with it

  - misc sparc32 dma-related cleanups

  - remove the dma_mark_clean arch hook used by swiotlb on ia64 and
    replace it with the generic noncoherent infrastructure

  - fix the return type of dma_set_max_seg_size (Niklas Söderlund)

  - move the dummy dma ops for not DMA capable devices from arm64 to
    common code (Robin Murphy)

  - ensure dma_alloc_coherent returns zeroed memory to avoid kernel data
    leaks through userspace. We already did this for most common
    architectures, but this ensures we do it everywhere.
    dma_zalloc_coherent has been deprecated and can hopefully be removed
    after -rc1 with a coccinelle script"

* tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping: (73 commits)
  dma-mapping: fix inverted logic in dma_supported
  dma-mapping: deprecate dma_zalloc_coherent
  dma-mapping: zero memory returned from dma_alloc_*
  sparc/iommu: fix ->map_sg return value
  sparc/io-unit: fix ->map_sg return value
  arm64: default to the direct mapping in get_arch_dma_ops
  PCI: Remove unused attr variable in pci_dma_configure
  ia64: only select ARCH_HAS_DMA_COHERENT_TO_PFN if swiotlb is enabled
  dma-mapping: bypass indirect calls for dma-direct
  vmd: use the proper dma_* APIs instead of direct methods calls
  dma-direct: merge swiotlb_dma_ops into the dma_direct code
  dma-direct: use dma_direct_map_page to implement dma_direct_map_sg
  dma-direct: improve addressability error reporting
  swiotlb: remove dma_mark_clean
  swiotlb: remove SWIOTLB_MAP_ERROR
  ACPI / scan: Refactor _CCA enforcement
  dma-mapping: factor out dummy DMA ops
  dma-mapping: always build the direct mapping code
  dma-mapping: move dma_cache_sync out of line
  dma-mapping: move various slow path functions out of line
  ...
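For driver code, the allocator and error-checking changes summarized above amount to the pattern sketched below. This is a minimal, hypothetical sketch: my_setup_dma() and its dev/buf/len parameters are illustrative and not part of this repository; only the dma_* calls are real kernel API. After this series, dma_alloc_coherent() returns zeroed memory on every architecture, and dma_mapping_error() tests against the common DMA_MAPPING_ERROR value instead of making an indirect call.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical helper used only to illustrate the updated DMA API usage. */
static int my_setup_dma(struct device *dev, void *buf, size_t len)
{
        dma_addr_t ring_dma, buf_dma;
        void *ring;

        /* Coherent allocations are now zeroed everywhere, so the deprecated
         * dma_zalloc_coherent() (or a manual memset) is no longer needed. */
        ring = dma_alloc_coherent(dev, len, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* Streaming mappings are still checked with dma_mapping_error(),
         * which now compares against the common DMA_MAPPING_ERROR value
         * rather than calling a per-ops ->mapping_error hook. */
        buf_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, buf_dma)) {
                dma_free_coherent(dev, len, ring, ring_dma);
                return -ENOMEM;
        }

        /* ... program the device with ring_dma and buf_dma here ... */

        dma_unmap_single(dev, buf_dma, len, DMA_TO_DEVICE);
        dma_free_coherent(dev, len, ring, ring_dma);
        return 0;
}

The sketch also shows why dma_zalloc_coherent() could be deprecated: the zeroing it used to provide is now guaranteed by dma_alloc_coherent() itself.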
124 lines
3.2 KiB
C
/*
 * Contains routines needed to support swiotlb for ppc.
 *
 * Copyright (C) 2009-2010 Freescale Semiconductor, Inc.
 * Author: Becky Bruce
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/dma-direct.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>

#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

unsigned int ppc_swiotlb_enable;

static u64 swiotlb_powerpc_get_required(struct device *dev)
{
        u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr;

        end = memblock_end_of_DRAM();
        if (max_direct_dma_addr && end > max_direct_dma_addr)
                end = max_direct_dma_addr;
        end += get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}

/*
 * At the moment, all platforms that use this code only require
 * swiotlb to be used if we're operating on HIGHMEM. Since
 * we don't ever call anything other than map_sg, unmap_sg,
 * map_page, and unmap_page on highmem, use normal dma_ops
 * for everything else.
 */
const struct dma_map_ops powerpc_swiotlb_dma_ops = {
        .alloc = __dma_nommu_alloc_coherent,
        .free = __dma_nommu_free_coherent,
        .mmap = dma_nommu_mmap_coherent,
        .map_sg = dma_direct_map_sg,
        .unmap_sg = dma_direct_unmap_sg,
        .dma_supported = swiotlb_dma_supported,
        .map_page = dma_direct_map_page,
        .unmap_page = dma_direct_unmap_page,
        .sync_single_for_cpu = dma_direct_sync_single_for_cpu,
        .sync_single_for_device = dma_direct_sync_single_for_device,
        .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
        .sync_sg_for_device = dma_direct_sync_sg_for_device,
        .get_required_mask = swiotlb_powerpc_get_required,
};

void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
{
        struct pci_controller *hose;
        struct dev_archdata *sd;

        hose = pci_bus_to_host(pdev->bus);
        sd = &pdev->dev.archdata;
        sd->max_direct_dma_addr =
                hose->dma_window_base_cur + hose->dma_window_size;
}

static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
{
        struct device *dev = data;
        struct dev_archdata *sd;

        /* We are only interested in device addition */
        if (action != BUS_NOTIFY_ADD_DEVICE)
                return 0;

        sd = &dev->archdata;
        sd->max_direct_dma_addr = 0;

        /* May need to bounce if the device can't address all of DRAM */
        if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
                set_dma_ops(dev, &powerpc_swiotlb_dma_ops);

        return NOTIFY_DONE;
}

static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
        .notifier_call = ppc_swiotlb_bus_notify,
        .priority = 0,
};

int __init swiotlb_setup_bus_notifier(void)
{
        bus_register_notifier(&platform_bus_type,
                              &ppc_swiotlb_plat_bus_notifier);
        return 0;
}

void __init swiotlb_detect_4g(void)
{
        if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
                ppc_swiotlb_enable = 1;
}

static int __init check_swiotlb_enabled(void)
{
        if (ppc_swiotlb_enable)
                swiotlb_print_info();
        else
                swiotlb_exit();

        return 0;
}
subsys_initcall(check_swiotlb_enabled);
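To illustrate the logic above with concrete numbers (a worked example, not part of the file): on a board whose RAM ends at 0x120000000 (4.5 GiB) and with no PCI direct-DMA window limit, swiotlb_powerpc_get_required() sees fls64(0x120000000) == 33, so mask becomes 1ULL << 32 and the returned value is 0x1FFFFFFFF, a 33-bit mask. swiotlb_detect_4g() enables the swiotlb pool because DRAM extends past the 4 GiB boundary, and in ppc_swiotlb_bus_notify() a device advertising only a 32-bit DMA mask satisfies dma_get_mask(dev) + 1 == 0x100000000 < memblock_end_of_DRAM(), so it is switched to powerpc_swiotlb_dma_ops and its transfers to memory above 4 GiB are bounced through the swiotlb pool.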