mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-21 05:38:21 +07:00)
96601adb74
Given that a write-back (WB) mapping plus non-temporal stores is expected
to be the most efficient way to access PMEM, update the definition of
ARCH_HAS_PMEM_API to imply arch support for WB-mapped-PMEM.  This is
needed as a pre-requisite for adding PMEM to the direct map and mapping
it with struct page.

The above clarification for X86_64 means that memcpy_to_pmem() is
permitted to use the non-temporal arch_memcpy_to_pmem() rather than
needlessly fall back to default_memcpy_to_pmem() when the pcommit
instruction is not available.  When arch_memcpy_to_pmem() is not
guaranteed to flush writes out of cache, i.e. on older X86_32
implementations where non-temporal stores may just dirty cache,
ARCH_HAS_PMEM_API is simply disabled.

The default fall back for persistent memory handling remains.  Namely,
map it with the WT (write-through) cache-type and hope for the best.

arch_has_pmem_api() is updated to only indicate whether the arch
provides the proper helpers to meet the minimum "writes are visible
outside the cache hierarchy after memcpy_to_pmem() + wmb_pmem()".  Code
that cares whether wmb_pmem() actually flushes writes to pmem must now
call arch_has_wmb_pmem() directly.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
[hch: set ARCH_HAS_PMEM_API=n on x86_32]
Reviewed-by: Christoph Hellwig <hch@lst.de>
[toshi: x86_32 compile fixes]
Signed-off-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
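The split the last paragraph draws between the two predicates is easiest to
see from the caller's side.  A minimal sketch: arch_has_pmem_api() and
arch_has_wmb_pmem() are the helpers the commit message names, while the
probing function itself and its return convention are made up for
illustration:

        /* Hypothetical consumer of the two predicates described above. */
        static bool example_pmem_is_durable(void)
        {
                if (!arch_has_pmem_api())
                        return false;   /* no WB-mapped-PMEM helpers; fall back to WT */

                /*
                 * memcpy_to_pmem() + wmb_pmem() now guarantees writes are
                 * visible outside the cache hierarchy...
                 */
                if (!arch_has_wmb_pmem())
                        return false;   /* ...but wmb_pmem() cannot flush platform buffers */

                return true;
        }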
154 lines
4.6 KiB
C
/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__

#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent arch_wmb_pmem() can flush cpu and memory controller
 * write buffers to guarantee durability.
 */
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
                size_t n)
{
        int unwritten;

        /*
         * We are copying between two kernel buffers, if
         * __copy_from_user_inatomic_nocache() returns an error (page
         * fault) we would have already reported a general protection fault
         * before the WARN+BUG.
         */
        unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
                        (void __user *) src, n);
        if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
                                __func__, dst, src, unwritten))
                BUG();
}

/**
 * arch_wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of arch_memcpy_to_pmem() operations this drains data
 * from cpu write buffers and any platform (memory controller) buffers
 * to ensure that written data is durable on persistent memory media.
 */
static inline void arch_wmb_pmem(void)
{
        /*
         * wmb() to 'sfence' all previous writes such that they are
         * architecturally visible to 'pcommit'.  Note, that we've
         * already arranged for pmem writes to avoid the cache via
         * arch_memcpy_to_pmem().
         */
        wmb();
        pcommit_sfence();
}

/**
 * __arch_wb_cache_pmem - write back a cache range with CLWB
 * @vaddr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction.  This function requires explicit ordering with an
 * arch_wmb_pmem() call.  This API is internal to the x86 PMEM implementation.
 */
static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
{
        u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
        unsigned long clflush_mask = x86_clflush_size - 1;
        void *vend = vaddr + size;
        void *p;

        for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
             p < vend; p += x86_clflush_size)
                clwb(p);
}

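/*
 * Illustrative numbers, not from the source: with a 64-byte cache line
 * clflush_mask is 0x3f, so a call with vaddr == 0x1005 and size == 0x40
 * starts at p == 0x1000 (vaddr rounded down to a line boundary) with
 * vend == 0x1045, issuing clwb on lines 0x1000 and 0x1040, i.e. every
 * line the range touches.
 */
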
/*
 * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
 * iterators, so for other types (bvec & kvec) we must do a cache write-back.
 */
static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
{
        return iter_is_iovec(i) == false;
}

/**
 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr: PMEM destination address
 * @bytes: number of bytes to copy
 * @i: iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
                struct iov_iter *i)
{
        void *vaddr = (void __force *)addr;
        size_t len;

        /* TODO: skip the write-back by always using non-temporal stores */
        len = copy_from_iter_nocache(vaddr, bytes, i);

        if (__iter_needs_pmem_wb(i))
                __arch_wb_cache_pmem(vaddr, bytes);

        return len;
}

/**
 * arch_clear_pmem - zero a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
        void *vaddr = (void __force *)addr;

        /* TODO: implement the zeroing via non-temporal writes */
        if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                clear_page(vaddr);
        else
                memset(vaddr, 0, size);

        __arch_wb_cache_pmem(vaddr, size);
}

static inline bool __arch_has_wmb_pmem(void)
{
        /*
         * We require that wmb() be an 'sfence', that is only guaranteed on
         * 64-bit builds
         */
        return static_cpu_has(X86_FEATURE_PCOMMIT);
}
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */
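Read together, the copy and fence halves above form a two-step durable-write
sequence.  A minimal caller sketch follows; the record type and the function
wrapping the calls are made up for illustration, and in-tree code normally
reaches these helpers through the generic memcpy_to_pmem()/wmb_pmem()
wrappers rather than invoking them directly:

        struct example_record {         /* hypothetical payload */
                u64 seq;
                char data[56];
        };

        /* Make one record durable on persistent memory media. */
        static void example_commit_record(void __pmem *dst,
                        const struct example_record *rec)
        {
                /* Step 1: non-temporal copy, bypassing the cache hierarchy. */
                arch_memcpy_to_pmem(dst, rec, sizeof(*rec));

                /* Step 2: drain cpu and memory controller write buffers. */
                arch_wmb_pmem();
        }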