commit 67a3e8fe90
This should result in a pretty sizeable performance gain for reads.  For a
rough comparison I did some simple read testing using PMEM to compare reads
from a write combining (WC) mapping vs a write-back (WB) mapping.  This was
done on a random lab machine.

PMEM reads from a write combining mapping:

	# dd of=/dev/null if=/dev/pmem0 bs=4096 count=100000
	100000+0 records in
	100000+0 records out
	409600000 bytes (410 MB) copied, 9.2855 s, 44.1 MB/s

PMEM reads from a write-back mapping:

	# dd of=/dev/null if=/dev/pmem0 bs=4096 count=1000000
	1000000+0 records in
	1000000+0 records out
	4096000000 bytes (4.1 GB) copied, 3.44034 s, 1.2 GB/s

To be able to safely support a write-back aperture I needed to add support
for the "read flush" _DSM flag, as outlined in the DSM spec:

	http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf

This flag tells the ND BLK driver that it needs to flush the cache lines
associated with the aperture after the aperture is moved but before any new
data is read.  This ensures that any stale cache lines from the previous
contents of the aperture are discarded from the processor cache, and the new
data is read properly from the DIMM.  We know that the cache lines are clean
and will be discarded without any writeback because either a) the previous
aperture operation was a read, and we never modified the contents of the
aperture, or b) the previous aperture operation was a write, and we must have
written back the dirtied contents of the aperture to the DIMM before the I/O
was completed.

In order to add support for the "read flush" flag I needed to add a generic
routine to invalidate cache lines, mmio_flush_range().  This is protected by
the ARCH_HAS_MMIO_FLUSH Kconfig variable, and is currently only supported on
x86.

Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
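
Conceptually, the read-flush handling in a BLK read path looks something like
the sketch below.  This is illustrative only; the structure and function names
(struct blk_window, nd_blk_read_window(), read_flush) are hypothetical and are
not the driver's actual identifiers.

	#include <linux/io.h>
	#include <linux/types.h>
	#include <asm/cacheflush.h>

	struct blk_window {                 /* hypothetical aperture descriptor */
		void __iomem	*aperture;  /* mapped BLK aperture              */
		size_t		len;        /* aperture length in bytes         */
		bool		read_flush; /* "read flush" _DSM flag was set   */
	};

	/* Hypothetical read helper: reposition the aperture, then copy data out. */
	static int nd_blk_read_window(struct blk_window *win, void *dst,
				      resource_size_t dpa, size_t len)
	{
		/* ... reposition the aperture so that it exposes 'dpa' ... */

		/*
		 * Discard any stale cache lines left over from the previous
		 * contents of the aperture.  They are known to be clean, so
		 * no writeback occurs.
		 */
		if (win->read_flush)
			mmio_flush_range((void __force *)win->aperture, len);

		memcpy_fromio(dst, win->aperture, len);
		return 0;
	}
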
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
#include <asm/uaccess.h>

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteThrough, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are
 *   in any state, other than that these do not violate rules for
 *   the CPU you have. Do not depend on any effects on other mappings;
 *   CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wt(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wt(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

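/*
 * Illustrative sketch only: a typical pairing of set_memory_wc() and
 * set_memory_wb() on a kernel-virtual buffer, following the conventions
 * described above.  The helper names below are hypothetical and are not
 * part of the original header.
 */
static inline int example_buffer_set_wc(void *buf, int numpages)
{
	/* Switch the linear-map pages backing 'buf' to write-combining. */
	return set_memory_wc((unsigned long)buf, numpages);
}

static inline void example_buffer_set_wb(void *buf, int numpages)
{
	/* Restore the default write-back attribute before the buffer is freed. */
	set_memory_wb((unsigned long)buf, numpages);
}
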
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wt(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wt(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* function. See the description of the
 * set_memory_* function for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * making this not a generally useful API.
 *
 * Specifically, many users of the old APIs had a virtual address,
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get a struct page* that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address; do not use these functions.
 */

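/*
 * Illustrative sketch only: converting a legacy struct-page based call to
 * the preferred set_memory_*() form described above.  The helper name and
 * the 'vaddr' argument are hypothetical and not part of the original header.
 */
static inline int example_make_readonly(void *vaddr, int numpages)
{
	/*
	 * Legacy (deprecated) style:
	 *	set_pages_ro(virt_to_page(vaddr), numpages);
	 * Preferred style: operate directly on the virtual address.
	 */
	return set_memory_ro((unsigned long)vaddr, numpages);
}
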
int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

void clflush_cache_range(void *addr, unsigned int size);

#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */