2008-10-23 12:26:29 +07:00
|
|
|
#ifndef _ASM_X86_CACHEFLUSH_H
|
|
|
|
#define _ASM_X86_CACHEFLUSH_H
|
2007-10-16 04:28:20 +07:00
|
|
|
|
|
|
|
/* Caches aren't brain-dead on the intel. */
|
2011-01-20 18:32:14 +07:00
|
|
|
#include <asm-generic/cacheflush.h>
|
2012-03-29 00:11:12 +07:00
|
|
|
#include <asm/special_insns.h>
|
2015-06-25 14:08:39 +07:00
|
|
|
#include <asm/uaccess.h>
|
2007-10-16 04:28:20 +07:00
|
|
|
|
2008-04-17 22:41:31 +07:00
|
|
|
/*
|
|
|
|
* The set_memory_* API can be used to change various attributes of a virtual
|
|
|
|
* address range. The attributes include:
|
2015-06-04 23:55:20 +07:00
|
|
|
* Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
|
2008-04-17 22:41:31 +07:00
|
|
|
* Executability : eXecutable, NoteXecutable
|
|
|
|
* Read/Write : ReadOnly, ReadWrite
|
|
|
|
* Presence : NotPresent
|
|
|
|
*
|
2011-03-18 02:24:16 +07:00
|
|
|
* Within a category, the attributes are mutually exclusive.
|
2008-04-17 22:41:31 +07:00
|
|
|
*
|
|
|
|
* The implementation of this API will take care of various aspects that
|
|
|
|
* are associated with changing such attributes, such as:
|
|
|
|
* - Flushing TLBs
|
|
|
|
* - Flushing CPU caches
|
|
|
|
* - Making sure aliases of the memory behind the mapping don't violate
|
|
|
|
* coherency rules as defined by the CPU in the system.
|
|
|
|
*
|
|
|
|
* What this API does not do:
|
|
|
|
* - Provide exclusion between various callers - including callers that
|
|
|
|
* operate on other mappings of the same physical page
|
|
|
|
* - Restore default attributes when a page is freed
|
|
|
|
* - Guarantee that mappings other than the requested one are
|
|
|
|
* in any state, other than that these do not violate rules for
|
|
|
|
* the CPU you have. Do not depend on any effects on other mappings,
|
|
|
|
* CPUs other than the one you have may have more relaxed rules.
|
|
|
|
* The caller is required to take care of these.
|
|
|
|
*/
|
x86: a new API for drivers/etc to control cache and other page attributes
Right now, if drivers or other code want to change, say, a cache attribute of a
page, the only API they have is change_page_attr(). c-p-a is a really bad API
for this, because it forces the caller to know *ALL* the attributes he wants
for the page, not just the 1 thing he wants to change. So code that wants to
set a page uncachable, needs to be aware of the NX status as well etc etc etc.
This patch introduces a set of new APIs for this, set_pages_<attr> and
set_memory_<attr>, that offer a logical change to the user, and leave all
attributes not implied by the requested logical change alone.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-01-30 19:34:06 +07:00
|
|
|
|
2008-03-19 07:00:18 +07:00
|
|
|
/*
 * Low-level memory-type changers for a linear-address range.
 * NOTE(review): the leading-underscore variants appear to be the internal
 * helpers behind the set_memory_*() API declared below — confirm against
 * arch/x86/mm/pageattr.c before calling them directly.
 */
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wt(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
|
x86: a new API for drivers/etc to control cache and other page attributes
Right now, if drivers or other code want to change, say, a cache attribute of a
page, the only API they have is change_page_attr(). c-p-a is a really bad API
for this, because it forces the caller to know *ALL* the attributes he wants
for the page, not just the 1 thing he wants to change. So code that wants to
set a page uncachable, needs to be aware of the NX status as well etc etc etc.
This patch introduces a set of new APIs for this, set_pages_<attr> and
set_memory_<attr>, that offer a logical change to the user, and leave all
attributes not implied by the requested logical change alone.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-01-30 19:34:06 +07:00
|
|
|
/*
 * Change the cachability of a virtual address range (see the API
 * description above): UnCached, WriteCombining, WriteThrough.
 */
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wt(unsigned long addr, int numpages);
|
x86: a new API for drivers/etc to control cache and other page attributes
Right now, if drivers or other code want to change, say, a cache attribute of a
page, the only API they have is change_page_attr(). c-p-a is a really bad API
for this, because it forces the caller to know *ALL* the attributes he wants
for the page, not just the 1 thing he wants to change. So code that wants to
set a page uncachable, needs to be aware of the NX status as well etc etc etc.
This patch introduces a set of new APIs for this, set_pages_<attr> and
set_memory_<attr>, that offer a logical change to the user, and leave all
attributes not implied by the requested logical change alone.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-01-30 19:34:06 +07:00
|
|
|
int set_memory_wb(unsigned long addr, int numpages);

/* Executability, protection and presence changers for a linear range. */
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);

/* Split any large-page mappings in the range down to 4k pages. */
int set_memory_4k(unsigned long addr, int numpages);
|
x86: a new API for drivers/etc to control cache and other page attributes
Right now, if drivers or other code want to change, say, a cache attribute of a
page, the only API they have is change_page_attr(). c-p-a is a really bad API
for this, because it forces the caller to know *ALL* the attributes he wants
for the page, not just the 1 thing he wants to change. So code that wants to
set a page uncachable, needs to be aware of the NX status as well etc etc etc.
This patch introduces a set of new APIs for this, set_pages_<attr> and
set_memory_<attr>, that offer a logical change to the user, and leave all
attributes not implied by the requested logical change alone.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-01-30 19:34:06 +07:00
|
|
|
|
2008-08-21 09:46:06 +07:00
|
|
|
/*
 * Batched variants: *addr holds addrinarray virtual addresses, each of
 * which has its memory type changed in one operation.
 */
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wt(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
|
|
|
|
|
2009-03-20 04:51:15 +07:00
|
|
|
/*
 * Batched struct-page variants: pages holds addrinarray page pointers.
 * These operate on the 1:1 kernel mapping of each page (see the
 * deprecation note on the set_pages_*() API below).
 */
int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wt(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
|
|
|
|
|
2008-04-17 22:41:31 +07:00
|
|
|
/*
|
|
|
|
* For legacy compatibility with the old APIs, a few functions
|
|
|
|
* are provided that work on a "struct page".
|
|
|
|
* These functions operate ONLY on the 1:1 kernel mapping of the
|
|
|
|
* memory that the struct page represents, and internally just
|
|
|
|
* call the set_memory_* function. See the description of the
|
|
|
|
* set_memory_* function for more details on conventions.
|
|
|
|
*
|
|
|
|
* These APIs should be considered *deprecated* and are likely going to
|
|
|
|
* be removed in the future.
|
|
|
|
* The reason for this is the implicit operation on the 1:1 mapping only,
|
|
|
|
* making this not a generally useful API.
|
|
|
|
*
|
|
|
|
* Specifically, many users of the old APIs had a virtual address,
|
|
|
|
* called virt_to_page() or vmalloc_to_page() on that address to
|
|
|
|
* get a struct page* that the old API required.
|
|
|
|
* To convert these cases, use set_memory_*() on the original
|
|
|
|
* virtual address, do not use these functions.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * Legacy struct-page API — deprecated, operates only on the 1:1 kernel
 * mapping; see the comment block above. Prefer set_memory_*() on the
 * original virtual address.
 */
int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
|
|
|
|
|
|
|
|
|
2008-01-30 19:34:09 +07:00
|
|
|
/* Flush (write back and invalidate) the cache lines covering [addr, addr + size). */
void clflush_cache_range(void *addr, unsigned int size);
|
2007-10-16 04:28:20 +07:00
|
|
|
|
nd_blk: change aperture mapping from WC to WB
This should result in a pretty sizeable performance gain for reads. For
rough comparison I did some simple read testing using PMEM to compare
reads of write combining (WC) mappings vs write-back (WB). This was
done on a random lab machine.
PMEM reads from a write combining mapping:
# dd of=/dev/null if=/dev/pmem0 bs=4096 count=100000
100000+0 records in
100000+0 records out
409600000 bytes (410 MB) copied, 9.2855 s, 44.1 MB/s
PMEM reads from a write-back mapping:
# dd of=/dev/null if=/dev/pmem0 bs=4096 count=1000000
1000000+0 records in
1000000+0 records out
4096000000 bytes (4.1 GB) copied, 3.44034 s, 1.2 GB/s
To be able to safely support a write-back aperture I needed to add
support for the "read flush" _DSM flag, as outlined in the DSM spec:
http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
This flag tells the ND BLK driver that it needs to flush the cache lines
associated with the aperture after the aperture is moved but before any
new data is read. This ensures that any stale cache lines from the
previous contents of the aperture will be discarded from the processor
cache, and the new data will be read properly from the DIMM. We know
that the cache lines are clean and will be discarded without any
writeback because either a) the previous aperture operation was a read,
and we never modified the contents of the aperture, or b) the previous
aperture operation was a write and we must have written back the dirtied
contents of the aperture to the DIMM before the I/O was completed.
In order to add support for the "read flush" flag I needed to add a
generic routine to invalidate cache lines, mmio_flush_range(). This is
protected by the ARCH_HAS_MMIO_FLUSH Kconfig variable, and is currently
only supported on x86.
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2015-08-28 02:14:20 +07:00
|
|
|
/*
 * Invalidate possibly-stale cache lines over an MMIO aperture before
 * re-reading it (used by nd_blk "read flush"); on x86 clflush suffices.
 */
#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
|
|
|
|
|
2008-02-13 03:12:01 +07:00
|
|
|
/*
 * Hooks for the DEBUG_RODATA machinery; definitions live elsewhere
 * (NOTE(review): comments inferred from names — see arch/x86/mm/).
 */
extern const int rodata_test_data;	/* datum rodata_test() probes */
extern int kernel_set_to_readonly;

/* Toggle write protection on the kernel text mapping. */
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
|
2008-02-13 03:12:01 +07:00
|
|
|
|
2008-01-30 19:34:08 +07:00
|
|
|
#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
/* Without CONFIG_DEBUG_RODATA_TEST the rodata test trivially passes. */
static inline int rodata_test(void)
{
	return 0;
}
#endif
|
2007-10-16 04:28:20 +07:00
|
|
|
|
2008-10-23 12:26:29 +07:00
|
|
|
#endif /* _ASM_X86_CACHEFLUSH_H */
|