linux_dsm_epyc7002/arch/mips/mm/cache.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>

/*
 * Cache operations.  These function pointers are installed at boot by the
 * CPU-specific cache_init routine selected in cpu_cache_init() below.
 */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__flush_icache_user_range);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);
void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);
void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);
EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);
#ifdef CONFIG_DMA_NONCOHERENT
/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);
EXPORT_SYMBOL(_dma_cache_wback_inv);
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * The cacheflush(2) system call.  We could optimize the case where the
 * cache argument is not BCACHE but that seems a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}
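
/*
 * Flush the data cache for @page.  If the page belongs to a mapping that
 * has no user-space mappings yet, the flush is deferred: the page is only
 * marked with SetPageDcacheDirty() and the real flush happens later in
 * __update_cache().  page_mapping_file() deliberately returns NULL for
 * anonymous pages in swap cache, so those are flushed immediately rather
 * than referencing an address_space that swapoff may free.
 */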
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}
EXPORT_SYMBOL(__flush_dcache_page);
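
/*
 * Flush an anonymous page whose kernel-side address may alias the user
 * mapping at @vmaddr in a virtually indexed cache.  If the page is still
 * mapped and not marked dirty, flush it through a temporary mapping with
 * the same cache colour as the user address; otherwise flush the kernel
 * address directly.
 */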
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}
EXPORT_SYMBOL(__flush_anon_page);
MIPS: Sync icache & dcache in set_pte_at It's possible for pages to become visible prior to update_mmu_cache running if a thread within the same address space preempts the current thread or runs simultaneously on another CPU. That is, the following scenario is possible: CPU0 CPU1 write to page flush_dcache_page flush_icache_page set_pte_at map page update_mmu_cache If CPU1 maps the page in between CPU0's set_pte_at, which marks it valid & visible, and update_mmu_cache where the dcache flush occurs then CPU1s icache will fill from stale data (unless it fills from the dcache, in which case all is good, but most MIPS CPUs don't have this property). Commit 4d46a67a3eb8 ("MIPS: Fix race condition in lazy cache flushing.") attempted to fix that by performing the dcache flush in flush_icache_page such that it occurs before the set_pte_at call makes the page visible. However it has the problem that not all code that writes to pages exposed to userland call flush_icache_page. There are many callers of set_pte_at under mm/ and only 2 of them do call flush_icache_page. Thus the race window between a page becoming visible & being coherent between the icache & dcache remains open in some cases. To illustrate some of the cases, a WARN was added to __update_cache with this patch applied that triggered in cases where a page about to be flushed from the dcache was not the last page provided to flush_icache_page. That is, backtraces were obtained for cases in which the race window is left open without this patch. The 2 standout examples follow. When forking a process: [ 15.271842] [<80417630>] __update_cache+0xcc/0x188 [ 15.277274] [<80530394>] copy_page_range+0x56c/0x6ac [ 15.282861] [<8042936c>] copy_process.part.54+0xd40/0x17ac [ 15.289028] [<80429f80>] do_fork+0xe4/0x420 [ 15.293747] [<80413808>] handle_sys+0x128/0x14c When exec'ing an ELF binary: [ 14.445964] [<80417630>] __update_cache+0xcc/0x188 [ 14.451369] [<80538d88>] move_page_tables+0x414/0x498 [ 14.457075] [<8055d848>] setup_arg_pages+0x220/0x318 [ 14.462685] [<805b0f38>] load_elf_binary+0x530/0x12a0 [ 14.468374] [<8055ec3c>] search_binary_handler+0xbc/0x214 [ 14.474444] [<8055f6c0>] do_execveat_common+0x43c/0x67c [ 14.480324] [<8055f938>] do_execve+0x38/0x44 [ 14.485137] [<80413808>] handle_sys+0x128/0x14c These code paths write into a page, call flush_dcache_page then call set_pte_at without flush_icache_page inbetween. The end result is that the icache can become corrupted & userland processes may execute unexpected or invalid code, typically resulting in a reserved instruction exception, a trap or a segfault. Fix this race condition fully by performing any cache maintenance required to keep the icache & dcache in sync in set_pte_at, before the page is made valid. This has the added bonus of ensuring the cache maintenance always happens in one location, rather than being duplicated in flush_icache_page & update_mmu_cache. It also matches the way other architectures solve the same problem (see arm, ia64 & powerpc). Signed-off-by: Paul Burton <paul.burton@imgtec.com> Reported-by: Ionela Voinescu <ionela.voinescu@imgtec.com> Cc: Lars Persson <lars.persson@axis.com> Fixes: 4d46a67a3eb8 ("MIPS: Fix race condition in lazy cache flushing.") Cc: Steven J. Hill <sjhill@realitydiluted.com> Cc: David Daney <david.daney@cavium.com> Cc: Huacai Chen <chenhc@lemote.com> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Jerome Marchand <jmarchan@redhat.com> Cc: Kirill A. 
Shutemov <kirill.shutemov@linux.intel.com> Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: stable <stable@vger.kernel.org> # v4.1+ Patchwork: https://patchwork.linux-mips.org/patch/12722/ Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2016-03-01 09:37:59 +07:00
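
/*
 * Called from set_pte_at() before the PTE is made valid, so that any
 * deferred dcache flush (and, for executable mappings, the icache sync)
 * happens before another thread or CPU can map and execute from the page.
 */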
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			__kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
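
/*
 * protection_map[] is indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * bits of a VMA: entries 0-7 are the private (copy-on-write) combinations,
 * entries 8-15 the shared ones.  With RIXI the no-exec/no-read bits are
 * encoded directly; otherwise the generic PAGE_* protections are used.
 */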
static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}
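
/*
 * Probe the CPU's cache type and dispatch to the matching cache_init
 * routine; each implementation installs the flush_* function pointers
 * declared at the top of this file.
 */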
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}
	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}
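
/*
 * Default policy for whether a mapped region must bypass the cache:
 * files opened with O_DSYNC and addresses beyond high_memory get
 * uncached access.  Platforms may override this __weak default.
 */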
int __weak __uncached_access(struct file *file, unsigned long addr)
{
if (file->f_flags & O_DSYNC)
return 1;
return addr >= __pa(high_memory);
}