Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:
 "Most of the rest of MM and various other things. Some Kconfig rework
  still awaits merges of dependent trees from linux-next.

  Subsystems affected by this patch series: mm/hotfixes, mm/memcg,
  mm/vmstat, mm/thp, procfs, sysctl, misc, notifiers, core-kernel,
  bitops, lib, checkpatch, epoll, binfmt, init, rapidio, uaccess, kcov,
  ubsan, ipc, bitmap, mm/pagemap"

* akpm: (86 commits)
  mm: remove __ARCH_HAS_4LEVEL_HACK and include/asm-generic/4level-fixup.h
  um: add support for folded p4d page tables
  um: remove unused pxx_offset_proc() and addr_pte() functions
  sparc32: use pgtable-nopud instead of 4level-fixup
  parisc/hugetlb: use pgtable-nopXd instead of 4level-fixup
  parisc: use pgtable-nopXd instead of 4level-fixup
  nds32: use pgtable-nopmd instead of 4level-fixup
  microblaze: use pgtable-nopmd instead of 4level-fixup
  m68k: mm: use pgtable-nopXd instead of 4level-fixup
  m68k: nommu: use pgtable-nopud instead of 4level-fixup
  c6x: use pgtable-nopud instead of 4level-fixup
  arm: nommu: use pgtable-nopud instead of 4level-fixup
  alpha: use pgtable-nopud instead of 4level-fixup
  gpio: pca953x: tighten up indentation
  gpio: pca953x: convert to use bitmap API
  gpio: pca953x: use input from regs structure in pca953x_irq_pending()
  gpio: pca953x: remove redundant variable and check in IRQ handler
  lib/bitmap: introduce bitmap_replace() helper
  lib/test_bitmap: fix comment about this file
  lib/test_bitmap: move exp1 and exp2 upper for others to use
  ...
Linus Torvalds 2019-12-05 09:46:26 -08:00
commit 5ecc9d15f7
154 changed files with 5254 additions and 1344 deletions

.gitattributes
View File

@ -1,2 +1,4 @@
*.c diff=cpp
*.h diff=cpp
*.dtsi diff=dts
*.dts diff=dts

View File

@ -129,7 +129,7 @@ writing of special-purpose memory allocators in the future.
:functions: gen_pool_for_each_chunk
.. kernel-doc:: lib/genalloc.c
:functions: addr_in_gen_pool
:functions: gen_pool_has_addr
.. kernel-doc:: lib/genalloc.c
:functions: gen_pool_avail

View File

@ -34,6 +34,7 @@ Profiling data will only become accessible once debugfs has been mounted::
Coverage collection
-------------------
The following program demonstrates coverage collection from within a test
program using kcov:
@ -128,6 +129,7 @@ only need to enable coverage (disable happens automatically on thread end).
Comparison operands collection
------------------------------
Comparison operands collection is similar to coverage collection:
.. code-block:: c
@ -202,3 +204,130 @@ Comparison operands collection is similar to coverage collection:
Note that the kcov modes (coverage collection or comparison operands) are
mutually exclusive.
Remote coverage collection
--------------------------
With KCOV_ENABLE coverage is collected only for syscalls that are issued
from the current process. With KCOV_REMOTE_ENABLE it's possible to collect
coverage for arbitrary parts of the kernel code, provided that those parts
are annotated with kcov_remote_start()/kcov_remote_stop().
This makes it possible to collect coverage from two types of kernel
background threads: global ones, which are spawned during kernel boot in a
limited number of instances (e.g. one USB hub_event() worker thread is
spawned per USB HCD); and local ones, which are spawned when a user
interacts with some kernel interface (e.g. vhost workers).
To enable collecting coverage from a global background thread, a unique
global handle must be assigned and passed to the corresponding
kcov_remote_start() call. Then a userspace process can pass a list of such
handles to the KCOV_REMOTE_ENABLE ioctl in the handles array field of the
kcov_remote_arg struct. This will attach the kcov device in use to the code
sections referenced by those handles.
Since there might be many local background threads spawned from different
userspace processes, we can't use a single global handle per annotation.
Instead, the userspace process passes a non-zero handle through the
common_handle field of the kcov_remote_arg struct. This common handle gets
saved to the kcov_handle field in the current task_struct and needs to be
passed to the newly spawned threads via custom annotations. Those threads
should in turn be annotated with kcov_remote_start()/kcov_remote_stop().
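An editorial sketch (not part of this patch) of what such an annotation might
look like on the kernel side is shown below; my_work, its kcov_handle field,
my_spawn() and my_worker() are hypothetical names used purely for illustration:

.. code-block:: c

    #include <linux/types.h>
    #include <linux/sched.h>
    #include <linux/kcov.h>

    struct my_work {
        u64 kcov_handle;    /* hypothetical field carrying the common handle */
        /* ... */
    };

    /* Spawner side (sketch): propagate the common handle that userspace
     * installed via KCOV_REMOTE_ENABLE; it is saved in current->kcov_handle. */
    static void my_spawn(struct my_work *work)
    {
        work->kcov_handle = current->kcov_handle;
        /* ... hand the work item over to the background thread ... */
    }

    /* Worker side (sketch): bracket the region whose coverage should be
     * attributed to that handle. */
    static void my_worker(struct my_work *work)
    {
        kcov_remote_start(work->kcov_handle);
        /* ... the actual background work happens here ... */
        kcov_remote_stop();
    }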
Internally kcov stores handles as u64 integers. The top byte of a handle
is used to denote the id of a subsystem that this handle belongs to, and
the lower 4 bytes are used to denote the id of a thread instance within
that subsystem. A reserved value 0 is used as a subsystem id for common
handles as they don't belong to a particular subsystem. Bytes 4-6 are
currently reserved and must be zero. In the future the number of bytes
used for the subsystem or handle ids might be increased.
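As a concrete illustration of this layout (again an editorial sketch, not part
of the patch), the following standalone snippet composes a USB handle for bus 1
and a common handle with id 0x42, using the same constants as the example below:

.. code-block:: c

    #include <inttypes.h>
    #include <stdio.h>

    /* Layout described above: bits 56-63 hold the subsystem id,
     * bits 32-55 are reserved and must be zero, bits 0-31 hold the
     * instance id within that subsystem. */
    #define KCOV_SUBSYSTEM_COMMON (0x00ull << 56)
    #define KCOV_SUBSYSTEM_USB    (0x01ull << 56)

    int main(void)
    {
        uint64_t usb1 = KCOV_SUBSYSTEM_USB | 1;          /* 0x0100000000000001 */
        uint64_t common = KCOV_SUBSYSTEM_COMMON | 0x42;  /* 0x0000000000000042 */

        printf("usb bus 1 handle: 0x%016" PRIx64 "\n", usb1);
        printf("common handle:    0x%016" PRIx64 "\n", common);
        return 0;
    }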
When a particular userspace process collects coverage via a common
handle, kcov will collect coverage for each code section that is annotated
to use the common handle obtained as kcov_handle from the current
task_struct. Non-common handles, however, allow coverage to be collected
selectively from different subsystems.
.. code-block:: c
    struct kcov_remote_arg {
        unsigned trace_mode;
        unsigned area_size;
        unsigned num_handles;
        uint64_t common_handle;
        uint64_t handles[0];
    };

    #define KCOV_INIT_TRACE    _IOR('c', 1, unsigned long)
    #define KCOV_DISABLE       _IO('c', 101)
    #define KCOV_REMOTE_ENABLE _IOW('c', 102, struct kcov_remote_arg)
    #define COVER_SIZE         (64 << 10)
    #define KCOV_TRACE_PC      0
    #define KCOV_SUBSYSTEM_COMMON (0x00ull << 56)
    #define KCOV_SUBSYSTEM_USB    (0x01ull << 56)
    #define KCOV_SUBSYSTEM_MASK   (0xffull << 56)
    #define KCOV_INSTANCE_MASK    (0xffffffffull)

    static inline __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
    {
        if (subsys & ~KCOV_SUBSYSTEM_MASK || inst & ~KCOV_INSTANCE_MASK)
            return 0;
        return subsys | inst;
    }

    #define KCOV_COMMON_ID   0x42
    #define KCOV_USB_BUS_NUM 1

    int main(int argc, char **argv)
    {
        int fd;
        unsigned long *cover, n, i;
        struct kcov_remote_arg *arg;

        fd = open("/sys/kernel/debug/kcov", O_RDWR);
        if (fd == -1)
            perror("open"), exit(1);
        if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
            perror("ioctl"), exit(1);
        cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if ((void*)cover == MAP_FAILED)
            perror("mmap"), exit(1);

        /* Enable coverage collection via common handle and from USB bus #1. */
        arg = calloc(1, sizeof(*arg) + sizeof(uint64_t));
        if (!arg)
            perror("calloc"), exit(1);
        arg->trace_mode = KCOV_TRACE_PC;
        arg->area_size = COVER_SIZE;
        arg->num_handles = 1;
        arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON,
                                                KCOV_COMMON_ID);
        arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB,
                                             KCOV_USB_BUS_NUM);
        if (ioctl(fd, KCOV_REMOTE_ENABLE, arg))
            perror("ioctl"), free(arg), exit(1);
        free(arg);

        /*
         * Here the user needs to trigger execution of a kernel code section
         * that is either annotated with the common handle, or to trigger some
         * activity on USB bus #1.
         */
        sleep(2);

        n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
        for (i = 0; i < n; i++)
            printf("0x%lx\n", cover[i + 1]);
        if (ioctl(fd, KCOV_DISABLE, 0))
            perror("ioctl"), exit(1);
        if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
            perror("munmap"), exit(1);
        if (close(fd))
            perror("close"), exit(1);
        return 0;
    }

View File

@ -72,11 +72,11 @@ config KPROBES
If in doubt, say "N".
config JUMP_LABEL
bool "Optimize very unlikely/likely branches"
depends on HAVE_ARCH_JUMP_LABEL
depends on CC_HAS_ASM_GOTO
help
This option enables a transparent branch optimization that
bool "Optimize very unlikely/likely branches"
depends on HAVE_ARCH_JUMP_LABEL
depends on CC_HAS_ASM_GOTO
help
This option enables a transparent branch optimization that
makes certain almost-always-true or almost-always-false branch
conditions even cheaper to execute within the kernel.
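(Editorial aside, not part of this diff: the branch optimization this option
describes is what the kernel's static-key API is built on. A hedged sketch of
a hypothetical user follows; my_feature_key, my_hot_path() and
my_feature_enable() are made-up names.)

    #include <linux/jump_label.h>

    /* Hypothetical static key: defaults to false, so the branch below is
     * compiled as a NOP until the key is switched on at runtime. */
    static DEFINE_STATIC_KEY_FALSE(my_feature_key);

    static void my_hot_path(void)
    {
        if (static_branch_unlikely(&my_feature_key)) {
            /* rarely-enabled extra work goes here */
        }
    }

    static void my_feature_enable(void)
    {
        /* patches the NOP into a jump to the unlikely branch */
        static_branch_enable(&my_feature_key);
    }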
@ -84,7 +84,7 @@ config JUMP_LABEL
scheduler functionality, networking code and KVM have such
branches and include support for this optimization technique.
If it is detected that the compiler has support for "asm goto",
If it is detected that the compiler has support for "asm goto",
the kernel will compile such branches with just a nop
instruction. When the condition flag is toggled to true, the
nop will be converted to a jump instruction to execute the
@ -151,8 +151,8 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
information on the topic of unaligned memory accesses.
config ARCH_USE_BUILTIN_BSWAP
bool
help
bool
help
Modern versions of GCC (since 4.4) have builtin functions
for handling byte-swapping. Using these, instead of the old
inline assembler that the architecture code provides in the
@ -221,10 +221,10 @@ config HAVE_DMA_CONTIGUOUS
bool
config GENERIC_SMP_IDLE_THREAD
bool
bool
config GENERIC_IDLE_POLL_SETUP
bool
bool
config ARCH_HAS_FORTIFY_SOURCE
bool
@ -257,7 +257,7 @@ config ARCH_HAS_UNCACHED_SEGMENT
# Select if arch init_task must go in the __init_task_data section
config ARCH_TASK_STRUCT_ON_STACK
bool
bool
# Select if arch has its private alloc_task_struct() function
config ARCH_TASK_STRUCT_ALLOCATOR

View File

@ -73,7 +73,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> 32))
#define pte_pfn(pte) (pte_val(pte) >> 32)
#define mk_pte(page, pgprot) \

View File

@ -27,9 +27,9 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
}
static inline void
pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
pgd_set(pgd, pmd);
pud_set(pud, pmd);
}
extern pgd_t *pgd_alloc(struct mm_struct *mm);

View File

@ -2,7 +2,7 @@
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H
#include <asm-generic/4level-fixup.h>
#include <asm-generic/pgtable-nopud.h>
/*
* This file contains the functions and defines necessary to modify and use
@ -226,8 +226,8 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
extern inline void pud_set(pud_t * pudp, pmd_t * pmdp)
{ pud_val(*pudp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
extern inline unsigned long
@ -238,11 +238,11 @@ pmd_page_vaddr(pmd_t pmd)
#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
#define pgd_page(pgd) (mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32))
#define pud_page(pud) (mem_map + ((pud_val(pud) & _PFN_MASK) >> 32))
#endif
extern inline unsigned long pgd_page_vaddr(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
extern inline unsigned long pud_page_vaddr(pud_t pgd)
{ return PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
@ -256,10 +256,10 @@ extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _P
extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }
extern inline int pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
extern inline int pud_none(pud_t pud) { return !pud_val(pud); }
extern inline int pud_bad(pud_t pud) { return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pud_present(pud_t pud) { return pud_val(pud) & _PAGE_VALID; }
extern inline void pud_clear(pud_t * pudp) { pud_val(*pudp) = 0; }
/*
* The following only work if pte_present() is true.
@ -301,9 +301,9 @@ extern inline pte_t pte_mkspecial(pte_t pte) { return pte; }
*/
/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
{
pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
smp_read_barrier_depends(); /* see above */
return ret;
}

View File

@ -146,6 +146,8 @@ callback_init(void * kernel_end)
{
struct crb_struct * crb;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
void *two_pages;
@ -184,8 +186,10 @@ callback_init(void * kernel_end)
memset(two_pages, 0, 2*PAGE_SIZE);
pgd = pgd_offset_k(VMALLOC_START);
pgd_set(pgd, (pmd_t *)two_pages);
pmd = pmd_offset(pgd, VMALLOC_START);
p4d = p4d_offset(pgd, VMALLOC_START);
pud = pud_offset(p4d, VMALLOC_START);
pud_set(pud, (pmd_t *)two_pages);
pmd = pmd_offset(pud, VMALLOC_START);
pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
if (alpha_using_srm) {
@ -214,9 +218,9 @@ callback_init(void * kernel_end)
/* Newer consoles (especially on larger
systems) may require more pages of
PTEs. Grab additional pages as needed. */
if (pmd != pmd_offset(pgd, vaddr)) {
if (pmd != pmd_offset(pud, vaddr)) {
memset(kernel_end, 0, PAGE_SIZE);
pmd = pmd_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
pmd_set(pmd, (pte_t *)kernel_end);
kernel_end += PAGE_SIZE;
}

View File

@ -12,7 +12,7 @@
#ifndef CONFIG_MMU
#include <asm-generic/4level-fixup.h>
#include <asm-generic/pgtable-nopud.h>
#include <asm/pgtable-nommu.h>
#else

View File

@ -529,7 +529,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
static bool __in_atomic_pool(void *start, size_t size)
{
return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}
static int __free_from_pool(void *start, size_t size)

View File

@ -8,7 +8,7 @@
#ifndef _ASM_C6X_PGTABLE_H
#define _ASM_C6X_PGTABLE_H
#include <asm-generic/4level-fixup.h>
#include <asm-generic/pgtable-nopud.h>
#include <asm/setup.h>
#include <asm/page.h>

View File

@ -28,9 +28,6 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
return (pmd_t *) pgd;
}
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
(unsigned long)(page_address(page)))
@ -45,8 +42,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
__free_page(page);
}
#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_DMA, 0);
@ -100,6 +95,4 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return new_pgd;
}
#define pgd_populate(mm, pmd, pte) BUG()
#endif /* M68K_MCF_PGALLOC_H */

View File

@ -198,17 +198,9 @@ static inline int pmd_bad2(pmd_t *pmd) { return 0; }
#define pmd_present(pmd) (!pmd_none2(&(pmd)))
static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline void pgd_clear(pgd_t *pgdp) {}
#define pte_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
__FILE__, __LINE__, pgd_val(e))
@ -339,14 +331,6 @@ extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/*
* Find an entry in the second-level pagetable.
*/
static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
{
return (pmd_t *) pgd;
}
/*
* Find an entry in the third-level pagetable.
*/
@ -360,12 +344,16 @@ static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
static inline void nocache_page(void *vaddr)
{
pgd_t *dir;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
unsigned long addr = (unsigned long) vaddr;
dir = pgd_offset_k(addr);
pmdp = pmd_offset(dir, addr);
p4dp = p4d_offset(dir, addr);
pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_kernel(pmdp, addr);
*ptep = pte_mknocache(*ptep);
}
@ -376,12 +364,16 @@ static inline void nocache_page(void *vaddr)
static inline void cache_page(void *vaddr)
{
pgd_t *dir;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
unsigned long addr = (unsigned long) vaddr;
dir = pgd_offset_k(addr);
pmdp = pmd_offset(dir, addr);
p4dp = p4d_offset(dir, addr);
pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_kernel(pmdp, addr);
*ptep = pte_mkcache(*ptep);
}

View File

@ -100,6 +100,8 @@ static inline void load_ksp_mmu(struct task_struct *task)
struct mm_struct *mm;
int asid;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long mmuar;
@ -127,7 +129,15 @@ static inline void load_ksp_mmu(struct task_struct *task)
if (pgd_none(*pgd))
goto bug;
pmd = pmd_offset(pgd, mmuar);
p4d = p4d_offset(pgd, mmuar);
if (p4d_none(*p4d))
goto bug;
pud = pud_offset(p4d, mmuar);
if (pud_none(*pud))
goto bug;
pmd = pmd_offset(pud, mmuar);
if (pmd_none(*pmd))
goto bug;

View File

@ -106,9 +106,9 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
}
#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
pgd_set(pgd, pmd);
pud_set(pud, pmd);
}
#endif /* _MOTOROLA_PGALLOC_H */

View File

@ -117,14 +117,14 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
}
}
static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
{
pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
pud_val(*pudp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
}
#define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
#define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
#define __pgd_page(pgd) ((unsigned long)__va(pgd_val(pgd) & _TABLE_MASK))
#define pud_page_vaddr(pud) ((unsigned long)__va(pud_val(pud) & _TABLE_MASK))
#define pte_none(pte) (!pte_val(pte))
@ -147,11 +147,11 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
#define pgd_none(pgd) (!pgd_val(pgd))
#define pgd_bad(pgd) ((pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_TABLE)
#define pgd_clear(pgdp) ({ pgd_val(*pgdp) = 0; })
#define pgd_page(pgd) (mem_map + ((unsigned long)(__va(pgd_val(pgd)) - PAGE_OFFSET) >> PAGE_SHIFT))
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) ((pud_val(pud) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pud_present(pud) (pud_val(pud) & _PAGE_TABLE)
#define pud_clear(pudp) ({ pud_val(*pudp) = 0; })
#define pud_page(pud) (mem_map + ((unsigned long)(__va(pud_val(pud)) - PAGE_OFFSET) >> PAGE_SHIFT))
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
@ -209,9 +209,9 @@ static inline pgd_t *pgd_offset_k(unsigned long address)
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
{
return (pmd_t *)__pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
return (pmd_t *)pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
}
/* Find an entry in the third-level page table.. */
@ -239,11 +239,15 @@ static inline void nocache_page(void *vaddr)
if (CPU_IS_040_OR_060) {
pgd_t *dir;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
dir = pgd_offset_k(addr);
pmdp = pmd_offset(dir, addr);
p4dp = p4d_offset(dir, addr);
pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_kernel(pmdp, addr);
*ptep = pte_mknocache(*ptep);
}
@ -255,11 +259,15 @@ static inline void cache_page(void *vaddr)
if (CPU_IS_040_OR_060) {
pgd_t *dir;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
dir = pgd_offset_k(addr);
pmdp = pmd_offset(dir, addr);
p4dp = p4d_offset(dir, addr);
pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_kernel(pmdp, addr);
*ptep = pte_mkcache(*ptep);
}

View File

@ -21,19 +21,22 @@
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t;
#if !defined(CONFIG_MMU) || CONFIG_PGTABLE_LEVELS == 3
typedef struct { unsigned long pmd[16]; } pmd_t;
#define pmd_val(x) ((&x)->pmd[0])
#define __pmd(x) ((pmd_t) { { (x) }, })
#endif
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((&x)->pmd[0])
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { { (x) }, })
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )

View File

@ -2,7 +2,12 @@
#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H
#include <asm-generic/4level-fixup.h>
#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm/setup.h>
@ -30,9 +35,7 @@
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#ifdef CONFIG_SUN3
#define PMD_SHIFT 17
#else
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT 22
#endif
#define PMD_SIZE (1UL << PMD_SHIFT)

View File

@ -2,7 +2,7 @@
#ifndef _M68KNOMMU_PGTABLE_H
#define _M68KNOMMU_PGTABLE_H
#include <asm-generic/4level-fixup.h>
#include <asm-generic/pgtable-nopud.h>
/*
* (C) Copyright 2000-2002, Greg Ungerer <gerg@snapgear.com>

View File

@ -17,8 +17,6 @@
extern const char bad_pmd_string[];
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
#define __pte_free_tlb(tlb,pte,addr) \
do { \
pgtable_pte_page_dtor(pte); \
@ -41,7 +39,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
* inside the pgd, so has no extra memory associated with it.
*/
#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
@ -58,6 +55,4 @@ static inline pgd_t * pgd_alloc(struct mm_struct *mm)
return new_pgd;
}
#define pgd_populate(mm, pmd, pte) BUG()
#endif /* SUN3_PGALLOC_H */

View File

@ -110,11 +110,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pmd_set(pmdp,ptep) do {} while (0)
static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
pgd_val(*pgdp) = virt_to_phys(pmdp);
}
#define __pte_page(pte) \
((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
#define __pmd_page(pmd) \
@ -145,16 +140,9 @@ static inline int pmd_present2 (pmd_t *pmd) { return pmd_val (*pmd) & SUN3_PMD_V
#define pmd_present(pmd) (!pmd_none2(&(pmd)))
static inline void pmd_clear (pmd_t *pmdp) { pmd_val (*pmdp) = 0; }
static inline int pgd_none (pgd_t pgd) { return 0; }
static inline int pgd_bad (pgd_t pgd) { return 0; }
static inline int pgd_present (pgd_t pgd) { return 1; }
static inline void pgd_clear (pgd_t *pgdp) {}
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
@ -194,12 +182,6 @@ extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
/* Find an entry in a kernel pagetable directory. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* Find an entry in the second-level pagetable. */
static inline pmd_t *pmd_offset (pgd_t *pgd, unsigned long address)
{
return (pmd_t *) pgd;
}
/* Find an entry in the third-level pagetable. */
#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))

View File

@ -465,6 +465,8 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
for (;;) {
struct mm_struct *mm = current->mm;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
@ -474,7 +476,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
pgd = pgd_offset(mm, (unsigned long)mem);
if (!pgd_present(*pgd))
goto bad_access;
pmd = pmd_offset(pgd, (unsigned long)mem);
p4d = p4d_offset(pgd, (unsigned long)mem);
if (!p4d_present(*p4d))
goto bad_access;
pud = pud_offset(p4d, (unsigned long)mem);
if (!pud_present(*pud))
goto bad_access;
pmd = pmd_offset(pud, (unsigned long)mem);
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);

View File

@ -130,8 +130,10 @@ static inline void init_pointer_tables(void)
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) {
if (pgd_present(kernel_pg_dir[i]))
init_pointer_table(__pgd_page(kernel_pg_dir[i]));
pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);
if (pud_present(*pud))
init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
}
/* insert also pointer table that we used to unmap the zero page */

View File

@ -63,18 +63,23 @@ static void __free_io_area(void *addr, unsigned long size)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
p4d_t *p4d_dir;
pud_t *pud_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
p4d_dir = p4d_offset(pgd_dir, virtaddr);
pud_dir = pud_offset(p4d_dir, virtaddr);
if (pud_bad(*pud_dir)) {
printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
pud_clear(pud_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
pmd_dir = pmd_offset(pud_dir, virtaddr);
#if CONFIG_PGTABLE_LEVELS == 3
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
@ -87,6 +92,7 @@ static void __free_io_area(void *addr, unsigned long size)
} else if (pmd_type == 0)
continue;
}
#endif
if (pmd_bad(*pmd_dir)) {
printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
@ -159,6 +165,8 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
unsigned long virtaddr, retaddr;
long offset;
pgd_t *pgd_dir;
p4d_t *p4d_dir;
pud_t *pud_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
@ -245,18 +253,23 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
pgd_dir = pgd_offset_k(virtaddr);
pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
p4d_dir = p4d_offset(pgd_dir, virtaddr);
pud_dir = pud_offset(p4d_dir, virtaddr);
pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
if (!pmd_dir) {
printk("ioremap: no mem for pmd_dir\n");
return NULL;
}
#if CONFIG_PGTABLE_LEVELS == 3
if (CPU_IS_020_OR_030) {
pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
physaddr += PTRTREESIZE;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
} else {
} else
#endif
{
pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
if (!pte_dir) {
printk("ioremap: no mem for pte_dir\n");
@ -307,6 +320,8 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
p4d_t *p4d_dir;
pud_t *pud_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
@ -341,13 +356,16 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
p4d_dir = p4d_offset(pgd_dir, virtaddr);
pud_dir = pud_offset(p4d_dir, virtaddr);
if (pud_bad(*pud_dir)) {
printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
pud_clear(pud_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
pmd_dir = pmd_offset(pud_dir, virtaddr);
#if CONFIG_PGTABLE_LEVELS == 3
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
@ -359,6 +377,7 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
continue;
}
}
#endif
if (pmd_bad(*pmd_dir)) {
printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));

View File

@ -92,6 +92,8 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
unsigned long flags, mmuar, mmutr;
struct mm_struct *mm;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int asid;
@ -113,7 +115,19 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
return -1;
}
pmd = pmd_offset(pgd, mmuar);
p4d = p4d_offset(pgd, mmuar);
if (p4d_none(*p4d)) {
local_irq_restore(flags);
return -1;
}
pud = pud_offset(p4d, mmuar);
if (pud_none(*pud)) {
local_irq_restore(flags);
return -1;
}
pmd = pmd_offset(pud, mmuar);
if (pmd_none(*pmd)) {
local_irq_restore(flags);
return -1;

View File

@ -82,9 +82,11 @@ static pmd_t * __init kernel_ptr_table(void)
*/
last = (unsigned long)kernel_pg_dir;
for (i = 0; i < PTRS_PER_PGD; i++) {
if (!pgd_present(kernel_pg_dir[i]))
pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);
if (!pud_present(*pud))
continue;
pmd = __pgd_page(kernel_pg_dir[i]);
pmd = pgd_page_vaddr(kernel_pg_dir[i]);
if (pmd > last)
last = pmd;
}
@ -118,6 +120,8 @@ static void __init map_node(int node)
#define ROOTTREESIZE (32*1024*1024)
unsigned long physaddr, virtaddr, size;
pgd_t *pgd_dir;
p4d_t *p4d_dir;
pud_t *pud_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
@ -149,14 +153,16 @@ static void __init map_node(int node)
continue;
}
}
if (!pgd_present(*pgd_dir)) {
p4d_dir = p4d_offset(pgd_dir, virtaddr);
pud_dir = pud_offset(p4d_dir, virtaddr);
if (!pud_present(*pud_dir)) {
pmd_dir = kernel_ptr_table();
#ifdef DEBUG
printk ("[new pointer %p]", pmd_dir);
#endif
pgd_set(pgd_dir, pmd_dir);
pud_set(pud_dir, pmd_dir);
} else
pmd_dir = pmd_offset(pgd_dir, virtaddr);
pmd_dir = pmd_offset(pud_dir, virtaddr);
if (CPU_IS_020_OR_030) {
if (virtaddr) {
@ -304,4 +310,3 @@ void __init paging_init(void)
node_set_state(i, N_NORMAL_MEMORY);
}
}

View File

@ -80,6 +80,8 @@ inline int dvma_map_cpu(unsigned long kaddr,
unsigned long vaddr, int len)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
unsigned long end;
int ret = 0;
@ -90,12 +92,14 @@ inline int dvma_map_cpu(unsigned long kaddr,
pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
pgd = pgd_offset_k(vaddr);
p4d = p4d_offset(pgd, vaddr);
pud = pud_offset(p4d, vaddr);
do {
pmd_t *pmd;
unsigned long end2;
if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
if((pmd = pmd_alloc(&init_mm, pud, vaddr)) == NULL) {
ret = -ENOMEM;
goto out;
}
@ -196,4 +200,3 @@ void dvma_unmap_iommu(unsigned long baddr, int len)
}
}

View File

@ -90,7 +90,6 @@ typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
/* FIXME this can depend on linux kernel version */
# ifdef CONFIG_MMU
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
# else /* CONFIG_MMU */
typedef struct { unsigned long ste[64]; } pmd_t;
@ -103,7 +102,6 @@ typedef struct { p4d_t pge[1]; } pgd_t;
# define pgprot_val(x) ((x).pgprot)
# ifdef CONFIG_MMU
# define pmd_val(x) ((x).pmd)
# define pgd_val(x) ((x).pgd)
# else /* CONFIG_MMU */
# define pmd_val(x) ((x).ste[0])
@ -112,7 +110,6 @@ typedef struct { p4d_t pge[1]; } pgd_t;
# endif /* CONFIG_MMU */
# define __pte(x) ((pte_t) { (x) })
# define __pmd(x) ((pmd_t) { (x) })
# define __pgd(x) ((pgd_t) { (x) })
# define __pgprot(x) ((pgprot_t) { (x) })

View File

@ -41,13 +41,6 @@ static inline void free_pgd(pgd_t *pgd)
#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present..
*/
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
@ -58,15 +51,6 @@ extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
#define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long) (pte))
/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present..
*/
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
#define pgd_populate(mm, pmd, pte) BUG()
#endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_PGALLOC_H */

View File

@ -59,9 +59,7 @@ extern int mem_init_done;
#else /* CONFIG_MMU */
#include <asm-generic/4level-fixup.h>
#define __PAGETABLE_PMD_FOLDED 1
#include <asm-generic/pgtable-nopmd.h>
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
@ -138,13 +136,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
*
*/
/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
#define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT PMD_SHIFT
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@ -165,9 +158,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define pte_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
__FILE__, __LINE__, pgd_val(e))
@ -313,18 +303,6 @@ extern unsigned long empty_zero_page[1024];
__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
#ifndef __ASSEMBLY__
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
* into the pgd entry)
*/
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
#define pgd_clear(xp) do { } while (0)
#define pgd_page(pgd) \
((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@ -479,12 +457,6 @@ static inline void ptep_mkdirty(struct mm_struct *mm,
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
return (pmd_t *) dir;
}
/* Find an entry in the third-level page table.. */
#define pte_index(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

View File

@ -160,6 +160,9 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
int err = 0, sig = ksig->sig;
unsigned long address = 0;
#ifdef CONFIG_MMU
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
#endif
@ -195,9 +198,10 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
address = ((unsigned long)frame->tramp);
#ifdef CONFIG_MMU
pmdp = pmd_offset(pud_offset(
pgd_offset(current->mm, address),
address), address);
pgdp = pgd_offset(current->mm, address);
p4dp = p4d_offset(pgdp, address);
pudp = pud_offset(p4dp, address);
pmdp = pmd_offset(pudp, address);
preempt_disable();
ptep = pte_offset_map(pmdp, address);

View File

@ -53,8 +53,11 @@ EXPORT_SYMBOL(kmap_prot);
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
vaddr), vaddr);
pgd_t *pgd = pgd_offset_k(vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
return pte_offset_kernel(pmd_offset(pud, vaddr), vaddr);
}
static void __init highmem_init(void)

View File

@ -134,11 +134,16 @@ EXPORT_SYMBOL(iounmap);
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
p4d_t *p4d;
pud_t *pud;
pmd_t *pd;
pte_t *pg;
int err = -ENOMEM;
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pgd_offset_k(va), va);
p4d = p4d_offset(pgd_offset_k(va), va);
pud = pud_offset(p4d, va);
pd = pmd_offset(pud, va);
/* Use middle 10 bits of VA to index the second-level map */
pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
/* pg = pte_alloc_kernel(&init_mm, pd, va); */
@ -188,13 +193,17 @@ void __init mapin_ram(void)
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int retval = 0;
pgd = pgd_offset(mm, addr & PAGE_MASK);
if (pgd) {
pmd = pmd_offset(pgd, addr & PAGE_MASK);
p4d = p4d_offset(pgd, addr & PAGE_MASK);
pud = pud_offset(p4d, addr & PAGE_MASK);
pmd = pmd_offset(pud, addr & PAGE_MASK);
if (pmd_present(*pmd)) {
pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
if (pte) {

View File

@ -2,6 +2,7 @@
#ifndef _ASM_MSGBUF_H
#define _ASM_MSGBUF_H
#include <asm/ipcbuf.h>
/*
* The msqid64_ds structure for the MIPS architecture.

View File

@ -2,6 +2,8 @@
#ifndef _ASM_SEMBUF_H
#define _ASM_SEMBUF_H
#include <asm/ipcbuf.h>
/*
* The semid64_ds structure for the MIPS architecture.
* Note extra padding because this structure is passed back and forth

View File

@ -41,17 +41,14 @@ void clear_page(void *page);
void copy_page(void *to, void *from);
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __pmd(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)

View File

@ -15,9 +15,6 @@
/*
* Since we have only two-level page tables, these are trivial
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#define pmd_pgtable(pmd) pmd_page(pmd)
extern pgd_t *pgd_alloc(struct mm_struct *mm);

View File

@ -4,8 +4,7 @@
#ifndef _ASMNDS32_PGTABLE_H
#define _ASMNDS32_PGTABLE_H
#define __PAGETABLE_PMD_FOLDED 1
#include <asm-generic/4level-fixup.h>
#include <asm-generic/pgtable-nopmd.h>
#include <linux/sizes.h>
#include <asm/memory.h>
@ -18,26 +17,20 @@
#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024
#define PMD_SHIFT 22
#define PTRS_PER_PMD 1
#define PTRS_PER_PTE 1024
#endif
#ifdef CONFIG_ANDES_PAGE_SIZE_8KB
#define PGDIR_SHIFT 24
#define PTRS_PER_PGD 256
#define PMD_SHIFT 24
#define PTRS_PER_PMD 1
#define PTRS_PER_PTE 2048
#endif
#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */
@ -368,9 +361,6 @@ static inline pmd_t __mk_pmd(pte_t * ptep, unsigned long prot)
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr) ((pmd_t *)(dir))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const unsigned long mask = 0xfff;

View File

@ -7,6 +7,5 @@
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tln)->mm, pmd)
#endif

View File

@ -14,6 +14,7 @@ unsigned int *phy_addr_sp_tmp;
static void nds32_suspend2ram(void)
{
pgd_t *pgdv;
p4d_t *p4dv;
pud_t *pudv;
pmd_t *pmdv;
pte_t *ptev;
@ -21,7 +22,8 @@ static void nds32_suspend2ram(void)
pgdv = (pgd_t *)__va((__nds32__mfsr(NDS32_SR_L1_PPTB) &
L1_PPTB_mskBASE)) + pgd_index((unsigned int)cpu_resume);
pudv = pud_offset(pgdv, (unsigned int)cpu_resume);
p4dv = p4d_offset(pgdv, (unsigned int)cpu_resume);
pudv = pud_offset(p4dv, (unsigned int)cpu_resume);
pmdv = pmd_offset(pudv, (unsigned int)cpu_resume);
ptev = pte_offset_map(pmdv, (unsigned int)cpu_resume);

View File

@ -31,6 +31,8 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
do {
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
if (pgd_none(*pgd))
@ -41,7 +43,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
}
pmd = pmd_offset(pgd, addr);
p4d = p4d_offset(pgd, addr);
pud = pud_offset(p4d, addr);
pmd = pmd_offset(pud, addr);
#if PTRS_PER_PMD != 1
pr_alert(", *pmd=%08lx", pmd_val(*pmd));
#endif
@ -359,6 +363,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
unsigned int index = pgd_index(addr);
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@ -369,8 +374,13 @@ void do_page_fault(unsigned long entry, unsigned long addr,
if (!pgd_present(*pgd_k))
goto no_context;
pud = pud_offset(pgd, addr);
pud_k = pud_offset(pgd_k, addr);
p4d = p4d_offset(pgd, addr);
p4d_k = p4d_offset(pgd_k, addr);
if (!p4d_present(*p4d_k))
goto no_context;
pud = pud_offset(p4d, addr);
pud_k = pud_offset(p4d_k, addr);
if (!pud_present(*pud_k))
goto no_context;

View File

@ -54,6 +54,7 @@ static void __init map_ram(void)
{
unsigned long v, p, e;
pgd_t *pge;
p4d_t *p4e;
pud_t *pue;
pmd_t *pme;
pte_t *pte;
@ -69,7 +70,8 @@ static void __init map_ram(void)
while (p < e) {
int j;
pue = pud_offset(pge, v);
p4e = p4d_offset(pge, v);
pue = pud_offset(p4e, v);
pme = pmd_offset(pue, v);
if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
@ -100,6 +102,7 @@ static void __init fixedrange_init(void)
{
unsigned long vaddr;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
#ifdef CONFIG_HIGHMEM
@ -111,7 +114,8 @@ static void __init fixedrange_init(void)
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
pgd = swapper_pg_dir + pgd_index(vaddr);
pud = pud_offset(pgd, vaddr);
p4d = p4d_offset(pgd, vaddr);
pud = pud_offset(p4d, vaddr);
pmd = pmd_offset(pud, vaddr);
fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!fixmap_pmd_p)
@ -126,7 +130,8 @@ static void __init fixedrange_init(void)
vaddr = PKMAP_BASE;
pgd = swapper_pg_dir + pgd_index(vaddr);
pud = pud_offset(pgd, vaddr);
p4d = p4d_offset(pgd, vaddr);
pud = pud_offset(p4d, vaddr);
pmd = pmd_offset(pud, vaddr);
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)

View File

@ -74,6 +74,8 @@ void setup_mm_for_reboot(char mode)
{
unsigned long pmdval;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
int i;
@ -84,7 +86,9 @@ void setup_mm_for_reboot(char mode)
for (i = 0; i < USER_PTRS_PER_PGD; i++) {
pmdval = (i << PGDIR_SHIFT);
pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
p4d = p4d_offset(pgd, i << PGDIR_SHIFT);
pud = pud_offset(p4d, i << PGDIR_SHIFT);
pmd = pmd_offset(pud + i, i << PGDIR_SHIFT);
set_pmd(pmd, __pmd(pmdval));
}
}

View File

@ -16,10 +16,14 @@ extern struct cache_info L1_cache_info[2];
int va_kernel_present(unsigned long addr)
{
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
pmd = pmd_offset(pgd_offset_k(addr), addr);
p4d = p4d_offset(pgd_offset_k(addr), addr);
pud = pud_offset(p4d, addr);
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_map(pmd, addr);
pte = *ptep;
@ -32,20 +36,24 @@ int va_kernel_present(unsigned long addr)
pte_t va_present(struct mm_struct * mm, unsigned long addr)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
pgd = pgd_offset(mm, addr);
if (!pgd_none(*pgd)) {
pud = pud_offset(pgd, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_map(pmd, addr);
pte = *ptep;
if (pte_present(pte))
return pte;
p4d = p4d_offset(pgd, addr);
if (!p4d_none(*p4d)) {
pud = pud_offset(p4d, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_map(pmd, addr);
pte = *ptep;
if (pte_present(pte))
return pte;
}
}
}
}

View File

@ -42,48 +42,54 @@ typedef struct { unsigned long pte; } pte_t; /* either 32 or 64bit */
/* NOTE: even on 64 bits, these entries are __u32 because we allocate
* the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
typedef struct { __u32 pmd; } pmd_t;
typedef struct { __u32 pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
/* These do not work lvalues, so make sure we don't use them as such. */
#if CONFIG_PGTABLE_LEVELS == 3
typedef struct { __u32 pmd; } pmd_t;
#define __pmd(x) ((pmd_t) { (x) } )
/* pXd_val() do not work as lvalues, so make sure we don't use them as such. */
#define pmd_val(x) ((x).pmd + 0)
#endif
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd + 0)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
#define __pmd_val_set(x,n) (x).pmd = (n)
#define __pgd_val_set(x,n) (x).pgd = (n)
#else
/*
* .. while these make it easier on the compiler
*/
typedef unsigned long pte_t;
#if CONFIG_PGTABLE_LEVELS == 3
typedef __u32 pmd_t;
#define pmd_val(x) (x)
#define __pmd(x) (x)
#endif
typedef __u32 pgd_t;
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __pmd(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)
#define __pmd_val_set(x,n) (x) = (n)
#define __pgd_val_set(x,n) (x) = (n)
#endif /* STRICT_MM_TYPECHECKS */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#if CONFIG_PGTABLE_LEVELS == 3
#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
#endif
typedef struct page *pgtable_t;
typedef struct __physmem_range {

View File

@ -34,13 +34,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
/* Populate first pmd with allocated memory. We mark it
* with PxD_FLAG_ATTACHED as a signal to the system that this
* pmd entry may not be cleared. */
__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
PxD_FLAG_VALID |
PxD_FLAG_ATTACHED)
+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
PxD_FLAG_VALID |
PxD_FLAG_ATTACHED)
+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
* a signal that this pmd may not be freed */
__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
#endif
}
spin_lock_init(pgd_spinlock(actual_pgd));
@ -59,10 +59,10 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
/* Three Level Page Table Support for pmd's */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
@ -88,19 +88,6 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_pages((unsigned long)pmd, PMD_ORDER);
}
#else
/* Two Level Page Table Support for pmd's */
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#endif
static inline void
@ -110,14 +97,14 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
PxD_FLAG_VALID |
PxD_FLAG_ATTACHED)
+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
PxD_FLAG_VALID |
PxD_FLAG_ATTACHED)
+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
else
#endif
__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
}
#define pmd_populate(mm, pmd, pte_page) \

View File

@ -3,7 +3,12 @@
#define _PARISC_PGTABLE_H
#include <asm/page.h>
#include <asm-generic/4level-fixup.h>
#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#endif
#include <asm/fixmap.h>
@ -101,8 +106,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#endif
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
@ -132,19 +139,18 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define PTRS_PER_PTE (1UL << BITS_PER_PTE)
/* Definitions for 2nd level */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#if CONFIG_PGTABLE_LEVELS == 3
#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
#else
#define __PAGETABLE_PMD_FOLDED 1
#define BITS_PER_PMD 0
#endif
#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
/* Definitions for 1st level */
#define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD)
#define PGDIR_SHIFT (PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT)
#else
@ -317,6 +323,8 @@ extern unsigned long *empty_zero_page;
#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pud_flag(x) (pud_val(x) & PxD_FLAG_MASK)
#define pud_address(x) ((unsigned long)(pud_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@ -334,42 +342,32 @@ static inline void pmd_clear(pmd_t *pmd) {
if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the entry pointing to the permanent pmd
* attached to the pgd; cannot clear it */
__pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
set_pmd(pmd, __pmd(PxD_FLAG_ATTACHED));
else
#endif
__pmd_val_set(*pmd, 0);
set_pmd(pmd, __pmd(0));
}
#if CONFIG_PGTABLE_LEVELS == 3
#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
#define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd))
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_address(pud)))
#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
/* For 64 bit we have three level tables */
#define pgd_none(x) (!pgd_val(x))
#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
#define pud_none(x) (!pud_val(x))
#define pud_bad(x) (!(pud_flag(x) & PxD_FLAG_VALID))
#define pud_present(x) (pud_flag(x) & PxD_FLAG_PRESENT)
static inline void pud_clear(pud_t *pud) {
#if CONFIG_PGTABLE_LEVELS == 3
if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd; cannot
if(pud_flag(*pud) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pud; cannot
* free it */
return;
#endif
__pgd_val_set(*pgd, 0);
set_pud(pud, __pud(0));
}
#else
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
* into the pgd entry)
*/
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline void pgd_clear(pgd_t * pgdp) { }
#endif
/*
@ -452,7 +450,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir,address) \
((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
((pmd_t *) pud_page_vaddr(*(dir)) + pmd_index(address))
#else
#define pmd_offset(dir,addr) ((pmd_t *) dir)
#endif

View File

@ -4,7 +4,9 @@
#include <asm-generic/tlb.h>
#if CONFIG_PGTABLE_LEVELS == 3
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
#endif
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#endif

View File

@ -3,6 +3,7 @@
#define _PARISC_MSGBUF_H
#include <asm/bitsperlong.h>
#include <asm/ipcbuf.h>
/*
* The msqid64_ds structure for parisc architecture, copied from sparc.

View File

@ -3,6 +3,7 @@
#define _PARISC_SEMBUF_H
#include <asm/bitsperlong.h>
#include <asm/ipcbuf.h>
/*
* The semid64_ds structure for parisc architecture.

View File

@ -534,11 +534,14 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
pte_t *ptep = NULL;
if (!pgd_none(*pgd)) {
pud_t *pud = pud_offset(pgd, addr);
if (!pud_none(*pud)) {
pmd_t *pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
ptep = pte_offset_map(pmd, addr);
p4d_t *p4d = p4d_offset(pgd, addr);
if (!p4d_none(*p4d)) {
pud_t *pud = pud_offset(p4d, addr);
if (!pud_none(*pud)) {
pmd_t *pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
ptep = pte_offset_map(pmd, addr);
}
}
}
return ptep;

View File

@ -133,9 +133,14 @@ static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
dir = pgd_offset_k(vaddr);
do {
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pmd = pmd_alloc(NULL, dir, vaddr);
p4d = p4d_offset(dir, vaddr);
pud = pud_offset(p4d, vaddr);
pmd = pmd_alloc(NULL, pud, vaddr);
if (!pmd)
return -ENOMEM;
if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))

View File

@ -14,11 +14,13 @@ void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
{
unsigned long vaddr = __fix_to_virt(idx);
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte;
if (pmd_none(*pmd))
pmd = pmd_alloc(NULL, pgd, vaddr);
pmd = pmd_alloc(NULL, pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
if (pte_none(*pte))
@ -32,7 +34,9 @@ void notrace clear_fixmap(enum fixed_addresses idx)
{
unsigned long vaddr = __fix_to_virt(idx);
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte = pte_offset_kernel(pmd, vaddr);
if (WARN_ON(pte_none(*pte)))

View File

@ -49,6 +49,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
@ -61,7 +62,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
addr &= HPAGE_MASK;
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
p4d = p4d_offset(pgd, addr);
pud = pud_alloc(mm, p4d, addr);
if (pud) {
pmd = pmd_alloc(mm, pud, addr);
if (pmd)
@ -74,6 +76,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
@ -82,11 +85,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
pgd = pgd_offset(mm, addr);
if (!pgd_none(*pgd)) {
pud = pud_offset(pgd, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
pte = pte_offset_map(pmd, addr);
p4d = p4d_offset(pgd, addr);
if (!p4d_none(*p4d)) {
pud = pud_offset(p4d, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
pte = pte_offset_map(pmd, addr);
}
}
}
return pte;

View File

@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_MSGBUF_H
#define _ASM_POWERPC_MSGBUF_H
#include <asm/ipcbuf.h>
/*
* The msqid64_ds structure for the PowerPC architecture.
* Note extra padding because this structure is passed back and forth

View File

@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_SEMBUF_H
#define _ASM_POWERPC_SEMBUF_H
#include <asm/ipcbuf.h>
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License

View File

@ -2,6 +2,8 @@
#ifndef __S390_IPCBUF_H__
#define __S390_IPCBUF_H__
#include <linux/posix_types.h>
/*
* The user_ipc_perm structure for S/390 architecture.
* Note extra padding because this structure is passed back and forth


@ -26,14 +26,14 @@ static inline void free_pgd_fast(pgd_t *pgd)
#define pgd_free(mm, pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
static inline void pud_set(pud_t * pudp, pmd_t * pmdp)
{
unsigned long pa = __nocache_pa(pmdp);
set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (pa >> 4))));
set_pte((pte_t *)pudp, __pte((SRMMU_ET_PTD | (pa >> 4))));
}
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
#define pud_populate(MM, PGD, PMD) pud_set(PGD, PMD)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
unsigned long address)


@ -12,7 +12,7 @@
#include <linux/const.h>
#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>
#include <asm-generic/pgtable-nopud.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
@ -132,12 +132,12 @@ static inline struct page *pmd_page(pmd_t pmd)
return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}
static inline unsigned long pgd_page_vaddr(pgd_t pgd)
static inline unsigned long pud_page_vaddr(pud_t pud)
{
if (srmmu_device_memory(pgd_val(pgd))) {
if (srmmu_device_memory(pud_val(pud))) {
return ~0;
} else {
unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
return (unsigned long)__nocache_va(v << 4);
}
}
@ -184,24 +184,24 @@ static inline void pmd_clear(pmd_t *pmdp)
set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}
static inline int pgd_none(pgd_t pgd)
static inline int pud_none(pud_t pud)
{
return !(pgd_val(pgd) & 0xFFFFFFF);
return !(pud_val(pud) & 0xFFFFFFF);
}
static inline int pgd_bad(pgd_t pgd)
static inline int pud_bad(pud_t pud)
{
return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}
static inline int pgd_present(pgd_t pgd)
static inline int pud_present(pud_t pud)
{
return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}
static inline void pgd_clear(pgd_t *pgdp)
static inline void pud_clear(pud_t *pudp)
{
set_pte((pte_t *)pgdp, __pte(0));
set_pte((pte_t *)pudp, __pte(0));
}
/*
@ -319,9 +319,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
static inline pmd_t *pmd_offset(pud_t * dir, unsigned long address)
{
return (pmd_t *) pgd_page_vaddr(*dir) +
return (pmd_t *) pud_page_vaddr(*dir) +
((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}


@ -2,6 +2,8 @@
#ifndef __SPARC_IPCBUF_H
#define __SPARC_IPCBUF_H
#include <linux/posix_types.h>
/*
* The ipc64_perm structure for sparc/sparc64 architecture.
* Note extra padding because this structure is passed back and forth


@ -2,6 +2,8 @@
#ifndef _SPARC_MSGBUF_H
#define _SPARC_MSGBUF_H
#include <asm/ipcbuf.h>
/*
* The msqid64_ds structure for sparc64 architecture.
* Note extra padding because this structure is passed back and forth


@ -2,6 +2,8 @@
#ifndef _SPARC_SEMBUF_H
#define _SPARC_SEMBUF_H
#include <asm/ipcbuf.h>
/*
* The semid64_ds structure for sparc architecture.
* Note extra padding because this structure is passed back and forth


@ -351,6 +351,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
*/
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pgd = tsk->active_mm->pgd + offset;
@ -363,8 +365,13 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
return;
}
pmd = pmd_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address);
p4d = p4d_offset(pgd, address);
pud = pud_offset(p4d, address);
pmd = pmd_offset(pud, address);
p4d_k = p4d_offset(pgd_k, address);
pud_k = pud_offset(p4d_k, address);
pmd_k = pmd_offset(pud_k, address);
if (pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_area_nosemaphore;


@ -39,10 +39,14 @@ static pte_t *kmap_pte;
void __init kmap_init(void)
{
unsigned long address;
p4d_t *p4d;
pud_t *pud;
pmd_t *dir;
address = __fix_to_virt(FIX_KMAP_BEGIN);
dir = pmd_offset(pgd_offset_k(address), address);
p4d = p4d_offset(pgd_offset_k(address), address);
pud = pud_offset(p4d, address);
dir = pmd_offset(pud, address);
/* cache the first kmap pte */
kmap_pte = pte_offset_kernel(dir, address);


@ -239,12 +239,16 @@ static void *iounit_alloc(struct device *dev, size_t len,
page = va;
{
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
long i;
pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_offset(pgdp, addr);
p4dp = p4d_offset(pgdp, addr);
pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));


@ -343,6 +343,8 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
page = va;
{
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@ -354,7 +356,9 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
__flush_page_to_ram(page);
pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_offset(pgdp, addr);
p4dp = p4d_offset(pgdp, addr);
pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));


@ -296,6 +296,8 @@ static void __init srmmu_nocache_init(void)
void *srmmu_nocache_bitmap;
unsigned int bitmap_bits;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long paddr, vaddr;
@ -329,6 +331,8 @@ static void __init srmmu_nocache_init(void)
while (vaddr < srmmu_nocache_end) {
pgd = pgd_offset_k(vaddr);
p4d = p4d_offset(__nocache_fix(pgd), vaddr);
pud = pud_offset(__nocache_fix(p4d), vaddr);
pmd = pmd_offset(__nocache_fix(pgd), vaddr);
pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
@ -516,13 +520,17 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
unsigned long virt_addr, int bus_type)
{
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
unsigned long tmp;
physaddr &= PAGE_MASK;
pgdp = pgd_offset_k(virt_addr);
pmdp = pmd_offset(pgdp, virt_addr);
p4dp = p4d_offset(pgdp, virt_addr);
pudp = pud_offset(p4dp, virt_addr);
pmdp = pmd_offset(pudp, virt_addr);
ptep = pte_offset_kernel(pmdp, virt_addr);
tmp = (physaddr >> 4) | SRMMU_ET_PTE;
@ -551,11 +559,16 @@ void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
pgdp = pgd_offset_k(virt_addr);
pmdp = pmd_offset(pgdp, virt_addr);
p4dp = p4d_offset(pgdp, virt_addr);
pudp = pud_offset(p4dp, virt_addr);
pmdp = pmd_offset(pudp, virt_addr);
ptep = pte_offset_kernel(pmdp, virt_addr);
/* No need to flush uncacheable page. */
@ -693,20 +706,24 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
unsigned long end)
{
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
while (start < end) {
pgdp = pgd_offset_k(start);
if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
p4dp = p4d_offset(pgdp, start);
pudp = pud_offset(p4dp, start);
if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
pmdp = __srmmu_get_nocache(
SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
pgd_set(__nocache_fix(pgdp), pmdp);
pud_set(__nocache_fix(pudp), pmdp);
}
pmdp = pmd_offset(__nocache_fix(pgdp), start);
pmdp = pmd_offset(__nocache_fix(pudp), start);
if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
@ -724,19 +741,23 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
unsigned long end)
{
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
while (start < end) {
pgdp = pgd_offset_k(start);
if (pgd_none(*pgdp)) {
p4dp = p4d_offset(pgdp, start);
pudp = pud_offset(p4dp, start);
if (pud_none(*pudp)) {
pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
pgd_set(pgdp, pmdp);
pud_set((pud_t *)pgdp, pmdp);
}
pmdp = pmd_offset(pgdp, start);
pmdp = pmd_offset(pudp, start);
if (srmmu_pmd_none(*pmdp)) {
ptep = __srmmu_get_nocache(PTE_SIZE,
PTE_SIZE);
@ -779,6 +800,8 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
unsigned long probed;
unsigned long addr;
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
@ -810,18 +833,20 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
}
pgdp = pgd_offset_k(start);
p4dp = p4d_offset(pgdp, start);
pudp = pud_offset(p4dp, start);
if (what == 2) {
*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
start += SRMMU_PGDIR_SIZE;
continue;
}
if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
pgd_set(__nocache_fix(pgdp), pmdp);
pud_set(__nocache_fix(pudp), pmdp);
}
pmdp = pmd_offset(__nocache_fix(pgdp), start);
if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
@ -906,6 +931,8 @@ void __init srmmu_paging_init(void)
phandle cpunode;
char node_str[128];
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long pages_avail;
@ -967,7 +994,9 @@ void __init srmmu_paging_init(void)
srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
pgd = pgd_offset_k(PKMAP_BASE);
pmd = pmd_offset(pgd, PKMAP_BASE);
p4d = p4d_offset(pgd, PKMAP_BASE);
pud = pud_offset(p4d, PKMAP_BASE);
pmd = pmd_offset(pud, PKMAP_BASE);
pte = pte_offset_kernel(pmd, PKMAP_BASE);
pkmap_page_table = pte;


@ -8,7 +8,6 @@
#ifndef __UM_PGTABLE_2LEVEL_H
#define __UM_PGTABLE_2LEVEL_H
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/* PGDIR_SHIFT determines what a third-level page table entry can map */


@ -7,7 +7,6 @@
#ifndef __UM_PGTABLE_3LEVEL_H
#define __UM_PGTABLE_3LEVEL_H
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
/* PGDIR_SHIFT determines what a third-level page table entry can map */


@ -106,6 +106,9 @@ extern unsigned long end_iomem;
#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
#define p4d_newpage(x) (p4d_val(x) & _PAGE_NEWPAGE)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
#define pte_page(x) pfn_to_page(pte_pfn(x))


@ -96,6 +96,7 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
int i, j;
@ -107,7 +108,8 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
pud = pud_offset(pgd, vaddr);
p4d = p4d_offset(pgd, vaddr);
pud = pud_offset(p4d, vaddr);
if (pud_none(*pud))
one_md_table_init(pud);
pmd = pmd_offset(pud, vaddr);
@ -124,6 +126,7 @@ static void __init fixaddr_user_init( void)
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
long size = FIXADDR_USER_END - FIXADDR_USER_START;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@ -144,7 +147,8 @@ static void __init fixaddr_user_init( void)
for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
p += PAGE_SIZE) {
pgd = swapper_pg_dir + pgd_index(vaddr);
pud = pud_offset(pgd, vaddr);
p4d = p4d_offset(pgd, vaddr);
pud = pud_offset(p4d, vaddr);
pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
pte_set_val(*pte, p, PAGE_READONLY);


@ -19,15 +19,21 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
unsigned long kernel)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pgd = pgd_offset(mm, proc);
pud = pud_alloc(mm, pgd, proc);
if (!pud)
p4d = p4d_alloc(mm, pgd, proc);
if (!p4d)
goto out;
pud = pud_alloc(mm, p4d, proc);
if (!pud)
goto out_pud;
pmd = pmd_alloc(mm, pud, proc);
if (!pmd)
goto out_pmd;
@ -44,6 +50,8 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
pmd_free(mm, pmd);
out_pmd:
pud_free(mm, pud);
out_pud:
p4d_free(mm, p4d);
out:
return -ENOMEM;
}


@ -17,6 +17,7 @@
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@ -27,7 +28,11 @@ pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
if (!pgd_present(*pgd))
return NULL;
pud = pud_offset(pgd, addr);
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d))
return NULL;
pud = pud_offset(p4d, addr);
if (!pud_present(*pud))
return NULL;


@ -277,7 +277,7 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
return ret;
}
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end,
struct host_vm_change *hvc)
{
@ -285,7 +285,7 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
unsigned long next;
int ret = 0;
pud = pud_offset(pgd, addr);
pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (!pud_present(*pud)) {
@ -299,6 +299,28 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
return ret;
}
static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
unsigned long end,
struct host_vm_change *hvc)
{
p4d_t *p4d;
unsigned long next;
int ret = 0;
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
if (!p4d_present(*p4d)) {
if (hvc->force || p4d_newpage(*p4d)) {
ret = add_munmap(addr, next - addr, hvc);
p4d_mkuptodate(*p4d);
}
} else
ret = update_pud_range(p4d, addr, next, hvc);
} while (p4d++, addr = next, ((addr < end) && !ret));
return ret;
}
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
@ -316,8 +338,8 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
ret = add_munmap(addr, next - addr, &hvc);
pgd_mkuptodate(*pgd);
}
}
else ret = update_pud_range(pgd, addr, next, &hvc);
} else
ret = update_p4d_range(pgd, addr, next, &hvc);
} while (pgd++, addr = next, ((addr < end_addr) && !ret));
if (!ret)
@ -338,6 +360,7 @@ static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
struct mm_struct *mm;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@ -364,7 +387,23 @@ static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
continue;
}
pud = pud_offset(pgd, addr);
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d)) {
last = ADD_ROUND(addr, P4D_SIZE);
if (last > end)
last = end;
if (p4d_newpage(*p4d)) {
updated = 1;
err = add_munmap(addr, last - addr, &hvc);
if (err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
addr = last;
continue;
}
pud = pud_offset(p4d, addr);
if (!pud_present(*pud)) {
last = ADD_ROUND(addr, PUD_SIZE);
if (last > end)
@ -424,6 +463,7 @@ static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@ -437,7 +477,11 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
if (!pgd_present(*pgd))
goto kill;
pud = pud_offset(pgd, address);
p4d = p4d_offset(pgd, address);
if (!p4d_present(*p4d))
goto kill;
pud = pud_offset(p4d, address);
if (!pud_present(*pud))
goto kill;
@ -490,35 +534,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
force_sig(SIGKILL);
}
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
return pgd_offset(mm, address);
}
pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
return pud_offset(pgd, address);
}
pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
return pmd_offset(pud, address);
}
pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
return pte_offset_kernel(pmd, address);
}
pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
pgd_t *pgd = pgd_offset(task->mm, addr);
pud_t *pud = pud_offset(pgd, addr);
pmd_t *pmd = pmd_offset(pud, addr);
return pte_offset_map(pmd, addr);
}
void flush_tlb_all(void)
{
/*


@ -28,6 +28,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@ -104,7 +105,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
}
pgd = pgd_offset(mm, address);
pud = pud_offset(pgd, address);
p4d = p4d_offset(pgd, address);
pud = pud_offset(p4d, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
} while (!pte_present(*pte));


@ -5,6 +5,9 @@
#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/msgbuf.h>
#else
#include <asm/ipcbuf.h>
/*
* The msqid64_ds structure for x86 architecture with x32 ABI.
*


@ -2,6 +2,8 @@
#ifndef _ASM_X86_SEMBUF_H
#define _ASM_X86_SEMBUF_H
#include <asm/ipcbuf.h>
/*
* The semid64_ds structure for x86 architecture.
* Note extra padding because this structure is passed back and forth


@ -12,6 +12,8 @@
#ifndef _XTENSA_IPCBUF_H
#define _XTENSA_IPCBUF_H
#include <linux/posix_types.h>
/*
* Pad space is left for:
* - 32-bit mode_t and seq


@ -17,6 +17,8 @@
#ifndef _XTENSA_MSGBUF_H
#define _XTENSA_MSGBUF_H
#include <asm/ipcbuf.h>
struct msqid64_ds {
struct ipc64_perm msg_perm;
#ifdef __XTENSA_EB__


@ -22,6 +22,7 @@
#define _XTENSA_SEMBUF_H
#include <asm/byteorder.h>
#include <asm/ipcbuf.h>
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */


@ -287,31 +287,6 @@ static int charlcd_init_display(struct charlcd *lcd)
return 0;
}
/*
* Parses an unsigned integer from a string, until a non-digit character
* is found. The empty string is not accepted. No overflow checks are done.
*
* Returns whether the parsing was successful. Only in that case
* the output parameters are written to.
*
* TODO: If the kernel adds an inplace version of kstrtoul(), this function
* could be easily replaced by that.
*/
static bool parse_n(const char *s, unsigned long *res, const char **next_s)
{
if (!isdigit(*s))
return false;
*res = 0;
while (isdigit(*s)) {
*res = *res * 10 + (*s - '0');
++s;
}
*next_s = s;
return true;
}
/*
* Parses a movement command of the form "(.*);", where the group can be
* any number of subcommands of the form "(x|y)[0-9]+".
@ -336,6 +311,7 @@ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
{
unsigned long new_x = *x;
unsigned long new_y = *y;
char *p;
for (;;) {
if (!*s)
@ -345,11 +321,15 @@ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
break;
if (*s == 'x') {
if (!parse_n(s + 1, &new_x, &s))
new_x = simple_strtoul(s + 1, &p, 10);
if (p == s + 1)
return false;
s = p;
} else if (*s == 'y') {
if (!parse_n(s + 1, &new_y, &s))
new_y = simple_strtoul(s + 1, &p, 10);
if (p == s + 1)
return false;
s = p;
} else {
return false;
}
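The conversion above drops the local parse_n() helper in favour of simple_strtoul(), using the end pointer to reject an empty number. A minimal sketch of the same idiom outside the driver (the parse_x() wrapper and its "x<digits>" input format are made up for illustration):

#include <linux/kernel.h>	/* simple_strtoul() */
#include <linux/types.h>

/* Parse "x<digits>", advancing *s past the digits; sketch only. */
static bool parse_x(const char **s, unsigned long *x)
{
	char *end;

	*x = simple_strtoul(*s + 1, &end, 10);
	if (end == *s + 1)	/* no digits were consumed */
		return false;
	*s = end;
	return true;
}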


@ -496,20 +496,17 @@ static ssize_t node_read_vmstat(struct device *dev,
int n = 0;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
n += sprintf(buf+n, "%s %lu\n", zone_stat_name(i),
sum_zone_node_page_state(nid, i));
#ifdef CONFIG_NUMA
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
n += sprintf(buf+n, "%s %lu\n",
vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
n += sprintf(buf+n, "%s %lu\n", numa_stat_name(i),
sum_zone_numa_state(nid, i));
#endif
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
n += sprintf(buf+n, "%s %lu\n",
vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
NR_VM_NUMA_STAT_ITEMS],
n += sprintf(buf+n, "%s %lu\n", node_stat_name(i),
node_page_state(pgdat, i));
return n;
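The removed index arithmetic shows what the new accessors must expand to; a sketch of assumed definitions (names taken from the hunk, bodies inferred from the deleted lines rather than copied from include/linux/vmstat.h):

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS + item];
}

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS + item];
}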


@ -178,46 +178,25 @@ static int dio48e_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(port_state & mask);
}
static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
size_t i;
static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
const unsigned int gpio_reg_size = 8;
unsigned int bits_offset;
size_t word_index;
unsigned int word_offset;
unsigned long word_mask;
const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
unsigned long offset;
unsigned long gpio_mask;
unsigned int port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
/* get bits are evaluated a gpio port register at a time */
for (i = 0; i < ARRAY_SIZE(ports); i++) {
/* gpio offset in bits array */
bits_offset = i * gpio_reg_size;
for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
port_addr = dio48egpio->base + ports[offset / 8];
port_state = inb(port_addr) & gpio_mask;
/* word index for bits array */
word_index = BIT_WORD(bits_offset);
/* gpio offset within current word of bits array */
word_offset = bits_offset % BITS_PER_LONG;
/* mask of get bits for current gpio within current word */
word_mask = mask[word_index] & (port_mask << word_offset);
if (!word_mask) {
/* no get bits in this port so skip to next one */
continue;
}
/* read bits from current gpio port */
port_state = inb(dio48egpio->base + ports[i]);
/* store acquired bits at respective bits array offset */
bits[word_index] |= (port_state << word_offset) & word_mask;
bitmap_set_value8(bits, port_state, offset);
}
return 0;
@ -247,37 +226,27 @@ static void dio48e_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
unsigned int i;
const unsigned int gpio_reg_size = 8;
unsigned int port;
unsigned int out_port;
unsigned int bitmask;
unsigned long offset;
unsigned long gpio_mask;
size_t index;
unsigned int port_addr;
unsigned long bitmask;
unsigned long flags;
/* set bits are evaluated a gpio register size at a time */
for (i = 0; i < chip->ngpio; i += gpio_reg_size) {
/* no more set bits in this mask word; skip to the next word */
if (!mask[BIT_WORD(i)]) {
i = (BIT_WORD(i) + 1) * BITS_PER_LONG - gpio_reg_size;
continue;
}
for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
index = offset / 8;
port_addr = dio48egpio->base + ports[index];
port = i / gpio_reg_size;
out_port = (port > 2) ? port + 1 : port;
bitmask = mask[BIT_WORD(i)] & bits[BIT_WORD(i)];
bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
raw_spin_lock_irqsave(&dio48egpio->lock, flags);
/* update output state data and set device gpio register */
dio48egpio->out_state[port] &= ~mask[BIT_WORD(i)];
dio48egpio->out_state[port] |= bitmask;
outb(dio48egpio->out_state[port], dio48egpio->base + out_port);
dio48egpio->out_state[index] &= ~gpio_mask;
dio48egpio->out_state[index] |= bitmask;
outb(dio48egpio->out_state[index], port_addr);
raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
/* prepare for next gpio register set */
mask[BIT_WORD(i)] >>= gpio_reg_size;
bits[BIT_WORD(i)] >>= gpio_reg_size;
}
}
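The same for_each_set_clump8()/bitmap_set_value8() pattern recurs in the other port-oriented GPIO drivers below. A stand-alone sketch of the read side, with the register access and the 48-line geometry made up for illustration:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

/* Stand-in for the driver's real 8-bit port read (made up). */
static u8 read_port8(unsigned int port)
{
	return 0;	/* hardware access would go here */
}

/* Gather 48 input lines into 'bits', touching only ports named in 'mask'. */
static void get_lines(unsigned long *mask, unsigned long *bits)
{
	unsigned long offset;	/* bit position of the current 8-bit clump */
	unsigned long clump;	/* requested bits within that clump */

	bitmap_zero(bits, 48);
	for_each_set_clump8(offset, clump, mask, 48)
		bitmap_set_value8(bits, read_port8(offset / 8) & clump, offset);
}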


@ -85,42 +85,20 @@ static int idi_48_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
size_t i;
unsigned long offset;
unsigned long gpio_mask;
static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
const unsigned int gpio_reg_size = 8;
unsigned int bits_offset;
size_t word_index;
unsigned int word_offset;
unsigned long word_mask;
const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
unsigned int port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
/* get bits are evaluated a gpio port register at a time */
for (i = 0; i < ARRAY_SIZE(ports); i++) {
/* gpio offset in bits array */
bits_offset = i * gpio_reg_size;
for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
port_addr = idi48gpio->base + ports[offset / 8];
port_state = inb(port_addr) & gpio_mask;
/* word index for bits array */
word_index = BIT_WORD(bits_offset);
/* gpio offset within current word of bits array */
word_offset = bits_offset % BITS_PER_LONG;
/* mask of get bits for current gpio within current word */
word_mask = mask[word_index] & (port_mask << word_offset);
if (!word_mask) {
/* no get bits in this port so skip to next one */
continue;
}
/* read bits from current gpio port */
port_state = inb(idi48gpio->base + ports[i]);
/* store acquired bits at respective bits array offset */
bits[word_index] |= (port_state << word_offset) & word_mask;
bitmap_set_value8(bits, port_state, offset);
}
return 0;


@ -6,6 +6,7 @@
* Copyright (C) 2010 Miguel Gaio <miguel.gaio@efixo.com>
*/
#include <linux/bitops.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
@ -72,20 +73,18 @@ static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
unsigned int i, idx, shift;
u8 bank, bankmask;
unsigned long offset;
unsigned long bankmask;
size_t bank;
unsigned long bitmask;
mutex_lock(&chip->lock);
for (i = 0, bank = chip->registers - 1; i < chip->registers;
i++, bank--) {
idx = i / sizeof(*mask);
shift = i % sizeof(*mask) * BITS_PER_BYTE;
bankmask = mask[idx] >> shift;
if (!bankmask)
continue;
for_each_set_clump8(offset, bankmask, mask, chip->registers * 8) {
bank = chip->registers - 1 - offset / 8;
bitmask = bitmap_get_value8(bits, offset) & bankmask;
chip->buffer[bank] &= ~bankmask;
chip->buffer[bank] |= bankmask & (bits[idx] >> shift);
chip->buffer[bank] |= bitmask;
}
__gen_74x164_write_config(chip);
mutex_unlock(&chip->lock);


@ -167,46 +167,25 @@ static int gpiomm_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(port_state & mask);
}
static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
static int gpiomm_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
size_t i;
static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
const unsigned int gpio_reg_size = 8;
unsigned int bits_offset;
size_t word_index;
unsigned int word_offset;
unsigned long word_mask;
const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
unsigned long offset;
unsigned long gpio_mask;
unsigned int port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
/* get bits are evaluated a gpio port register at a time */
for (i = 0; i < ARRAY_SIZE(ports); i++) {
/* gpio offset in bits array */
bits_offset = i * gpio_reg_size;
for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
port_addr = gpiommgpio->base + ports[offset / 8];
port_state = inb(port_addr) & gpio_mask;
/* word index for bits array */
word_index = BIT_WORD(bits_offset);
/* gpio offset within current word of bits array */
word_offset = bits_offset % BITS_PER_LONG;
/* mask of get bits for current gpio within current word */
word_mask = mask[word_index] & (port_mask << word_offset);
if (!word_mask) {
/* no get bits in this port so skip to next one */
continue;
}
/* read bits from current gpio port */
port_state = inb(gpiommgpio->base + ports[i]);
/* store acquired bits at respective bits array offset */
bits[word_index] |= (port_state << word_offset) & word_mask;
bitmap_set_value8(bits, port_state, offset);
}
return 0;
@ -237,37 +216,27 @@ static void gpiomm_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
unsigned int i;
const unsigned int gpio_reg_size = 8;
unsigned int port;
unsigned int out_port;
unsigned int bitmask;
unsigned long offset;
unsigned long gpio_mask;
size_t index;
unsigned int port_addr;
unsigned long bitmask;
unsigned long flags;
/* set bits are evaluated a gpio register size at a time */
for (i = 0; i < chip->ngpio; i += gpio_reg_size) {
/* no more set bits in this mask word; skip to the next word */
if (!mask[BIT_WORD(i)]) {
i = (BIT_WORD(i) + 1) * BITS_PER_LONG - gpio_reg_size;
continue;
}
for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
index = offset / 8;
port_addr = gpiommgpio->base + ports[index];
port = i / gpio_reg_size;
out_port = (port > 2) ? port + 1 : port;
bitmask = mask[BIT_WORD(i)] & bits[BIT_WORD(i)];
bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
spin_lock_irqsave(&gpiommgpio->lock, flags);
/* update output state data and set device gpio register */
gpiommgpio->out_state[port] &= ~mask[BIT_WORD(i)];
gpiommgpio->out_state[port] |= bitmask;
outb(gpiommgpio->out_state[port], gpiommgpio->base + out_port);
gpiommgpio->out_state[index] &= ~gpio_mask;
gpiommgpio->out_state[index] |= bitmask;
outb(gpiommgpio->out_state[index], port_addr);
spin_unlock_irqrestore(&gpiommgpio->lock, flags);
/* prepare for next gpio register set */
mask[BIT_WORD(i)] >>= gpio_reg_size;
bits[BIT_WORD(i)] >>= gpio_reg_size;
}
}


@ -31,6 +31,7 @@
*/
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/crc8.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
@ -232,16 +233,20 @@ static int max3191x_get_multiple(struct gpio_chip *gpio, unsigned long *mask,
unsigned long *bits)
{
struct max3191x_chip *max3191x = gpiochip_get_data(gpio);
int ret, bit = 0, wordlen = max3191x_wordlen(max3191x);
const unsigned int wordlen = max3191x_wordlen(max3191x);
int ret;
unsigned long bit;
unsigned long gpio_mask;
unsigned long in;
mutex_lock(&max3191x->lock);
ret = max3191x_readout_locked(max3191x);
if (ret)
goto out_unlock;
while ((bit = find_next_bit(mask, gpio->ngpio, bit)) != gpio->ngpio) {
bitmap_zero(bits, gpio->ngpio);
for_each_set_clump8(bit, gpio_mask, mask, gpio->ngpio) {
unsigned int chipnum = bit / MAX3191X_NGPIO;
unsigned long in, shift, index;
if (max3191x_chip_is_faulting(max3191x, chipnum)) {
ret = -EIO;
@ -249,12 +254,8 @@ static int max3191x_get_multiple(struct gpio_chip *gpio, unsigned long *mask,
}
in = ((u8 *)max3191x->xfer.rx_buf)[chipnum * wordlen];
shift = round_down(bit % BITS_PER_LONG, MAX3191X_NGPIO);
index = bit / BITS_PER_LONG;
bits[index] &= ~(mask[index] & (0xff << shift));
bits[index] |= mask[index] & (in << shift); /* copy bits */
bit = (chipnum + 1) * MAX3191X_NGPIO; /* go to next chip */
in &= gpio_mask;
bitmap_set_value8(bits, in, bit);
}
out_unlock:


@ -9,7 +9,7 @@
*/
#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitmap.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@ -115,6 +115,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
#define MAX_BANK 5
#define BANK_SZ 8
#define MAX_LINE (MAX_BANK * BANK_SZ)
#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
@ -146,10 +147,10 @@ struct pca953x_chip {
#ifdef CONFIG_GPIO_PCA953X_IRQ
struct mutex irq_lock;
u8 irq_mask[MAX_BANK];
u8 irq_stat[MAX_BANK];
u8 irq_trig_raise[MAX_BANK];
u8 irq_trig_fall[MAX_BANK];
DECLARE_BITMAP(irq_mask, MAX_LINE);
DECLARE_BITMAP(irq_stat, MAX_LINE);
DECLARE_BITMAP(irq_trig_raise, MAX_LINE);
DECLARE_BITMAP(irq_trig_fall, MAX_LINE);
struct irq_chip irq_chip;
#endif
atomic_t wakeup_path;
@ -333,12 +334,16 @@ static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off,
return regaddr;
}
static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
u8 regaddr = pca953x_recalc_addr(chip, reg, 0, true, true);
int ret;
u8 value[MAX_BANK];
int i, ret;
ret = regmap_bulk_write(chip->regmap, regaddr, val, NBANK(chip));
for (i = 0; i < NBANK(chip); i++)
value[i] = bitmap_get_value8(val, i * BANK_SZ);
ret = regmap_bulk_write(chip->regmap, regaddr, value, NBANK(chip));
if (ret < 0) {
dev_err(&chip->client->dev, "failed writing register\n");
return ret;
@ -347,17 +352,21 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
return 0;
}
static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val)
static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
u8 regaddr = pca953x_recalc_addr(chip, reg, 0, false, true);
int ret;
u8 value[MAX_BANK];
int i, ret;
ret = regmap_bulk_read(chip->regmap, regaddr, val, NBANK(chip));
ret = regmap_bulk_read(chip->regmap, regaddr, value, NBANK(chip));
if (ret < 0) {
dev_err(&chip->client->dev, "failed reading register\n");
return ret;
}
for (i = 0; i < NBANK(chip); i++)
bitmap_set_value8(val, value[i], i * BANK_SZ);
return 0;
}
@ -412,7 +421,9 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
ret = regmap_read(chip->regmap, inreg, &reg_val);
mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
/* NOTE: diagnostic already emitted; that's all we should
/*
* NOTE:
* diagnostic already emitted; that's all we should
* do unless gpio_*_value_cansleep() calls become different
* from their nonsleeping siblings (and report faults).
*/
@ -459,9 +470,7 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
unsigned int bank_mask, bank_val;
int bank;
u8 reg_val[MAX_BANK];
DECLARE_BITMAP(reg_val, MAX_LINE);
int ret;
mutex_lock(&chip->i2c_lock);
@ -469,16 +478,7 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
if (ret)
goto exit;
for (bank = 0; bank < NBANK(chip); bank++) {
bank_mask = mask[bank / sizeof(*mask)] >>
((bank % sizeof(*mask)) * 8);
if (bank_mask) {
bank_val = bits[bank / sizeof(*bits)] >>
((bank % sizeof(*bits)) * 8);
bank_val &= bank_mask;
reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val;
}
}
bitmap_replace(reg_val, reg_val, bits, mask, gc->ngpio);
pca953x_write_regs(chip, chip->regs->output, reg_val);
exit:
@ -605,10 +605,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 new_irqs;
int level, i;
u8 invert_irq_mask[MAX_BANK];
u8 reg_direction[MAX_BANK];
DECLARE_BITMAP(irq_mask, MAX_LINE);
DECLARE_BITMAP(reg_direction, MAX_LINE);
int level;
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
@ -616,25 +615,18 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
/* Enable latch on interrupt-enabled inputs */
pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
for (i = 0; i < NBANK(chip); i++)
invert_irq_mask[i] = ~chip->irq_mask[i];
bitmap_complement(irq_mask, chip->irq_mask, gc->ngpio);
/* Unmask enabled interrupts */
pca953x_write_regs(chip, PCAL953X_INT_MASK, invert_irq_mask);
pca953x_write_regs(chip, PCAL953X_INT_MASK, irq_mask);
}
bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
/* Look for any newly setup interrupt */
for (i = 0; i < NBANK(chip); i++) {
new_irqs = chip->irq_trig_fall[i] | chip->irq_trig_raise[i];
new_irqs &= reg_direction[i];
while (new_irqs) {
level = __ffs(new_irqs);
pca953x_gpio_direction_input(&chip->gpio_chip,
level + (BANK_SZ * i));
new_irqs &= ~(1 << level);
}
}
for_each_set_bit(level, irq_mask, gc->ngpio)
pca953x_gpio_direction_input(&chip->gpio_chip, level);
mutex_unlock(&chip->irq_lock);
}
@ -675,15 +667,15 @@ static void pca953x_irq_shutdown(struct irq_data *d)
chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
}
static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending)
{
u8 cur_stat[MAX_BANK];
u8 old_stat[MAX_BANK];
bool pending_seen = false;
bool trigger_seen = false;
u8 trigger[MAX_BANK];
u8 reg_direction[MAX_BANK];
int ret, i;
struct gpio_chip *gc = &chip->gpio_chip;
DECLARE_BITMAP(reg_direction, MAX_LINE);
DECLARE_BITMAP(old_stat, MAX_LINE);
DECLARE_BITMAP(cur_stat, MAX_LINE);
DECLARE_BITMAP(new_stat, MAX_LINE);
DECLARE_BITMAP(trigger, MAX_LINE);
int ret;
if (chip->driver_data & PCA_PCAL) {
/* Read the current interrupt status from the device */
@ -692,20 +684,16 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
return false;
/* Check latched inputs and clear interrupt status */
ret = pca953x_read_regs(chip, PCA953X_INPUT, cur_stat);
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
for (i = 0; i < NBANK(chip); i++) {
/* Apply filter for rising/falling edge selection */
pending[i] = (~cur_stat[i] & chip->irq_trig_fall[i]) |
(cur_stat[i] & chip->irq_trig_raise[i]);
pending[i] &= trigger[i];
if (pending[i])
pending_seen = true;
}
/* Apply filter for rising/falling edge selection */
bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
return pending_seen;
bitmap_and(pending, new_stat, trigger, gc->ngpio);
return !bitmap_empty(pending, gc->ngpio);
}
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
@ -714,64 +702,49 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
/* Remove output pins from the equation */
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
for (i = 0; i < NBANK(chip); i++)
cur_stat[i] &= reg_direction[i];
memcpy(old_stat, chip->irq_stat, NBANK(chip));
bitmap_copy(old_stat, chip->irq_stat, gc->ngpio);
for (i = 0; i < NBANK(chip); i++) {
trigger[i] = (cur_stat[i] ^ old_stat[i]) & chip->irq_mask[i];
if (trigger[i])
trigger_seen = true;
}
bitmap_and(new_stat, cur_stat, reg_direction, gc->ngpio);
bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio);
bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio);
if (!trigger_seen)
if (bitmap_empty(trigger, gc->ngpio))
return false;
memcpy(chip->irq_stat, cur_stat, NBANK(chip));
bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
for (i = 0; i < NBANK(chip); i++) {
pending[i] = (old_stat[i] & chip->irq_trig_fall[i]) |
(cur_stat[i] & chip->irq_trig_raise[i]);
pending[i] &= trigger[i];
if (pending[i])
pending_seen = true;
}
bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
bitmap_and(pending, new_stat, trigger, gc->ngpio);
return pending_seen;
return !bitmap_empty(pending, gc->ngpio);
}
static irqreturn_t pca953x_irq_handler(int irq, void *devid)
{
struct pca953x_chip *chip = devid;
u8 pending[MAX_BANK];
u8 level;
unsigned nhandled = 0;
int i;
struct gpio_chip *gc = &chip->gpio_chip;
DECLARE_BITMAP(pending, MAX_LINE);
int level;
if (!pca953x_irq_pending(chip, pending))
return IRQ_NONE;
for (i = 0; i < NBANK(chip); i++) {
while (pending[i]) {
level = __ffs(pending[i]);
handle_nested_irq(irq_find_mapping(chip->gpio_chip.irq.domain,
level + (BANK_SZ * i)));
pending[i] &= ~(1 << level);
nhandled++;
}
}
for_each_set_bit(level, pending, gc->ngpio)
handle_nested_irq(irq_find_mapping(gc->irq.domain, level));
return (nhandled > 0) ? IRQ_HANDLED : IRQ_NONE;
return IRQ_HANDLED;
}
static int pca953x_irq_setup(struct pca953x_chip *chip,
int irq_base)
static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
{
struct i2c_client *client = chip->client;
struct irq_chip *irq_chip = &chip->irq_chip;
u8 reg_direction[MAX_BANK];
int ret, i;
DECLARE_BITMAP(reg_direction, MAX_LINE);
DECLARE_BITMAP(irq_stat, MAX_LINE);
int ret;
if (!client->irq)
return 0;
@ -782,7 +755,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
if (!(chip->driver_data & PCA_INT))
return 0;
ret = pca953x_read_regs(chip, chip->regs->input, chip->irq_stat);
ret = pca953x_read_regs(chip, chip->regs->input, irq_stat);
if (ret)
return ret;
@ -792,8 +765,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
* this purpose.
*/
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
for (i = 0; i < NBANK(chip); i++)
chip->irq_stat[i] &= reg_direction[i];
bitmap_and(chip->irq_stat, irq_stat, reg_direction, chip->gpio_chip.ngpio);
mutex_init(&chip->irq_lock);
ret = devm_request_threaded_irq(&client->dev, client->irq,
@ -816,9 +788,9 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
irq_chip->irq_set_type = pca953x_irq_set_type;
irq_chip->irq_shutdown = pca953x_irq_shutdown;
ret = gpiochip_irqchip_add_nested(&chip->gpio_chip, irq_chip,
irq_base, handle_simple_irq,
IRQ_TYPE_NONE);
ret = gpiochip_irqchip_add_nested(&chip->gpio_chip, irq_chip,
irq_base, handle_simple_irq,
IRQ_TYPE_NONE);
if (ret) {
dev_err(&client->dev,
"could not connect irqchip to gpiochip\n");
@ -845,8 +817,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
{
DECLARE_BITMAP(val, MAX_LINE);
int ret;
u8 val[MAX_BANK];
ret = regcache_sync_region(chip->regmap, chip->regs->output,
chip->regs->output + NBANK(chip));
@ -860,9 +832,9 @@ static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
/* set platform specific polarity inversion */
if (invert)
memset(val, 0xFF, NBANK(chip));
bitmap_fill(val, MAX_LINE);
else
memset(val, 0, NBANK(chip));
bitmap_zero(val, MAX_LINE);
ret = pca953x_write_regs(chip, chip->regs->invert, val);
out:
@ -871,8 +843,8 @@ static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
{
DECLARE_BITMAP(val, MAX_LINE);
int ret;
u8 val[MAX_BANK];
ret = device_pca95xx_init(chip, invert);
if (ret)
@ -892,7 +864,7 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
static const struct of_device_id pca953x_dt_ids[];
static int pca953x_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
const struct i2c_device_id *i2c_id)
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
@ -901,8 +873,7 @@ static int pca953x_probe(struct i2c_client *client,
u32 invert = 0;
struct regulator *reg;
chip = devm_kzalloc(&client->dev,
sizeof(struct pca953x_chip), GFP_KERNEL);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
@ -1016,7 +987,7 @@ static int pca953x_probe(struct i2c_client *client,
if (pdata && pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
chip->gpio_chip.ngpio, pdata->context);
if (ret < 0)
dev_warn(&client->dev, "setup failed, %d\n", ret);
}
@ -1036,7 +1007,7 @@ static int pca953x_remove(struct i2c_client *client)
if (pdata && pdata->teardown) {
ret = pdata->teardown(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
chip->gpio_chip.ngpio, pdata->context);
if (ret < 0)
dev_err(&client->dev, "teardown failed, %d\n", ret);
} else {
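Much of the per-bank bookkeeping removed from this driver collapses into bitmap_replace() and the other bitmap helpers. The intended behaviour of bitmap_replace(dst, old, new, mask, nbits) can be sketched word by word as below; this illustrates the semantics, it is not the lib/bitmap implementation:

/* Bits selected by 'mask' are taken from 'new', the rest from 'old'. */
static void bitmap_replace_sketch(unsigned long *dst,
				  const unsigned long *old,
				  const unsigned long *new,
				  const unsigned long *mask,
				  unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < BITS_TO_LONGS(nbits); i++)
		dst[i] = (old[i] & ~mask[i]) | (new[i] & mask[i]);
}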


@ -100,45 +100,23 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
size_t i;
const unsigned int gpio_reg_size = 8;
unsigned int bits_offset;
size_t word_index;
unsigned int word_offset;
unsigned long word_mask;
const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
unsigned long port_state;
unsigned long offset;
unsigned long gpio_mask;
void __iomem *ports[] = {
&idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15,
&idio16gpio->reg->in0_7, &idio16gpio->reg->in8_15,
};
void __iomem *port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
/* get bits are evaluated a gpio port register at a time */
for (i = 0; i < ARRAY_SIZE(ports); i++) {
/* gpio offset in bits array */
bits_offset = i * gpio_reg_size;
for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
port_addr = ports[offset / 8];
port_state = ioread8(port_addr) & gpio_mask;
/* word index for bits array */
word_index = BIT_WORD(bits_offset);
/* gpio offset within current word of bits array */
word_offset = bits_offset % BITS_PER_LONG;
/* mask of get bits for current gpio within current word */
word_mask = mask[word_index] & (port_mask << word_offset);
if (!word_mask) {
/* no get bits in this port so skip to next one */
continue;
}
/* read bits from current gpio port */
port_state = ioread8(ports[i]);
/* store acquired bits at respective bits array offset */
bits[word_index] |= (port_state << word_offset) & word_mask;
bitmap_set_value8(bits, port_state, offset);
}
return 0;
@ -178,30 +156,31 @@ static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
unsigned long offset;
unsigned long gpio_mask;
void __iomem *ports[] = {
&idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15,
};
size_t index;
void __iomem *port_addr;
unsigned long bitmask;
unsigned long flags;
unsigned int out_state;
unsigned long out_state;
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
index = offset / 8;
port_addr = ports[index];
/* process output lines 0-7 */
if (*mask & 0xFF) {
out_state = ioread8(&idio16gpio->reg->out0_7) & ~*mask;
out_state |= *mask & *bits;
iowrite8(out_state, &idio16gpio->reg->out0_7);
bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
out_state = ioread8(port_addr) & ~gpio_mask;
out_state |= bitmask;
iowrite8(out_state, port_addr);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
/* shift to next output line word */
*mask >>= 8;
/* process output lines 8-15 */
if (*mask & 0xFF) {
*bits >>= 8;
out_state = ioread8(&idio16gpio->reg->out8_15) & ~*mask;
out_state |= *mask & *bits;
iowrite8(out_state, &idio16gpio->reg->out8_15);
}
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
static void idio_16_irq_ack(struct irq_data *data)


@ -201,52 +201,34 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
size_t i;
const unsigned int gpio_reg_size = 8;
unsigned int bits_offset;
size_t word_index;
unsigned int word_offset;
unsigned long word_mask;
const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
unsigned long port_state;
unsigned long offset;
unsigned long gpio_mask;
void __iomem *ports[] = {
&idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
&idio24gpio->reg->out16_23, &idio24gpio->reg->in0_7,
&idio24gpio->reg->in8_15, &idio24gpio->reg->in16_23,
};
size_t index;
unsigned long port_state;
const unsigned long out_mode_mask = BIT(1);
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
/* get bits are evaluated a gpio port register at a time */
for (i = 0; i < ARRAY_SIZE(ports) + 1; i++) {
/* gpio offset in bits array */
bits_offset = i * gpio_reg_size;
/* word index for bits array */
word_index = BIT_WORD(bits_offset);
/* gpio offset within current word of bits array */
word_offset = bits_offset % BITS_PER_LONG;
/* mask of get bits for current gpio within current word */
word_mask = mask[word_index] & (port_mask << word_offset);
if (!word_mask) {
/* no get bits in this port so skip to next one */
continue;
}
for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
index = offset / 8;
/* read bits from current gpio port (port 6 is TTL GPIO) */
if (i < 6)
port_state = ioread8(ports[i]);
if (index < 6)
port_state = ioread8(ports[index]);
else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
port_state = ioread8(&idio24gpio->reg->ttl_out0_7);
else
port_state = ioread8(&idio24gpio->reg->ttl_in0_7);
/* store acquired bits at respective bits array offset */
bits[word_index] |= (port_state << word_offset) & word_mask;
port_state &= gpio_mask;
bitmap_set_value8(bits, port_state, offset);
}
return 0;
@ -297,59 +279,48 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
size_t i;
unsigned long bits_offset;
unsigned long offset;
unsigned long gpio_mask;
const unsigned int gpio_reg_size = 8;
const unsigned long port_mask = GENMASK(gpio_reg_size, 0);
unsigned long flags;
unsigned int out_state;
void __iomem *ports[] = {
&idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
&idio24gpio->reg->out16_23
};
size_t index;
unsigned long bitmask;
unsigned long flags;
unsigned long out_state;
const unsigned long out_mode_mask = BIT(1);
const unsigned int ttl_offset = 48;
const size_t ttl_i = BIT_WORD(ttl_offset);
const unsigned int word_offset = ttl_offset % BITS_PER_LONG;
const unsigned long ttl_mask = (mask[ttl_i] >> word_offset) & port_mask;
const unsigned long ttl_bits = (bits[ttl_i] >> word_offset) & ttl_mask;
/* set bits are processed a gpio port register at a time */
for (i = 0; i < ARRAY_SIZE(ports); i++) {
/* gpio offset in bits array */
bits_offset = i * gpio_reg_size;
for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
index = offset / 8;
/* check if any set bits for current port */
gpio_mask = (*mask >> bits_offset) & port_mask;
if (!gpio_mask) {
/* no set bits for this port so move on to next port */
continue;
}
bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
/* process output lines */
out_state = ioread8(ports[i]) & ~gpio_mask;
out_state |= (*bits >> bits_offset) & gpio_mask;
iowrite8(out_state, ports[i]);
/* read bits from current gpio port (port 6 is TTL GPIO) */
if (index < 6) {
out_state = ioread8(ports[index]);
} else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask) {
out_state = ioread8(&idio24gpio->reg->ttl_out0_7);
} else {
/* skip TTL GPIO if set for input */
raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
continue;
}
/* set requested bit states */
out_state &= ~gpio_mask;
out_state |= bitmask;
/* write bits for current gpio port (port 6 is TTL GPIO) */
if (index < 6)
iowrite8(out_state, ports[index]);
else
iowrite8(out_state, &idio24gpio->reg->ttl_out0_7);
raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
}
/* check if setting TTL lines and if they are in output mode */
if (!ttl_mask || !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask))
return;
/* handle TTL output */
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
/* process output lines */
out_state = ioread8(&idio24gpio->reg->ttl_out0_7) & ~ttl_mask;
out_state |= ttl_bits;
iowrite8(out_state, &idio24gpio->reg->ttl_out0_7);
raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
}
static void idio_24_irq_ack(struct irq_data *data)


@ -96,16 +96,16 @@ static int pisosr_gpio_get_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct pisosr_gpio *gpio = gpiochip_get_data(chip);
unsigned int nbytes = DIV_ROUND_UP(chip->ngpio, 8);
unsigned int i, j;
unsigned long offset;
unsigned long gpio_mask;
unsigned long buffer_state;
pisosr_gpio_refresh(gpio);
bitmap_zero(bits, chip->ngpio);
for (i = 0; i < nbytes; i++) {
j = i / sizeof(unsigned long);
bits[j] |= ((unsigned long) gpio->buffer[i])
<< (8 * (i % sizeof(unsigned long)));
for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
buffer_state = gpio->buffer[offset / 8] & gpio_mask;
bitmap_set_value8(bits, buffer_state, offset);
}
return 0;


@ -15,9 +15,6 @@
#include <linux/spinlock.h>
#include <dt-bindings/gpio/uniphier-gpio.h>
#define UNIPHIER_GPIO_BANK_MASK \
GENMASK((UNIPHIER_GPIO_LINES_PER_BANK) - 1, 0)
#define UNIPHIER_GPIO_IRQ_MAX_NUM 24
#define UNIPHIER_GPIO_PORT_DATA 0x0 /* data */
@ -150,15 +147,11 @@ static void uniphier_gpio_set(struct gpio_chip *chip,
static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
unsigned int bank, shift, bank_mask, bank_bits;
int i;
unsigned long i, bank, bank_mask, bank_bits;
for (i = 0; i < chip->ngpio; i += UNIPHIER_GPIO_LINES_PER_BANK) {
for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
bank = i / UNIPHIER_GPIO_LINES_PER_BANK;
shift = i % BITS_PER_LONG;
bank_mask = (mask[BIT_WORD(i)] >> shift) &
UNIPHIER_GPIO_BANK_MASK;
bank_bits = bits[BIT_WORD(i)] >> shift;
bank_bits = bitmap_get_value8(bits, i);
uniphier_gpio_bank_write(chip, bank, UNIPHIER_GPIO_PORT_DATA,
bank_mask, bank_bits);


@ -129,42 +129,19 @@ static int ws16c48_gpio_get_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
const unsigned int gpio_reg_size = 8;
size_t i;
const size_t num_ports = chip->ngpio / gpio_reg_size;
unsigned int bits_offset;
size_t word_index;
unsigned int word_offset;
unsigned long word_mask;
const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
unsigned long offset;
unsigned long gpio_mask;
unsigned int port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
/* get bits are evaluated a gpio port register at a time */
for (i = 0; i < num_ports; i++) {
/* gpio offset in bits array */
bits_offset = i * gpio_reg_size;
for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
port_addr = ws16c48gpio->base + offset / 8;
port_state = inb(port_addr) & gpio_mask;
/* word index for bits array */
word_index = BIT_WORD(bits_offset);
/* gpio offset within current word of bits array */
word_offset = bits_offset % BITS_PER_LONG;
/* mask of get bits for current gpio within current word */
word_mask = mask[word_index] & (port_mask << word_offset);
if (!word_mask) {
/* no get bits in this port so skip to next one */
continue;
}
/* read bits from current gpio port */
port_state = inb(ws16c48gpio->base + i);
/* store acquired bits at respective bits array offset */
bits[word_index] |= (port_state << word_offset) & word_mask;
bitmap_set_value8(bits, port_state, offset);
}
return 0;
@ -198,39 +175,29 @@ static void ws16c48_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
unsigned int i;
const unsigned int gpio_reg_size = 8;
unsigned int port;
unsigned int iomask;
unsigned int bitmask;
unsigned long offset;
unsigned long gpio_mask;
size_t index;
unsigned int port_addr;
unsigned long bitmask;
unsigned long flags;
/* set bits are evaluated a gpio register size at a time */
for (i = 0; i < chip->ngpio; i += gpio_reg_size) {
/* no more set bits in this mask word; skip to the next word */
if (!mask[BIT_WORD(i)]) {
i = (BIT_WORD(i) + 1) * BITS_PER_LONG - gpio_reg_size;
continue;
}
port = i / gpio_reg_size;
for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
index = offset / 8;
port_addr = ws16c48gpio->base + index;
/* mask out GPIO configured for input */
iomask = mask[BIT_WORD(i)] & ~ws16c48gpio->io_state[port];
bitmask = iomask & bits[BIT_WORD(i)];
gpio_mask &= ~ws16c48gpio->io_state[index];
bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
/* update output state data and set device gpio register */
ws16c48gpio->out_state[port] &= ~iomask;
ws16c48gpio->out_state[port] |= bitmask;
outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
ws16c48gpio->out_state[index] &= ~gpio_mask;
ws16c48gpio->out_state[index] |= bitmask;
outb(ws16c48gpio->out_state[index], port_addr);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
/* prepare for next gpio register set */
mask[BIT_WORD(i)] >>= gpio_reg_size;
bits[BIT_WORD(i)] >>= gpio_reg_size;
}
}


@ -561,7 +561,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
struct drm_property_blob *blob;
int ret;
if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
if (!length || length > INT_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);


@ -96,7 +96,7 @@ void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
if (!part)
return NULL;
if (!addr_in_gen_pool(pool, (unsigned long)dst, size))
if (!gen_pool_has_addr(pool, (unsigned long)dst, size))
return NULL;
base = (unsigned long)part->base;
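gen_pool_has_addr() is the renamed addr_in_gen_pool(), as the hunk above shows; the arguments are unchanged. A minimal sketch of guarding a copy with it, with the wrapper name and calling context made up for illustration:

#include <linux/genalloc.h>
#include <linux/string.h>

/* Copy only if [dst, dst + size) lies entirely inside the pool. */
static void *copy_into_pool(struct gen_pool *pool, void *dst,
			    const void *src, size_t size)
{
	if (!gen_pool_has_addr(pool, (unsigned long)dst, size))
		return NULL;

	return memcpy(dst, src, size);
}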


@ -9,6 +9,8 @@
#include <linux/rio.h>
#include <linux/module.h>
#include <linux/rio_drv.h>
/*
* Wrappers for all RIO configuration access functions. They just check
* alignment and call the low-level functions pointed to by rio_mport->ops.


@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include "rio.h"


@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@ -103,6 +104,7 @@ static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
int status;
u32 temp_out;
u32 out;
unsigned long update_ptps;
u32 store_ptps;
u32 store_ptmc;
u32 store_te_out;
@ -120,8 +122,10 @@ static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
if (status)
return status;
out = (store_ptps & ~(0xFF << (thres_index * 8)));
out |= (temp_out & 0xFF) << (thres_index * 8);
update_ptps = store_ptps;
bitmap_set_value8(&update_ptps, temp_out & 0xFF, thres_index * 8);
out = update_ptps;
status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
SOC_DTS_OFFSET_PTPS, out);
if (status)
@ -223,6 +227,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd,
u32 out;
struct intel_soc_dts_sensor_entry *dts;
struct intel_soc_dts_sensors *sensors;
unsigned long raw;
dts = tzd->devdata;
sensors = dts->sensors;
@ -231,8 +236,8 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd,
if (status)
return status;
out = (out & dts->temp_mask) >> dts->temp_shift;
out -= SOC_DTS_TJMAX_ENCODING;
raw = out;
out = bitmap_get_value8(&raw, dts->id * 8) - SOC_DTS_TJMAX_ENCODING;
*temp = sensors->tj_max - out * 1000;
return 0;
@ -280,11 +285,14 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
int read_only_trip_cnt)
{
char name[10];
unsigned long trip;
int trip_count = 0;
int trip_mask = 0;
int writable_trip_cnt = 0;
unsigned long ptps;
u32 store_ptps;
unsigned long i;
int ret;
int i;
/* Store status to restore on exit */
ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
@ -293,11 +301,10 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
goto err_ret;
dts->id = id;
dts->temp_mask = 0x00FF << (id * 8);
dts->temp_shift = id * 8;
if (notification_support) {
trip_count = min(SOC_MAX_DTS_TRIPS, trip_cnt);
trip_mask = BIT(trip_count - read_only_trip_cnt) - 1;
writable_trip_cnt = trip_count - read_only_trip_cnt;
trip_mask = GENMASK(writable_trip_cnt - 1, 0);
}
/* Check if the writable trip we provide is not used by BIOS */
@ -306,11 +313,9 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
if (ret)
trip_mask = 0;
else {
for (i = 0; i < trip_count; ++i) {
if (trip_mask & BIT(i))
if (store_ptps & (0xff << (i * 8)))
trip_mask &= ~BIT(i);
}
ptps = store_ptps;
for_each_set_clump8(i, trip, &ptps, writable_trip_cnt * 8)
trip_mask &= ~BIT(i / 8);
}
dts->trip_mask = trip_mask;
dts->trip_count = trip_count;
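The thermal hunks above copy 32-bit register values into local unsigned long variables before calling the bitmap helpers, because bitmap_get_value8() and bitmap_set_value8() operate on words of unsigned long. A rough sketch of that step, with example_set_trip_field() as a hypothetical name and on the assumption that thres_index addresses one of the four 8-bit fields of the PTPS register:
static u32 example_set_trip_field(u32 store_ptps, u32 temp_out,
				  unsigned int thres_index)
{
	/* Widen to unsigned long so the bitmap helpers can be used. */
	unsigned long ptps = store_ptps;
	/* Replace only the 8-bit trip field at this index. */
	bitmap_set_value8(&ptps, temp_out & 0xFF, thres_index * 8);
	return (u32)ptps;
}
The for_each_set_clump8() loop in add_dts_thermal_zone() relies on the same widening: store_ptps is copied into ptps so the macro can scan it for already-programmed (non-zero) 8-bit trip slots.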

View File

@ -24,8 +24,6 @@ struct intel_soc_dts_sensors;
struct intel_soc_dts_sensor_entry {
int id;
u32 temp_mask;
u32 temp_shift;
u32 store_status;
u32 trip_mask;
u32 trip_count;

View File

@ -18,6 +18,7 @@
#include <linux/sched/mm.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kcov.h>
#include <linux/ioctl.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
@ -5484,6 +5485,8 @@ static void hub_event(struct work_struct *work)
hub_dev = hub->intfdev;
intf = to_usb_interface(hub_dev);
kcov_remote_start_usb((u64)hdev->bus->busnum);
dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
hdev->state, hdev->maxchild,
/* NOTE: expects max 15 ports... */
@ -5590,6 +5593,8 @@ static void hub_event(struct work_struct *work)
/* Balance the stuff in kick_hub_wq() and allow autosuspend */
usb_autopm_put_interface(intf);
kref_put(&hub->kref, hub_release);
kcov_remote_stop();
}
static const struct usb_device_id hub_id_table[] = {
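To make the kcov annotation above easier to follow, a hedged sketch of the general pattern: a background worker brackets the region whose coverage should be attributed to a remote handle. kcov_remote_start_usb() is the USB wrapper used in hub_event(); example_worker() and instance_id are hypothetical and only illustrate the shape of the call sequence (requires <linux/kcov.h>):
static void example_worker(struct work_struct *work)
{
	u64 instance_id = 1;	/* hypothetical identifier, e.g. a bus number */
	/*
	 * The handle derived from instance_id must match what userspace
	 * passed via KCOV_REMOTE_ENABLE for coverage to be attributed here.
	 */
	kcov_remote_start_usb(instance_id);
	/* ... do the work; code executed between start and stop is recorded ... */
	kcov_remote_stop();
}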

Some files were not shown because too many files have changed in this diff