Xtensa updates for v5.5:

 - add support for execute in place (XIP) kernels
 - improvements in inline assembly: use named arguments and "m"
   constraints where possible
 - improve stack dumping
 - clean up system_call code and syscall tracing
 - various small fixes and cleanups
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCgAxFiEEK2eFS5jlMn3N6xfYUfnMkfg/oEQFAl3kk8ETHGpjbXZia2Jj
 QGdtYWlsLmNvbQAKCRBR+cyR+D+gRMp2EACpOxDBdacJjgSKKj5IFwbu2c/O74rA
 q2tBf4iz82UfNxs2R41wZX7oD9CIp6VEJVnLxbDMqwyTN62NSfnQ2R1j3CiISeqB
 wKDZzGHCUCfoTja4zR/yZY1fh5YZ8VzOytgpy3zkHoFaxyQPcTDBS6PLYyFqYyuR
 Ktxiv6c0T5NQ1BEF8u2VzxM9oGZo3zaraMal3s1CCjGR8Ej6i6K6vwOv3glPRzfP
 t121u3GqL3RwdygHDeXcm4tbBcMp7CkawoKsthhe/+/PPVmWgt3JkqFRPnvsJrvw
 MRdrfJ8s23eJJ9n2iLlBZPUp0vHc533Av5jsH4RWSPdT3Osb0RISJQmNUtKGZI08
 ur2pb6pgJVHvIxo+Eu2+FVYkOJXGhKQqYD2EAY2/4PVDX6x9XPO5h2+hTaESccta
 6qo0dvdiO4ut2LuUyCssn5fBPT+FWgaf+1Vh5Pcaa+y6PZKBcVhyt6qZpbTDFtlu
 cQRILPYTxtOAX8MGD/OMF1a2wVisORBZKEgYAdFbaNT+cWYLW0sMlHAzjxx2Ok42
 HeVPujoqjRaNGNKCGdqhOevPlXsMdtu4SFPKCQIZhIA2vg4mjxykx7ZE/sOcCZS3
 P0dtJXvRVg6rfWIM6SCuufrg70PwkIO6ufz0ZdLWPIb+4KHxxOaJ+fdaPwXhREKK
 2AWSa13Vd63O3Q==
 =/plV
 -----END PGP SIGNATURE-----

Merge tag 'xtensa-20191201' of git://github.com/jcmvbkbc/linux-xtensa

Pull Xtensa updates from Max Filippov:

 - add support for execute in place (XIP) kernels

 - improvements in inline assembly: use named arguments and "m"
   constraints where possible

 - improve stack dumping

 - clean up system_call code and syscall tracing

 - various small fixes and cleanups
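
As an illustration of the inline-assembly item above (editor's sketch, adapted from the atomic.h hunk further below; assumes an Xtensa configuration with S32C1I and is not the verbatim kernel code): positional operands ("%0", "%1") and "a"-register pointer operands are replaced by named operands and an "m" memory constraint.

static inline void atomic_add_sketch(int i, int *v)
{
        int result, tmp;

        __asm__ __volatile__(
        "1:     l32i    %[tmp], %[mem]\n"
        "       wsr     %[tmp], scompare1\n"
        "       add     %[result], %[tmp], %[i]\n"
        "       s32c1i  %[result], %[mem]\n"
        "       bne     %[result], %[tmp], 1b\n"
        : [result] "=&a" (result), [tmp] "=&a" (tmp), [mem] "+m" (*v)
        : [i] "a" (i)
        : "memory");
}

With named operands the asm body reads without counting argument positions, and the "m" constraint lets the compiler choose the addressing mode instead of the code hard-coding a zero offset against an address register.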

* tag 'xtensa-20191201' of git://github.com/jcmvbkbc/linux-xtensa: (30 commits)
  xtensa: clean up system_call/xtensa_rt_sigreturn interaction
  xtensa: fix system_call interaction with ptrace
  xtensa: rearrange syscall tracing
  xtensa: fix syscall_set_return_value
  xtensa: drop unneeded headers from coprocessor.S
  xtensa: entry: Remove unneeded need_resched() loop
  xtensa: use MEMBLOCK_ALLOC_ANYWHERE for KASAN shadow map
  xtensa: fix TLB sanity checker
  xtensa: get rid of __ARCH_USE_5LEVEL_HACK
  xtensa: mm: fix PMD folding implementation
  xtensa: make stack dump size configurable
  xtensa: improve stack dumping
  xtensa: use "m" constraint instead of "r" in futex.h assembly
  xtensa: use "m" constraint instead of "a" in cmpxchg.h assembly
  xtensa: use named assembly arguments in cmpxchg.h
  xtensa: use "m" constraint instead of "a" in atomic.h assembly
  xtensa: use named assembly arguments in atomic.h
  xtensa: use "m" constraint instead of "a" in bitops.h assembly
  xtensa: use named assembly arguments in bitops.h
  xtensa: use macros to generate *_bit and test_and_*_bit functions
  ...
Linus Torvalds 2019-12-03 12:46:44 -08:00
commit 4d7048f551
37 changed files with 769 additions and 660 deletions


@ -30,5 +30,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok |
| xtensa: | TODO |
| xtensa: | ok |
-----------------------


@ -21,8 +21,8 @@ config XTENSA
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select GENERIC_STRNCPY_FROM_USER if KASAN
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if MMU
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
@ -215,151 +215,6 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
bool "Initialize Xtensa MMU inside the Linux kernel code"
depends on !XTENSA_VARIANT_FSF && !XTENSA_VARIANT_DC232B
default y if XTENSA_VARIANT_DC233C || XTENSA_VARIANT_CUSTOM
help
Earlier version initialized the MMU in the exception vector
before jumping to _startup in head.S and had an advantage that
it was possible to place a software breakpoint at 'reset' and
then enter your normal kernel breakpoints once the MMU was mapped
to the kernel mappings (0XC0000000).
This unfortunately won't work for U-Boot and likely also wont
work for using KEXEC to have a hot kernel ready for doing a
KDUMP.
So now the MMU is initialized in head.S but it's necessary to
use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
xt-gdb can't place a Software Breakpoint in the 0XD region prior
to mapping the MMU and after mapping even if the area of low memory
was mapped gdb wouldn't remove the breakpoint on hitting it as the
PC wouldn't match. Since Hardware Breakpoints are recommended for
Linux configurations it seems reasonable to just assume they exist
and leave this older mechanism for unfortunate souls that choose
not to follow Tensilica's recommendation.
Selecting this will cause U-Boot to set the KERNEL Load and Entry
address at 0x00003000 instead of the mapped std of 0xD0003000.
If in doubt, say Y.
config MEMMAP_CACHEATTR
hex "Cache attributes for the memory address space"
depends on !MMU
default 0x22222222
help
These cache attributes are set up for noMMU systems. Each hex digit
specifies cache attributes for the corresponding 512MB memory
region: bits 0..3 -- for addresses 0x00000000..0x1fffffff,
bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on.
Cache attribute values are specific for the MMU type.
For region protection MMUs:
1: WT cached,
2: cache bypass,
4: WB cached,
f: illegal.
For ful MMU:
bit 0: executable,
bit 1: writable,
bits 2..3:
0: cache bypass,
1: WB cache,
2: WT cache,
3: special (c and e are illegal, f is reserved).
For MPU:
0: illegal,
1: WB cache,
2: WB, no-write-allocate cache,
3: WT cache,
4: cache bypass.
config KSEG_PADDR
hex "Physical address of the KSEG mapping"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
default 0x00000000
help
This is the physical address where KSEG is mapped. Please refer to
the chosen KSEG layout help for the required address alignment.
Unpacked kernel image (including vectors) must be located completely
within KSEG.
Physical memory below this address is not available to linux.
If unsure, leave the default value here.
config KERNEL_LOAD_ADDRESS
hex "Kernel load address"
default 0x60003000 if !MMU
default 0x00003000 if MMU && INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
default 0xd0003000 if MMU && !INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
This is the address where the kernel is loaded.
It is virtual address for MMUv2 configurations and physical address
for all other configurations.
If unsure, leave the default value here.
config VECTORS_OFFSET
hex "Kernel vectors offset"
default 0x00003000
help
This is the offset of the kernel image from the relocatable vectors
base.
If unsure, leave the default value here.
choice
prompt "KSEG layout"
depends on MMU
default XTENSA_KSEG_MMU_V2
config XTENSA_KSEG_MMU_V2
bool "MMUv2: 128MB cached + 128MB uncached"
help
MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
without cache.
KSEG_PADDR must be aligned to 128MB.
config XTENSA_KSEG_256M
bool "256MB cached + 256MB uncached"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
with cache and to 0xc0000000 without cache.
KSEG_PADDR must be aligned to 256MB.
config XTENSA_KSEG_512M
bool "512MB cached + 512MB uncached"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
with cache and to 0xc0000000 without cache.
KSEG_PADDR must be aligned to 256MB.
endchoice
config HIGHMEM
bool "High Memory Support"
depends on MMU
help
Linux can use the full amount of RAM in the system by
default. However, the default MMUv2 setup only maps the
lowermost 128 MB of memory linearly to the areas starting
at 0xd0000000 (cached) and 0xd8000000 (uncached).
When there are more than 128 MB memory in the system not
all of it can be "permanently mapped" by the kernel.
The physical memory that's not permanently mapped is called
"high memory".
If you are compiling a kernel which will never run on a
machine with more than 128 MB total physical RAM, answer
N here.
If unsure, say Y.
config FAST_SYSCALL_XTENSA
bool "Enable fast atomic syscalls"
default n
@ -446,6 +301,9 @@ config XTENSA_CALIBRATE_CCOUNT
config SERIAL_CONSOLE
def_bool n
config PLATFORM_HAVE_XIP
def_bool n
menu "Platform options"
choice
@ -472,6 +330,7 @@ config XTENSA_PLATFORM_XTFPGA
select PLATFORM_WANT_DEFAULT_MEM if !MMU
select SERIAL_CONSOLE
select XTENSA_CALIBRATE_CCOUNT
select PLATFORM_HAVE_XIP
help
XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605).
This hardware is capable of running a full Linux distribution.
@ -563,34 +422,6 @@ config SIMDISK1_FILENAME
Another simulated disk in a host file for a buildroot-independent
storage.
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "11"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
pages. This option selects the largest power of two that the kernel
keeps in the memory allocator. If you need to allocate very large
blocks of physically contiguous memory, then you may need to
increase this value.
This config option is actually maximum order plus one. For example,
a value of 11 means that the largest free memory block is 2^10 pages.
config PLATFORM_WANT_DEFAULT_MEM
def_bool n
config DEFAULT_MEM_START
hex
prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM
default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM
default 0x00000000
help
This is the base address used for both PAGE_OFFSET and PHYS_OFFSET
in noMMU configurations.
If unsure, leave the default value here.
config XTFPGA_LCD
bool "Enable XTFPGA LCD driver"
depends on XTENSA_PLATFORM_XTFPGA
@ -621,6 +452,221 @@ config XTFPGA_LCD_8BIT_ACCESS
only be used with 8-bit interface. Please consult prototyping user
guide for your board for the correct interface width.
comment "Kernel memory layout"
config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
bool "Initialize Xtensa MMU inside the Linux kernel code"
depends on !XTENSA_VARIANT_FSF && !XTENSA_VARIANT_DC232B
default y if XTENSA_VARIANT_DC233C || XTENSA_VARIANT_CUSTOM
help
Earlier version initialized the MMU in the exception vector
before jumping to _startup in head.S and had an advantage that
it was possible to place a software breakpoint at 'reset' and
then enter your normal kernel breakpoints once the MMU was mapped
to the kernel mappings (0XC0000000).
This unfortunately won't work for U-Boot and likely also wont
work for using KEXEC to have a hot kernel ready for doing a
KDUMP.
So now the MMU is initialized in head.S but it's necessary to
use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
xt-gdb can't place a Software Breakpoint in the 0XD region prior
to mapping the MMU and after mapping even if the area of low memory
was mapped gdb wouldn't remove the breakpoint on hitting it as the
PC wouldn't match. Since Hardware Breakpoints are recommended for
Linux configurations it seems reasonable to just assume they exist
and leave this older mechanism for unfortunate souls that choose
not to follow Tensilica's recommendation.
Selecting this will cause U-Boot to set the KERNEL Load and Entry
address at 0x00003000 instead of the mapped std of 0xD0003000.
If in doubt, say Y.
config XIP_KERNEL
bool "Kernel Execute-In-Place from ROM"
depends on PLATFORM_HAVE_XIP
help
Execute-In-Place allows the kernel to run from non-volatile storage
directly addressable by the CPU, such as NOR flash. This saves RAM
space since the text section of the kernel is not loaded from flash
to RAM. Read-write sections, such as the data section and stack,
are still copied to RAM. The XIP kernel is not compressed since
it has to run directly from flash, so it will take more space to
store it. The flash address used to link the kernel object files,
and for storing it, is configuration dependent. Therefore, if you
say Y here, you must know the proper physical address where to
store the kernel image depending on your own flash memory usage.
Also note that the make target becomes "make xipImage" rather than
"make Image" or "make uImage". The final kernel binary to put in
ROM memory will be arch/xtensa/boot/xipImage.
If unsure, say N.
config MEMMAP_CACHEATTR
hex "Cache attributes for the memory address space"
depends on !MMU
default 0x22222222
help
These cache attributes are set up for noMMU systems. Each hex digit
specifies cache attributes for the corresponding 512MB memory
region: bits 0..3 -- for addresses 0x00000000..0x1fffffff,
bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on.
Cache attribute values are specific for the MMU type.
For region protection MMUs:
1: WT cached,
2: cache bypass,
4: WB cached,
f: illegal.
For ful MMU:
bit 0: executable,
bit 1: writable,
bits 2..3:
0: cache bypass,
1: WB cache,
2: WT cache,
3: special (c and e are illegal, f is reserved).
For MPU:
0: illegal,
1: WB cache,
2: WB, no-write-allocate cache,
3: WT cache,
4: cache bypass.
config KSEG_PADDR
hex "Physical address of the KSEG mapping"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
default 0x00000000
help
This is the physical address where KSEG is mapped. Please refer to
the chosen KSEG layout help for the required address alignment.
Unpacked kernel image (including vectors) must be located completely
within KSEG.
Physical memory below this address is not available to linux.
If unsure, leave the default value here.
config KERNEL_VIRTUAL_ADDRESS
hex "Kernel virtual address"
depends on MMU && XIP_KERNEL
default 0xd0003000
help
This is the virtual address where the XIP kernel is mapped.
XIP kernel may be mapped into KSEG or KIO region, virtual address
provided here must match kernel load address provided in
KERNEL_LOAD_ADDRESS.
config KERNEL_LOAD_ADDRESS
hex "Kernel load address"
default 0x60003000 if !MMU
default 0x00003000 if MMU && INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
default 0xd0003000 if MMU && !INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
This is the address where the kernel is loaded.
It is virtual address for MMUv2 configurations and physical address
for all other configurations.
If unsure, leave the default value here.
config VECTORS_OFFSET
hex "Kernel vectors offset"
default 0x00003000
depends on !XIP_KERNEL
help
This is the offset of the kernel image from the relocatable vectors
base.
If unsure, leave the default value here.
config XIP_DATA_ADDR
hex "XIP kernel data virtual address"
depends on XIP_KERNEL
default 0x00000000
help
This is the virtual address where XIP kernel data is copied.
It must be within KSEG if MMU is used.
config PLATFORM_WANT_DEFAULT_MEM
def_bool n
config DEFAULT_MEM_START
hex
prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM
default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM
default 0x00000000
help
This is the base address used for both PAGE_OFFSET and PHYS_OFFSET
in noMMU configurations.
If unsure, leave the default value here.
choice
prompt "KSEG layout"
depends on MMU
default XTENSA_KSEG_MMU_V2
config XTENSA_KSEG_MMU_V2
bool "MMUv2: 128MB cached + 128MB uncached"
help
MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
without cache.
KSEG_PADDR must be aligned to 128MB.
config XTENSA_KSEG_256M
bool "256MB cached + 256MB uncached"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
with cache and to 0xc0000000 without cache.
KSEG_PADDR must be aligned to 256MB.
config XTENSA_KSEG_512M
bool "512MB cached + 512MB uncached"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
with cache and to 0xc0000000 without cache.
KSEG_PADDR must be aligned to 256MB.
endchoice
config HIGHMEM
bool "High Memory Support"
depends on MMU
help
Linux can use the full amount of RAM in the system by
default. However, the default MMUv2 setup only maps the
lowermost 128 MB of memory linearly to the areas starting
at 0xd0000000 (cached) and 0xd8000000 (uncached).
When there are more than 128 MB memory in the system not
all of it can be "permanently mapped" by the kernel.
The physical memory that's not permanently mapped is called
"high memory".
If you are compiling a kernel which will never run on a
machine with more than 128 MB total physical RAM, answer
N here.
If unsure, say Y.
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "11"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
pages. This option selects the largest power of two that the kernel
keeps in the memory allocator. If you need to allocate very large
blocks of physically contiguous memory, then you may need to
increase this value.
This config option is actually maximum order plus one. For example,
a value of 11 means that the largest free memory block is 2^10 pages.
endmenu
menu "Power management options"


@ -31,3 +31,10 @@ config S32C1I_SELFTEST
It is easy to make wrong hardware configuration, this test should catch it early.
Say 'N' on stable hardware.
config PRINT_STACK_DEPTH
int "Stack depth to print" if DEBUG_KERNEL
default 64
help
This option allows you to set the stack depth that the kernel
prints in stack traces.


@ -87,7 +87,7 @@ drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/
boot := arch/xtensa/boot
all Image zImage uImage: vmlinux
all Image zImage uImage xipImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
archheaders:
@ -97,4 +97,5 @@ define archhelp
@echo '* Image - Kernel ELF image with reset vector'
@echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
@echo '* uImage - U-Boot wrapped image'
@echo ' xipImage - XIP image'
endef


@ -29,6 +29,7 @@ all: $(boot-y)
Image: boot-elf
zImage: boot-redboot
uImage: $(obj)/uImage
xipImage: $(obj)/xipImage
boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y))
$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
@ -50,3 +51,7 @@ UIMAGE_COMPRESSION = gzip
$(obj)/uImage: vmlinux.bin.gz FORCE
$(call if_changed,uimage)
$(Q)$(kecho) ' Kernel: $@ is ready'
$(obj)/xipImage: vmlinux FORCE
$(call if_changed,objcopy)
$(Q)$(kecho) ' Kernel: $@ is ready'


@ -0,0 +1,119 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_MEMCG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_DEBUG=y
CONFIG_NAMESPACES=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
CONFIG_XTENSA_VARIANT_DC233C=y
CONFIG_XTENSA_UNALIGNED_USER=y
CONFIG_XIP_KERNEL=y
CONFIG_XIP_DATA_ADDR=0xd0000000
CONFIG_KERNEL_VIRTUAL_ADDRESS=0xe6000000
CONFIG_KERNEL_LOAD_ADDRESS=0xf6000000
CONFIG_XTENSA_KSEG_512M=y
CONFIG_HIGHMEM=y
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB_SOURCE="kc705"
# CONFIG_PARSE_BOOTPARAM is not set
CONFIG_OPROFILE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_MARVELL_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_DEVKMEM=y
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=y
# CONFIG_VGA_CONSOLE is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_FANOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_SUNRPC_DEBUG=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_STACKTRACE=y
CONFIG_RCU_TRACE=y
# CONFIG_FTRACE is not set
# CONFIG_S32C1I_SELFTEST is not set


@ -11,6 +11,7 @@ generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
@ -30,6 +31,7 @@ generic-y += qspinlock.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h


@ -64,13 +64,13 @@ static inline void atomic_##op(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
"1: l32ex %1, %3\n" \
" " #op " %0, %1, %2\n" \
" s32ex %0, %3\n" \
" getex %0\n" \
" beqz %0, 1b\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32ex %[tmp], %[addr]\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32ex %[result], %[addr]\n" \
" getex %[result]\n" \
" beqz %[result], 1b\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp) \
: [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
} \
@ -82,14 +82,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
"1: l32ex %1, %3\n" \
" " #op " %0, %1, %2\n" \
" s32ex %0, %3\n" \
" getex %0\n" \
" beqz %0, 1b\n" \
" " #op " %0, %1, %2\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32ex %[tmp], %[addr]\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32ex %[result], %[addr]\n" \
" getex %[result]\n" \
" beqz %[result], 1b\n" \
" " #op " %[result], %[tmp], %[i]\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp) \
: [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
\
@ -103,13 +103,13 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
"1: l32ex %1, %3\n" \
" " #op " %0, %1, %2\n" \
" s32ex %0, %3\n" \
" getex %0\n" \
" beqz %0, 1b\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32ex %[tmp], %[addr]\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32ex %[result], %[addr]\n" \
" getex %[result]\n" \
" beqz %[result], 1b\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp) \
: [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
\
@ -124,13 +124,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \
" wsr %1, scompare1\n" \
" " #op " %0, %1, %2\n" \
" s32c1i %0, %3, 0\n" \
" bne %0, %1, 1b\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32i %[tmp], %[mem]\n" \
" wsr %[tmp], scompare1\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32c1i %[result], %[mem]\n" \
" bne %[result], %[tmp], 1b\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp), \
[mem] "+m" (*v) \
: [i] "a" (i) \
: "memory" \
); \
} \
@ -142,14 +143,15 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \
" wsr %1, scompare1\n" \
" " #op " %0, %1, %2\n" \
" s32c1i %0, %3, 0\n" \
" bne %0, %1, 1b\n" \
" " #op " %0, %0, %2\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32i %[tmp], %[mem]\n" \
" wsr %[tmp], scompare1\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32c1i %[result], %[mem]\n" \
" bne %[result], %[tmp], 1b\n" \
" " #op " %[result], %[result], %[i]\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp), \
[mem] "+m" (*v) \
: [i] "a" (i) \
: "memory" \
); \
\
@ -163,13 +165,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \
" wsr %1, scompare1\n" \
" " #op " %0, %1, %2\n" \
" s32c1i %0, %3, 0\n" \
" bne %0, %1, 1b\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
"1: l32i %[tmp], %[mem]\n" \
" wsr %[tmp], scompare1\n" \
" " #op " %[result], %[tmp], %[i]\n" \
" s32c1i %[result], %[mem]\n" \
" bne %[result], %[tmp], 1b\n" \
: [result] "=&a" (result), [tmp] "=&a" (tmp), \
[mem] "+m" (*v) \
: [i] "a" (i) \
: "memory" \
); \
\
@ -184,14 +187,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
unsigned int vval; \
\
__asm__ __volatile__( \
" rsil a15, "__stringify(TOPLEVEL)"\n"\
" l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \
" rsil a15, "__stringify(TOPLEVEL)"\n" \
" l32i %[result], %[mem]\n" \
" " #op " %[result], %[result], %[i]\n" \
" s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
: "=&a" (vval) \
: "a" (i), "a" (v) \
: [result] "=&a" (vval), [mem] "+m" (*v) \
: [i] "a" (i) \
: "a15", "memory" \
); \
} \
@ -203,13 +206,13 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
" l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \
" l32i %[result], %[mem]\n" \
" " #op " %[result], %[result], %[i]\n" \
" s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
: "=&a" (vval) \
: "a" (i), "a" (v) \
: [result] "=&a" (vval), [mem] "+m" (*v) \
: [i] "a" (i) \
: "a15", "memory" \
); \
\
@ -223,13 +226,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
" l32i %0, %3, 0\n" \
" " #op " %1, %0, %2\n" \
" s32i %1, %3, 0\n" \
" l32i %[result], %[mem]\n" \
" " #op " %[tmp], %[result], %[i]\n" \
" s32i %[tmp], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
: "=&a" (vval), "=&a" (tmp) \
: "a" (i), "a" (v) \
: [result] "=&a" (vval), [tmp] "=&a" (tmp), \
[mem] "+m" (*v) \
: [i] "a" (i) \
: "a15", "memory" \
); \
\


@ -98,248 +98,113 @@ static inline unsigned long __fls(unsigned long word)
#if XCHAL_HAVE_EXCLUSIVE
static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %0, %2\n"
" or %0, %0, %1\n"
" s32ex %0, %2\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp)
: "a" (mask), "a" (p)
: "memory");
#define BIT_OP(op, insn, inv) \
static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \
unsigned long tmp; \
unsigned long mask = 1UL << (bit & 31); \
\
p += bit >> 5; \
\
__asm__ __volatile__( \
"1: l32ex %[tmp], %[addr]\n" \
" "insn" %[tmp], %[tmp], %[mask]\n" \
" s32ex %[tmp], %[addr]\n" \
" getex %[tmp]\n" \
" beqz %[tmp], 1b\n" \
: [tmp] "=&a" (tmp) \
: [mask] "a" (inv mask), [addr] "a" (p) \
: "memory"); \
}
static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %0, %2\n"
" and %0, %0, %1\n"
" s32ex %0, %2\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp)
: "a" (~mask), "a" (p)
: "memory");
}
static inline void change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %0, %2\n"
" xor %0, %0, %1\n"
" s32ex %0, %2\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp)
: "a" (mask), "a" (p)
: "memory");
}
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %1, %3\n"
" or %0, %1, %2\n"
" s32ex %0, %3\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return value & mask;
}
static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %1, %3\n"
" and %0, %1, %2\n"
" s32ex %0, %3\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
return value & mask;
}
static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %1, %3\n"
" xor %0, %1, %2\n"
" s32ex %0, %3\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return value & mask;
#define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \
test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
\
p += bit >> 5; \
\
__asm__ __volatile__( \
"1: l32ex %[value], %[addr]\n" \
" "insn" %[tmp], %[value], %[mask]\n" \
" s32ex %[tmp], %[addr]\n" \
" getex %[tmp]\n" \
" beqz %[tmp], 1b\n" \
: [tmp] "=&a" (tmp), [value] "=&a" (value) \
: [mask] "a" (inv mask), [addr] "a" (p) \
: "memory"); \
\
return value & mask; \
}
#elif XCHAL_HAVE_S32C1I
static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
#define BIT_OP(op, insn, inv) \
static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
\
p += bit >> 5; \
\
__asm__ __volatile__( \
"1: l32i %[value], %[mem]\n" \
" wsr %[value], scompare1\n" \
" "insn" %[tmp], %[value], %[mask]\n" \
" s32c1i %[tmp], %[mem]\n" \
" bne %[tmp], %[value], 1b\n" \
: [tmp] "=&a" (tmp), [value] "=&a" (value), \
[mem] "+m" (*p) \
: [mask] "a" (inv mask) \
: "memory"); \
}
static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
}
static inline void change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" xor %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
}
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return tmp & mask;
}
static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
return tmp & mask;
}
static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" xor %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return tmp & mask;
#define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \
test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
\
p += bit >> 5; \
\
__asm__ __volatile__( \
"1: l32i %[value], %[mem]\n" \
" wsr %[value], scompare1\n" \
" "insn" %[tmp], %[value], %[mask]\n" \
" s32c1i %[tmp], %[mem]\n" \
" bne %[tmp], %[value], 1b\n" \
: [tmp] "=&a" (tmp), [value] "=&a" (value), \
[mem] "+m" (*p) \
: [mask] "a" (inv mask) \
: "memory"); \
\
return tmp & mask; \
}
#else
#define BIT_OP(op, insn, inv)
#define TEST_AND_BIT_OP(op, insn, inv)
#include <asm-generic/bitops/atomic.h>
#endif /* XCHAL_HAVE_S32C1I */
#define BIT_OPS(op, insn, inv) \
BIT_OP(op, insn, inv) \
TEST_AND_BIT_OP(op, insn, inv)
BIT_OPS(set, "or", )
BIT_OPS(clear, "and", ~)
BIT_OPS(change, "xor", )
#undef BIT_OPS
#undef BIT_OP
#undef TEST_AND_BIT_OP
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
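
To make the macro generation above easier to follow: in the S32C1I branch, BIT_OPS(set, "or", ) expands BIT_OP(set, "or", ) to roughly the following (hand-expanded sketch for illustration, with the empty "inv" argument dropped):

static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
        unsigned long tmp, value;
        unsigned long mask = 1UL << (bit & 31);

        p += bit >> 5;

        __asm__ __volatile__(
        "1:     l32i    %[value], %[mem]\n"
        "       wsr     %[value], scompare1\n"
        "       or      %[tmp], %[value], %[mask]\n"
        "       s32c1i  %[tmp], %[mem]\n"
        "       bne     %[tmp], %[value], 1b\n"
        : [tmp] "=&a" (tmp), [value] "=&a" (value), [mem] "+m" (*p)
        : [mask] "a" (mask)
        : "memory");
}

BIT_OPS(clear, "and", ~) and BIT_OPS(change, "xor", ) differ only in the instruction used and in whether the mask operand is inverted; the TEST_AND_BIT_OP variants additionally return value & mask.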


@ -31,4 +31,10 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
/*
* R/O after init is actually writable, it cannot go to .rodata
* according to vmlinux linker script.
*/
#define __ro_after_init __read_mostly
#endif /* _XTENSA_CACHE_H */


@ -27,25 +27,25 @@ __cmpxchg_u32(volatile int *p, int old, int new)
unsigned long tmp, result;
__asm__ __volatile__(
"1: l32ex %0, %3\n"
" bne %0, %4, 2f\n"
" mov %1, %2\n"
" s32ex %1, %3\n"
" getex %1\n"
" beqz %1, 1b\n"
"1: l32ex %[result], %[addr]\n"
" bne %[result], %[cmp], 2f\n"
" mov %[tmp], %[new]\n"
" s32ex %[tmp], %[addr]\n"
" getex %[tmp]\n"
" beqz %[tmp], 1b\n"
"2:\n"
: "=&a" (result), "=&a" (tmp)
: "a" (new), "a" (p), "a" (old)
: [result] "=&a" (result), [tmp] "=&a" (tmp)
: [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
: "memory"
);
return result;
#elif XCHAL_HAVE_S32C1I
__asm__ __volatile__(
" wsr %2, scompare1\n"
" s32c1i %0, %1, 0\n"
: "+a" (new)
: "a" (p), "a" (old)
" wsr %[cmp], scompare1\n"
" s32c1i %[new], %[mem]\n"
: [new] "+a" (new), [mem] "+m" (*p)
: [cmp] "a" (old)
: "memory"
);
@ -53,14 +53,14 @@ __cmpxchg_u32(volatile int *p, int old, int new)
#else
__asm__ __volatile__(
" rsil a15, "__stringify(TOPLEVEL)"\n"
" l32i %0, %1, 0\n"
" bne %0, %2, 1f\n"
" s32i %3, %1, 0\n"
" l32i %[old], %[mem]\n"
" bne %[old], %[cmp], 1f\n"
" s32i %[new], %[mem]\n"
"1:\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (old)
: "a" (p), "a" (old), "r" (new)
: [old] "=&a" (old), [mem] "+m" (*p)
: [cmp] "a" (old), [new] "r" (new)
: "a15", "memory");
return old;
#endif
@ -129,13 +129,13 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long tmp, result;
__asm__ __volatile__(
"1: l32ex %0, %3\n"
" mov %1, %2\n"
" s32ex %1, %3\n"
" getex %1\n"
" beqz %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (val), "a" (m)
"1: l32ex %[result], %[addr]\n"
" mov %[tmp], %[val]\n"
" s32ex %[tmp], %[addr]\n"
" getex %[tmp]\n"
" beqz %[tmp], 1b\n"
: [result] "=&a" (result), [tmp] "=&a" (tmp)
: [val] "a" (val), [addr] "a" (m)
: "memory"
);
@ -143,13 +143,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#elif XCHAL_HAVE_S32C1I
unsigned long tmp, result;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
" mov %0, %3\n"
" wsr %1, scompare1\n"
" s32c1i %0, %2, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (m), "a" (val)
"1: l32i %[tmp], %[mem]\n"
" mov %[result], %[val]\n"
" wsr %[tmp], scompare1\n"
" s32c1i %[result], %[mem]\n"
" bne %[result], %[tmp], 1b\n"
: [result] "=&a" (result), [tmp] "=&a" (tmp),
[mem] "+m" (*m)
: [val] "a" (val)
: "memory"
);
return result;
@ -157,12 +158,12 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long tmp;
__asm__ __volatile__(
" rsil a15, "__stringify(TOPLEVEL)"\n"
" l32i %0, %1, 0\n"
" s32i %2, %1, 0\n"
" l32i %[tmp], %[mem]\n"
" s32i %[val], %[mem]\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (tmp)
: "a" (m), "a" (val)
: [tmp] "=&a" (tmp), [mem] "+m" (*m)
: [val] "a" (val)
: "a15", "memory");
return tmp;
#endif


@ -78,8 +78,10 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel( \
pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
(vaddr) \
)
pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
(vaddr)), \
(vaddr)), \
(vaddr)), \
(vaddr))
#endif


@ -43,10 +43,10 @@
#elif XCHAL_HAVE_S32C1I
#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
__asm__ __volatile( \
"1: l32i %[oldval], %[addr], 0\n" \
"1: l32i %[oldval], %[mem]\n" \
insn "\n" \
" wsr %[oldval], scompare1\n" \
"2: s32c1i %[newval], %[addr], 0\n" \
"2: s32c1i %[newval], %[mem]\n" \
" bne %[newval], %[oldval], 1b\n" \
" movi %[newval], 0\n" \
"3:\n" \
@ -60,9 +60,9 @@
" .section __ex_table,\"a\"\n" \
" .long 1b, 5b, 2b, 5b\n" \
" .previous\n" \
: [oldval] "=&r" (old), [newval] "=&r" (ret) \
: [addr] "r" (uaddr), [oparg] "r" (arg), \
[fault] "I" (-EFAULT) \
: [oldval] "=&r" (old), [newval] "=&r" (ret), \
[mem] "+m" (*(uaddr)) \
: [oparg] "r" (arg), [fault] "I" (-EFAULT) \
: "memory")
#endif


@ -1,14 +0,0 @@
/*
* include/asm-xtensa/hw_irq.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_HW_IRQ_H
#define _XTENSA_HW_IRQ_H
#endif


@ -23,6 +23,7 @@
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/vectors.h>
@ -183,7 +184,7 @@
#endif
#if XCHAL_HAVE_MPU
.data
__REFCONST
.align 4
.Lattribute_table:
.long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00


@ -11,6 +11,7 @@
#ifndef _XTENSA_KMEM_LAYOUT_H
#define _XTENSA_KMEM_LAYOUT_H
#include <asm/core.h>
#include <asm/types.h>
#ifdef CONFIG_MMU
@ -65,6 +66,34 @@
#endif
/* KIO definition */
#if XCHAL_HAVE_PTP_MMU
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
#else
#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
#endif
#define XCHAL_KIO_SIZE 0x10000000
#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
#ifndef __ASSEMBLY__
extern unsigned long xtensa_kio_paddr;
static inline unsigned long xtensa_get_kio_paddr(void)
{
return xtensa_kio_paddr;
}
#endif
#else
#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
#endif
/* KERNEL_STACK definition */
#ifndef CONFIG_KASAN
#define KERNEL_STACK_SHIFT 13
#else


@ -169,7 +169,18 @@ static inline unsigned long ___pa(unsigned long va)
if (off >= XCHAL_KSEG_SIZE)
off -= XCHAL_KSEG_SIZE;
#ifndef CONFIG_XIP_KERNEL
return off + PHYS_OFFSET;
#else
if (off < XCHAL_KSEG_SIZE)
return off + PHYS_OFFSET;
off -= XCHAL_KSEG_SIZE;
if (off >= XCHAL_KIO_SIZE)
off -= XCHAL_KIO_SIZE;
return off + XCHAL_KIO_PADDR;
#endif
}
#define __pa(x) ___pa((unsigned long)(x))
#else


@ -8,7 +8,6 @@
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H
#define __ARCH_USE_5LEVEL_HACK
#include <asm/page.h>
#include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>
@ -371,9 +370,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,address) ((pmd_t*)(dir))
/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \


@ -195,6 +195,7 @@ struct thread_struct {
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
do { \
unsigned long syscall = (regs)->syscall; \
memset((regs), 0, sizeof(*(regs))); \
(regs)->pc = (new_pc); \
(regs)->ps = USER_PS_VALUE; \
@ -204,7 +205,7 @@ struct thread_struct {
(regs)->depc = 0; \
(regs)->windowbase = 0; \
(regs)->windowstart = 1; \
(regs)->syscall = NO_SYSCALL; \
(regs)->syscall = syscall; \
} while (0)
/* Forward declaration */


@ -51,7 +51,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
regs->areg[0] = (long) error ? error : val;
regs->areg[2] = (long) error ? error : val;
}
#define SYSCALL_MAX_ARGS 6
@ -79,7 +79,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->areg[reg[i]] = args[i];
}
asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
asmlinkage long xtensa_rt_sigreturn(void);
asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
unsigned long long, unsigned long long);


@ -132,13 +132,13 @@ do { \
#define __check_align_1 ""
#define __check_align_2 \
" _bbci.l %[addr], 0, 1f \n" \
" _bbci.l %[mem] * 0, 1f \n" \
" movi %[err], %[efault] \n" \
" _j 2f \n"
#define __check_align_4 \
" _bbsi.l %[addr], 0, 0f \n" \
" _bbci.l %[addr], 1, 1f \n" \
" _bbsi.l %[mem] * 0, 0f \n" \
" _bbci.l %[mem] * 0 + 1, 1f \n" \
"0: movi %[err], %[efault] \n" \
" _j 2f \n"
@ -154,7 +154,7 @@ do { \
#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %[x], %[addr], 0 \n" \
"1: "insn" %[x], %[mem] \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
@ -167,8 +167,8 @@ __asm__ __volatile__( \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
:[err] "+r"(err_), [tmp] "=r"(cb) \
:[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
:[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_)) \
:[x] "r"(x_), [efault] "i"(-EFAULT))
#define __get_user_nocheck(x, ptr, size) \
({ \
@ -222,7 +222,7 @@ do { \
u32 __x = 0; \
__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %[x], %[addr], 0 \n" \
"1: "insn" %[x], %[mem] \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
@ -236,7 +236,7 @@ do { \
" .long 1b, 5b \n" \
" .previous" \
:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
:[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
:[mem] "m"(*(addr_)), [efault] "i"(-EFAULT)); \
(x_) = (__force __typeof__(*(addr_)))__x; \
} while (0)


@ -1,20 +0,0 @@
/*
* include/asm-xtensa/user.h
*
* Xtensa Processor version.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_USER_H
#define _XTENSA_USER_H
/* This file usually defines a 'struct user' structure. However, it it only
* used for a.out file, which are not supported on Xtensa.
*/
#endif /* _XTENSA_USER_H */


@ -21,50 +21,18 @@
#include <asm/core.h>
#include <asm/kmem_layout.h>
#if XCHAL_HAVE_PTP_MMU
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
#ifdef CONFIG_KERNEL_VIRTUAL_ADDRESS
#define KERNELOFFSET CONFIG_KERNEL_VIRTUAL_ADDRESS
#else
#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
#endif
#define XCHAL_KIO_SIZE 0x10000000
#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
#ifndef __ASSEMBLY__
extern unsigned long xtensa_kio_paddr;
static inline unsigned long xtensa_get_kio_paddr(void)
{
return xtensa_kio_paddr;
}
#endif
#else
#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
#endif
#if defined(CONFIG_MMU)
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/* Image Virtual Start Address */
#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
CONFIG_KERNEL_LOAD_ADDRESS - \
#define KERNELOFFSET (CONFIG_KERNEL_LOAD_ADDRESS + \
XCHAL_KSEG_CACHED_VADDR - \
XCHAL_KSEG_PADDR)
#endif
#else
#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
#endif
#else /* !defined(CONFIG_MMU) */
/* MMU Not being used - Virtual == Physical */
/* Location of the start of the kernel text, _start */
#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
#endif /* CONFIG_MMU */
#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
#ifdef CONFIG_VECTORS_OFFSET
#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)


@ -15,17 +15,9 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <asm/regs.h>
#if XTENSA_HAVE_COPROCESSORS


@ -529,7 +529,7 @@ common_exception_return:
l32i a4, a2, TI_PRE_COUNT
bnez a4, 4f
call4 preempt_schedule_irq
j 1b
j 4f
#endif
#if XTENSA_FAKE_NMI
@ -1876,8 +1876,7 @@ ENDPROC(fast_store_prohibited)
ENTRY(system_call)
/* reserve 4 bytes on stack for function parameter */
abi_entry(4)
abi_entry_default
/* regs->syscall = regs->areg[2] */
@ -1892,11 +1891,10 @@ ENTRY(system_call)
mov a6, a2
call4 do_syscall_trace_enter
beqz a6, .Lsyscall_exit
l32i a7, a2, PT_SYSCALL
1:
s32i a7, a1, 4
/* syscall = sys_call_table[syscall_nr] */
movi a4, sys_call_table
@ -1906,8 +1904,6 @@ ENTRY(system_call)
addx4 a4, a7, a4
l32i a4, a4, 0
movi a5, sys_ni_syscall;
beq a4, a5, 1f
/* Load args: arg0 - arg5 are passed via regs. */
@ -1918,25 +1914,19 @@ ENTRY(system_call)
l32i a10, a2, PT_AREG8
l32i a11, a2, PT_AREG9
/* Pass one additional argument to the syscall: pt_regs (on stack) */
s32i a2, a1, 0
callx4 a4
1: /* regs->areg[2] = return_value */
s32i a6, a2, PT_AREG2
bnez a3, 1f
abi_ret(4)
.Lsyscall_exit:
abi_ret_default
1:
l32i a4, a1, 4
l32i a3, a2, PT_SYSCALL
s32i a4, a2, PT_SYSCALL
mov a6, a2
call4 do_syscall_trace_leave
s32i a3, a2, PT_SYSCALL
abi_ret(4)
abi_ret_default
ENDPROC(system_call)


@ -260,6 +260,13 @@ ENTRY(_startup)
___invalidate_icache_all a2 a3
isync
#ifdef CONFIG_XIP_KERNEL
/* Setup bootstrap CPU stack in XIP kernel */
movi a1, start_info
l32i a1, a1, 0
#endif
movi a6, 0
xsr a6, excsave1
@ -355,10 +362,10 @@ ENDPROC(cpu_restart)
* DATA section
*/
.section ".data.init.refok"
.align 4
__REFDATA
.align 4
ENTRY(start_info)
.long init_thread_union + KERNEL_STACK_SIZE
.long init_thread_union + KERNEL_STACK_SIZE
/*
* BSS section


@ -264,6 +264,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
&regs->areg[XCHAL_NUM_AREGS - len/4], len);
}
childregs->syscall = regs->syscall;
/* The thread pointer is passed in the '4th argument' (= a5) */
if (clone_flags & CLONE_SETTLS)
childregs->threadptr = childregs->areg[5];


@ -542,14 +542,28 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
void do_syscall_trace_enter(struct pt_regs *regs)
void do_syscall_trace_leave(struct pt_regs *regs);
int do_syscall_trace_enter(struct pt_regs *regs)
{
if (regs->syscall == NO_SYSCALL)
regs->areg[2] = -ENOSYS;
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
tracehook_report_syscall_entry(regs)) {
regs->areg[2] = -ENOSYS;
regs->syscall = NO_SYSCALL;
return 0;
}
if (regs->syscall == NO_SYSCALL) {
do_syscall_trace_leave(regs);
return 0;
}
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall_get_nr(current, regs));
return 1;
}
void do_syscall_trace_leave(struct pt_regs *regs)


@ -308,6 +308,10 @@ extern char _Level6InterruptVector_text_end;
extern char _SecondaryResetVector_text_start;
extern char _SecondaryResetVector_text_end;
#endif
#ifdef CONFIG_XIP_KERNEL
extern char _xip_start[];
extern char _xip_end[];
#endif
static inline int __init_memblock mem_reserve(unsigned long start,
unsigned long end)
@ -339,6 +343,9 @@ void __init setup_arch(char **cmdline_p)
#endif
mem_reserve(__pa(_stext), __pa(_end));
#ifdef CONFIG_XIP_KERNEL
mem_reserve(__pa(_xip_start), __pa(_xip_end));
#endif
#ifdef CONFIG_VECTORS_OFFSET
mem_reserve(__pa(&_WindowVectors_text_start),


@ -236,9 +236,9 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
* Do a signal return; undo the signal stack.
*/
asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
long a4, long a5, struct pt_regs *regs)
asmlinkage long xtensa_rt_sigreturn(void)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
sigset_t set;
int ret;


@ -491,32 +491,27 @@ void show_trace(struct task_struct *task, unsigned long *sp)
pr_info("Call Trace:\n");
walk_stackframe(sp, show_trace_cb, NULL);
#ifndef CONFIG_KALLSYMS
pr_cont("\n");
#endif
}
static int kstack_depth_to_print = 24;
#define STACK_DUMP_ENTRY_SIZE 4
#define STACK_DUMP_LINE_SIZE 32
static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
void show_stack(struct task_struct *task, unsigned long *sp)
{
int i = 0;
unsigned long *stack;
size_t len;
if (!sp)
sp = stack_pointer(task);
stack = sp;
len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
pr_info("Stack:\n");
for (i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(sp))
break;
pr_cont(" %08lx", *sp++);
if (i % 8 == 7)
pr_cont("\n");
}
show_trace(task, stack);
print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
sp, len, false);
show_trace(task, sp);
}
DEFINE_SPINLOCK(die_lock);
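
The rewritten show_stack() above hands the formatting to print_hex_dump(); for reference, the parameters it passes mean the following (annotated sketch of the same call):

print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
               STACK_DUMP_LINE_SIZE,   /* 32 bytes, i.e. 8 words per output line */
               STACK_DUMP_ENTRY_SIZE,  /* group the bytes as 4-byte words */
               sp, len,                /* dump 'len' bytes starting at sp */
               false);                 /* no trailing ASCII column */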


@ -119,7 +119,7 @@ SECTIONS
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
}
_etext = .;
PROVIDE (etext = .);
@ -128,12 +128,11 @@ SECTIONS
RO_DATA(4096)
/* Relocation table */
.fixup : { *(.fixup) }
/* Data section */
#ifdef CONFIG_XIP_KERNEL
INIT_TEXT_SECTION(PAGE_SIZE)
#else
_sdata = .;
RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
@ -147,6 +146,11 @@ SECTIONS
.init.data :
{
INIT_DATA
}
#endif
.init.rodata :
{
. = ALIGN(0x4);
__tagtable_begin = .;
*(.taglist)
@ -187,12 +191,16 @@ SECTIONS
RELOCATE_ENTRY(_DebugInterruptVector_text,
.DebugInterruptVector.text);
#endif
#ifdef CONFIG_XIP_KERNEL
RELOCATE_ENTRY(_xip_data, .data);
RELOCATE_ENTRY(_xip_init_data, .init.data);
#else
#if defined(CONFIG_SMP)
RELOCATE_ENTRY(_SecondaryResetVector_text,
.SecondaryResetVector.text);
#endif
#endif
__boot_reloc_table_end = ABSOLUTE(.) ;
INIT_SETUP(XCHAL_ICACHE_LINESIZE)
@ -278,7 +286,7 @@ SECTIONS
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
#endif
#if defined(CONFIG_SMP)
#if !defined(CONFIG_XIP_KERNEL) && defined(CONFIG_SMP)
SECTION_VECTOR (_SecondaryResetVector_text,
.SecondaryResetVector.text,
@ -291,12 +299,48 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
#ifndef CONFIG_XIP_KERNEL
__init_end = .;
BSS_SECTION(0, 8192, 0)
#endif
_end = .;
#ifdef CONFIG_XIP_KERNEL
. = CONFIG_XIP_DATA_ADDR;
_xip_start = .;
#undef LOAD_OFFSET
#define LOAD_OFFSET \
(CONFIG_XIP_DATA_ADDR - (LOADADDR(.dummy) + SIZEOF(.dummy) + 3) & ~ 3)
_xip_data_start = .;
_sdata = .;
RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
_xip_data_end = .;
/* Initialization data: */
STRUCT_ALIGN();
_xip_init_data_start = .;
__init_begin = .;
.init.data :
{
INIT_DATA
}
_xip_init_data_end = .;
__init_end = .;
BSS_SECTION(0, 8192, 0)
_xip_end = .;
#undef LOAD_OFFSET
#endif
DWARF_DEBUG
.xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) }


@ -197,6 +197,8 @@ void do_page_fault(struct pt_regs *regs)
struct mm_struct *act_mm = current->active_mm;
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@ -211,8 +213,18 @@ void do_page_fault(struct pt_regs *regs)
pgd_val(*pgd) = pgd_val(*pgd_k);
pmd = pmd_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address);
p4d = p4d_offset(pgd, address);
p4d_k = p4d_offset(pgd_k, address);
if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
goto bad_page_fault;
pud = pud_offset(p4d, address);
pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud) || !pud_present(*pud_k))
goto bad_page_fault;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_page_fault;
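
The do_page_fault() hunk above, and the KASAN and TLB-checker hunks that follow, all switch to the same explicit walk now that __ARCH_USE_5LEVEL_HACK is gone: every (possibly folded) level is looked up with its own *_offset() helper. A generic sketch of the pattern, assuming the usual kernel page-table environment (the helper name is illustrative, not taken from the patch):

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long vaddr)
{
        pgd_t *pgd = pgd_offset(mm, vaddr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none_or_clear_bad(pgd))
                return NULL;
        p4d = p4d_offset(pgd, vaddr);
        if (p4d_none_or_clear_bad(p4d))
                return NULL;
        pud = pud_offset(p4d, vaddr);
        if (pud_none_or_clear_bad(pud))
                return NULL;
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none_or_clear_bad(pmd))
                return NULL;
        return pte_offset_map(pmd, vaddr);
}

On xtensa the p4d, pud, and pmd levels are folded, so these lookups effectively collapse to the pgd entry, but using the generic helpers lets the architecture drop the 5-level-hack definitions.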


@ -193,8 +193,8 @@ void __init mem_init(void)
((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
(unsigned long)_text, (unsigned long)_etext,
(unsigned long)(_etext - _text) >> 10,
(unsigned long)__start_rodata, (unsigned long)_sdata,
(unsigned long)(_sdata - __start_rodata) >> 10,
(unsigned long)__start_rodata, (unsigned long)__end_rodata,
(unsigned long)(__end_rodata - __start_rodata) >> 10,
(unsigned long)_sdata, (unsigned long)_edata,
(unsigned long)(_edata - _sdata) >> 10,
(unsigned long)__init_begin, (unsigned long)__init_end,


@ -20,7 +20,9 @@ void __init kasan_early_init(void)
{
unsigned long vaddr = KASAN_SHADOW_START;
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
int i;
for (i = 0; i < PTRS_PER_PTE; ++i)
@ -42,7 +44,9 @@ static void __init populate(void *start, void *end)
unsigned long i, j;
unsigned long vaddr = (unsigned long)start;
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
if (!pte)
@ -56,7 +60,9 @@ static void __init populate(void *start, void *end)
for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
phys_addr_t phys =
memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
0,
MEMBLOCK_ALLOC_ANYWHERE);
if (!phys)
panic("Failed to allocate page table page\n");


@ -22,7 +22,9 @@
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte;
unsigned long i;


@ -169,6 +169,8 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
struct task_struct *task = get_current();
struct mm_struct *mm = task->mm;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@ -177,7 +179,13 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
pgd = pgd_offset(mm, vaddr);
if (pgd_none_or_clear_bad(pgd))
return 0;
pmd = pmd_offset(pgd, vaddr);
p4d = p4d_offset(pgd, vaddr);
if (p4d_none_or_clear_bad(p4d))
return 0;
pud = pud_offset(p4d, vaddr);
if (pud_none_or_clear_bad(pud))
return 0;
pmd = pmd_offset(pud, vaddr);
if (pmd_none_or_clear_bad(pmd))
return 0;
pte = pte_offset_map(pmd, vaddr);
@ -216,6 +224,8 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
unsigned tlbidx = w | (e << PAGE_SHIFT);
unsigned r0 = dtlb ?
read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
unsigned r1 = dtlb ?
read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
unsigned pte = get_pte_for_vaddr(vpn);
unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
@ -231,8 +241,6 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
}
if (tlb_asid == mm_asid) {
unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
read_itlb_translation(tlbidx);
if ((pte ^ r1) & PAGE_MASK) {
pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
dtlb ? 'D' : 'I', w, e, r0, r1, pte);