39ec58f3fe
This implements {copy_to,clear}_user() by faulting in the userland
pages and then using the regular kernel mem{cpy,set}() to copy the
data (while holding the page table lock). This is a win if the regular
mem{cpy,set}() implementations are faster than the user copy
functions, which is the case e.g. on Feroceon, where 8-word STMs
(which memcpy() uses under the right conditions) give significantly
higher memory write throughput than a sequence of individual 32-bit
stores.

Here are numbers for page sized buffers on some Feroceon cores:

 - copy_to_user on Orion5x goes from 51 MB/s to 83 MB/s
 - clear_user on Orion5x goes from 89 MB/s to 314 MB/s
 - copy_to_user on Kirkwood goes from 240 MB/s to 356 MB/s
 - clear_user on Kirkwood goes from 367 MB/s to 1108 MB/s
 - copy_to_user on Disco-Duo goes from 248 MB/s to 398 MB/s
 - clear_user on Disco-Duo goes from 328 MB/s to 1741 MB/s

Because the setup cost is non-negligible, this is worthwhile only if
the amount of data to copy is large enough. The operation falls back
to the standard implementation when the amount of data is below a
certain threshold. This threshold was determined empirically, although
some targets could eventually benefit from a lower, runtime-determined
value for optimal results.

In the copy_from_user() case, this technique does not provide any
worthwhile performance gain because any kind of read access allocates
the cache, and subsequent 32-bit loads are just as fast as the
equivalent 8-word LDM.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Tested-by: Martin Michlmayr <tbm@cyrius.com>
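For context, here is a minimal C sketch of the technique the message
describes, loosely modelled on what a lib/uaccess_with_memcpy.c
implementation can look like. It is illustrative only: the
atomic-context and KERNEL_DS special cases, the empirical size
threshold, and the memset()-based clear_user() twin are omitted, and
the helper name pin_page_for_write is simply descriptive.

    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <linux/uaccess.h>

    /*
     * Walk the current task's page tables for a user address and, if
     * the page is present, writable and dirty, return with its PTE
     * lock held so the mapping cannot change underneath the memcpy().
     */
    static int pin_page_for_write(const void __user *_addr,
                                  pte_t **ptep, spinlock_t **ptlp)
    {
            unsigned long addr = (unsigned long)_addr;
            pgd_t *pgd;
            pmd_t *pmd;
            pte_t *pte;
            spinlock_t *ptl;

            pgd = pgd_offset(current->mm, addr);
            if (pgd_none(*pgd) || pgd_bad(*pgd))
                    return 0;

            /* the pud level is folded away on (pre-LPAE) ARM */
            pmd = pmd_offset(pgd, addr);
            if (pmd_none(*pmd) || pmd_bad(*pmd))
                    return 0;

            pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
            if (!pte_present(*pte) || !pte_write(*pte) ||
                !pte_dirty(*pte)) {
                    pte_unmap_unlock(pte, ptl);
                    return 0;
            }

            *ptep = pte;
            *ptlp = ptl;
            return 1;
    }

    static unsigned long __copy_to_user_memcpy(void __user *to,
                                               const void *from,
                                               unsigned long n)
    {
            down_read(&current->mm->mmap_sem);
            while (n) {
                    pte_t *pte;
                    spinlock_t *ptl;
                    unsigned long tocopy;

                    /*
                     * If the page is not yet mapped writable, fault it
                     * in with an ordinary one-byte __put_user() and
                     * retry; a failing __put_user() is a real EFAULT.
                     */
                    while (!pin_page_for_write(to, &pte, &ptl)) {
                            up_read(&current->mm->mmap_sem);
                            if (__put_user(0, (char __user *)to))
                                    goto out;
                            down_read(&current->mm->mmap_sem);
                    }

                    /* copy at most up to the end of the current page */
                    tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
                    if (tocopy > n)
                            tocopy = n;

                    /* safe: the PTE lock pins this mapping in place */
                    memcpy((void __force *)to, from, tocopy);
                    to += tocopy;
                    from += tocopy;
                    n -= tocopy;

                    pte_unmap_unlock(pte, ptl);
            }
            up_read(&current->mm->mmap_sem);
    out:
            return n;       /* bytes NOT copied, per convention */
    }

In the real code, copy_to_user() would take this path only when the
copy size is above the empirical threshold the message mentions, and
fall back to the assembler routine for short copies.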
49 lines
1.5 KiB
Makefile
#
# linux/arch/arm/lib/Makefile
#
# Copyright (C) 1995-2000 Russell King
#

lib-y   := backtrace.o changebit.o csumipv6.o csumpartial.o   \
           csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
           delay.o findbit.o memchr.o memcpy.o                \
           memmove.o memset.o memzero.o setbit.o              \
           strncpy_from_user.o strnlen_user.o                 \
           strchr.o strrchr.o                                 \
           testchangebit.o testclearbit.o testsetbit.o        \
           ashldi3.o ashrdi3.o lshrdi3.o muldi3.o             \
           ucmpdi2.o lib1funcs.o div64.o sha1.o               \
           io-readsb.o io-writesb.o io-readsl.o io-writesl.o

mmu-y   := clear_user.o copy_page.o getuser.o putuser.o

# the code in uaccess.S is not preemption safe and
# probably faster on ARMv3 only
ifeq ($(CONFIG_PREEMPT),y)
  mmu-y += copy_from_user.o copy_to_user.o
else
ifneq ($(CONFIG_CPU_32v3),y)
  mmu-y += copy_from_user.o copy_to_user.o
else
  mmu-y += uaccess.o
endif
endif

# using lib_ here won't override already available weak symbols
obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o

lib-$(CONFIG_MMU) += $(mmu-y)

ifeq ($(CONFIG_CPU_32v3),y)
  lib-y += io-readsw-armv3.o io-writesw-armv3.o
else
  lib-y += io-readsw-armv4.o io-writesw-armv4.o
endif

lib-$(CONFIG_ARCH_RPC)   += ecard.o io-acorn.o floppydma.o
lib-$(CONFIG_ARCH_L7200) += io-acorn.o
lib-$(CONFIG_ARCH_SHARK) += io-shark.o

$(obj)/csumpartialcopy.o:     $(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
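A note on the "using lib_ here won't override already available weak
symbols" comment above: objects listed in lib-y go into lib.a, and the
linker extracts archive members only to resolve otherwise-missing
symbols, so a strong definition sitting in lib.a can never displace a
weak default that is already linked into the image. That is why
uaccess_with_memcpy.o is hooked in via obj-. A small two-file C
illustration of this linker behaviour (the symbol name arch_copy_chunk
is hypothetical, not from the kernel):

    /* default.c: a weak fallback, always linked into the image */
    #include <stddef.h>

    __attribute__((weak))
    void arch_copy_chunk(char *to, const char *from, size_t n)
    {
            while (n--)                     /* slow byte-at-a-time default */
                    *to++ = *from++;
    }

    /* fast.c: a strong definition intended to replace the weak one.
     * Linked directly (the obj-y case, "cc main.o default.o fast.o"),
     * it wins over the weak default.  Dropped into an archive first
     * (the lib-y case, "ar rcs lib.a fast.o; cc main.o default.o lib.a"),
     * it is never even extracted, because the weak symbol already
     * resolves the reference. */
    void arch_copy_chunk(char *to, const char *from, size_t n)
    {
            __builtin_memcpy(to, from, n);  /* fast bulk copy */
    }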