/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details. No warranty for anything given at all.
 */
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>

/*
 * Checksum copy with exception handling.
 * On a faulting access the function returns 0 instead of the sum.
 *
 * Input
 * rdi	source
 * rsi	destination
 * edx	len (32bit)
 *
 * Output
 * eax	32bit folded sum; zero in case of an exception.
 *
 * Wrappers need to take care of valid exception sum and zeroing.
 * They also should align source or destination to 8 bytes.
 */
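
/*
 * As a rough C model of what this routine computes (illustrative only,
 * assuming no faults; the name csum_copy and the exact tail handling are
 * not part of the kernel API):
 *
 *	u32 csum_copy(const u8 *src, u8 *dst, u32 len)
 *	{
 *		u64 sum = 0xffffffff;		// matches "movl $-1, %eax"
 *		while (len >= 8) {
 *			u64 v;
 *			memcpy(&v, src, 8);	// checksum and copy in one pass
 *			memcpy(dst, src, 8);
 *			sum += v;
 *			if (sum < v)		// end-around (ones') carry
 *				sum++;
 *			src += 8; dst += 8; len -= 8;
 *		}
 *		// 4-, 2- and 1-byte tails are accumulated the same way, then:
 *		sum = (sum & 0xffffffff) + (sum >> 32);
 *		sum = (sum & 0xffffffff) + (sum >> 32);
 *		return sum;
 *	}
 */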

	.macro source
10:
	_ASM_EXTABLE_UA(10b, .Lfault)
	.endm

	.macro dest
20:
	_ASM_EXTABLE_UA(20b, .Lfault)
	.endm

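/*
 * Note: "source" and "dest" emit no instructions themselves; each tags
 * the memory access that follows it with an exception-table entry, so a
 * fault at label 10:/20: resumes execution at .Lfault instead of oopsing.
 * The _UA flavor marks the access as a sanctioned user-memory access.
 */
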
SYM_FUNC_START(csum_partial_copy_generic)
	/* save the callee-saved registers used as scratch below */
	subq	$5*8, %rsp
	movq	%rbx, 0*8(%rsp)
	movq	%r12, 1*8(%rsp)
	movq	%r14, 2*8(%rsp)
	movq	%r13, 3*8(%rsp)
	movq	%r15, 4*8(%rsp)

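	/*
	 * The ~0U seed below presumably guarantees that a successful sum
	 * can never fold to 0, keeping the 0 returned by .Lfault
	 * unambiguous to the wrappers.
	 */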
	movl	$-1, %eax
	xorl	%r9d, %r9d	/* %r9 stays zero throughout; used with adc */
	movl	%edx, %ecx
	cmpl	$8, %ecx
	jb	.Lshort

	testb	$7, %sil	/* destination 8-byte aligned? */
	jne	.Lunaligned
.Laligned:
	movl	%ecx, %r12d

	shrq	$6, %r12	/* number of 64-byte blocks */
	jz	.Lhandle_tail	/* < 64 bytes */

	clc

	/* main loop: checksum and copy in 64-byte blocks */
	/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
	/* r11: temp3, rdx: temp4, r12: loopcnt */
	/* r10: temp5, r15: temp6, r14: temp7, r13: temp8 */
	.p2align 4
.Lloop:
	source
	movq	(%rdi), %rbx
	source
	movq	8(%rdi), %r8
	source
	movq	16(%rdi), %r11
	source
	movq	24(%rdi), %rdx

	source
	movq	32(%rdi), %r10
	source
	movq	40(%rdi), %r15
	source
	movq	48(%rdi), %r14
	source
	movq	56(%rdi), %r13

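	/*
	 * All eight loads are issued before any of them is consumed;
	 * presumably this layout gives the loads time to complete ahead
	 * of the dependent adc chain below.
	 */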
30:
	/*
	 * No _ASM_EXTABLE_UA; this is used for an intentional prefetch on a
	 * potentially unmapped kernel address.
	 */
	_ASM_EXTABLE(30b, 2f)
	prefetcht0 5*64(%rdi)
2:
	adcq	%rbx, %rax
	adcq	%r8, %rax
	adcq	%r11, %rax
	adcq	%rdx, %rax
	adcq	%r10, %rax
	adcq	%r15, %rax
	adcq	%r14, %rax
	adcq	%r13, %rax
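
	/*
	 * The adc chain above threads the carry flag from one addition to
	 * the next while accumulating the eight words into %rax; roughly,
	 * in C:
	 *
	 *	sum = add_c(sum, w0);	// add_c: add with carry-in/carry-out
	 *	sum = add_c(sum, w1);
	 *	// ... and so on for w2..w7
	 *
	 * The carry left by the last adcq survives until it is folded in
	 * by "adcq %r9, %rax" after the loop (%r9 is always zero).
	 */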
	decl	%r12d		/* dec does not touch the carry flag */

	dest
	movq	%rbx, (%rsi)
	dest
	movq	%r8, 8(%rsi)
	dest
	movq	%r11, 16(%rsi)
	dest
	movq	%rdx, 24(%rsi)

	dest
	movq	%r10, 32(%rsi)
	dest
	movq	%r15, 40(%rsi)
	dest
	movq	%r14, 48(%rsi)
	dest
	movq	%r13, 56(%rsi)

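	/*
	 * mov and lea do not modify flags, so CF from the adc chain and ZF
	 * from "decl %r12d" both survive the stores above and the pointer
	 * updates below, reaching "adcq %r9, %rax" and "jnz .Lloop" intact.
	 */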
	leaq	64(%rdi), %rdi
	leaq	64(%rsi), %rsi

	jnz	.Lloop

	adcq	%r9, %rax	/* fold in the carry left by the last block */

	/* do last up to 56 bytes */
.Lhandle_tail:
	/* ecx: count, rcx.63: set if the end result needs to be rol8'ed */
	movq	%rcx, %r10
	andl	$63, %ecx
	shrl	$3, %ecx	/* number of remaining 8-byte words */
	jz	.Lfold
	clc
	.p2align 4
.Lloop_8:
	source
	movq	(%rdi), %rbx
	adcq	%rbx, %rax
	decl	%ecx
	dest
	movq	%rbx, (%rsi)
	leaq	8(%rsi), %rsi	/* preserve carry */
	leaq	8(%rdi), %rdi
	jnz	.Lloop_8
	adcq	%r9, %rax	/* add in carry */

.Lfold:
	/* reduce checksum to 32 bits */
	movl	%eax, %ebx
	shrq	$32, %rax
	addl	%ebx, %eax
	adcl	%r9d, %eax	/* end-around carry */
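
	/*
	 * In C the fold above is roughly:
	 *
	 *	sum = (u32)sum + (u32)(sum >> 32);	// may carry out
	 *	sum += carry;				// end-around carry
	 *
	 * The ones' complement sum is invariant under such folds.
	 */
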
	/* do last up to 6 bytes */
.Lhandle_7:
	movl	%r10d, %ecx
	andl	$7, %ecx
.L1:	/* .Lshort rejoins the common path here */
	shrl	$1, %ecx	/* number of remaining 16-bit words */
	jz	.Lhandle_1
	movl	$2, %edx
	xorl	%ebx, %ebx
	clc
	.p2align 4
.Lloop_1:
	source
	movw	(%rdi), %bx
	adcl	%ebx, %eax
	decl	%ecx
	dest
	movw	%bx, (%rsi)
	leaq	2(%rdi), %rdi
	leaq	2(%rsi), %rsi
	jnz	.Lloop_1
	adcl	%r9d, %eax	/* add in carry */

	/* handle last odd byte */
.Lhandle_1:
	testb	$1, %r10b
	jz	.Lende
	xorl	%ebx, %ebx
	source
	movb	(%rdi), %bl
	dest
	movb	%bl, (%rsi)
	addl	%ebx, %eax
	adcl	%r9d, %eax	/* carry */

.Lende:
	testq	%r10, %r10	/* bit 63 set by .Lodd: result needs rol8 */
	js	.Lwas_odd
.Lout:
	movq	0*8(%rsp), %rbx
	movq	1*8(%rsp), %r12
	movq	2*8(%rsp), %r14
	movq	3*8(%rsp), %r13
	movq	4*8(%rsp), %r15
	addq	$5*8, %rsp
	ret

.Lshort:
	/* less than 8 bytes: take the 16-bit/1-byte tail path directly */
	movl	%ecx, %r10d
	jmp	.L1

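	/*
	 * Align the destination to 8 bytes by peeling off a 1-, 2- and/or
	 * 4-byte head, then rejoin the aligned path.
	 */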
.Lunaligned:
	xorl	%ebx, %ebx
	testb	$1, %sil
	jne	.Lodd
1:	testb	$2, %sil
	je	2f
	source
	movw	(%rdi), %bx
	dest
	movw	%bx, (%rsi)
	leaq	2(%rdi), %rdi
	subq	$2, %rcx
	leaq	2(%rsi), %rsi
	addq	%rbx, %rax
2:	testb	$4, %sil
	je	.Laligned
	source
	movl	(%rdi), %ebx
	dest
	movl	%ebx, (%rsi)
	leaq	4(%rdi), %rdi
	subq	$4, %rcx
	leaq	4(%rsi), %rsi
	addq	%rbx, %rax
	jmp	.Laligned

.Lodd:
	source
	movb	(%rdi), %bl
	dest
	movb	%bl, (%rsi)
	leaq	1(%rdi), %rdi
	leaq	1(%rsi), %rsi
	/*
	 * Decrement the length and set the MSB of %rcx in one go:
	 * rcx = ((2*rcx - 1) ror 1) == (rcx - 1) | (1 << 63).
	 * Bit 63 flags that the final result must be rol8'ed.
	 */
	leaq	-1(%rcx, %rcx), %rcx
	rorq	$1, %rcx
	shll	$8, %ebx	/* the closing rol8 moves this byte into place */
	addq	%rbx, %rax
	jmp	1b

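	/*
	 * Starting the copy one byte early means every 16-bit word of the
	 * stream was summed byte-swapped. The ones' complement sum commutes
	 * with that swap, so an 8-bit rotate of the folded result undoes it.
	 */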
.Lwas_odd:
	roll	$8, %eax
	jmp	.Lout

	/* Exception: just return 0 */
.Lfault:
	xorl	%eax, %eax
	jmp	.Lout

SYM_FUNC_END(csum_partial_copy_generic)