#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H
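/*
 * Benchmark template for the non-temporal SSE XOR routines.  The
 * xor_sse_2..xor_sse_5 helpers are declared elsewhere (presumably in
 * <asm/xor.h>, which includes this header); each takes the length in
 * bytes followed by pointers to the blocks being XORed.
 */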
static struct xor_block_template xor_block_sse = {
	.name = "generic_sse",
	.do_2 = xor_sse_2,
	.do_3 = xor_sse_3,
	.do_4 = xor_sse_4,
	.do_5 = xor_sse_5,
};
/* Also try the AVX routines */
#include <asm/xor_avx.h>
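/*
 * AVX_XOR_SPEED used below comes from xor_avx.h; it is expected to
 * collapse to a no-op when AVX support is not compiled in.
 */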
/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load into the L1 cache only, depending on how
   the CPU deals with a load to a line that is being prefetched.  */
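/*
 * XOR_TRY_TEMPLATES is expanded by the generic XOR calibration code
 * (calibrate_xor_blocks() in crypto/xor.c); each xor_speed() invocation
 * benchmarks one candidate template so the fastest can be picked.
 * xor_block_sse_pf64 is assumed to be the prefetching variant of the SSE
 * template, defined alongside the xor_sse_* helpers.
 */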
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \
do {						\
	AVX_XOR_SPEED;				\
	xor_speed(&xor_block_sse_pf64);		\
	xor_speed(&xor_block_sse);		\
} while (0)
#endif /* _ASM_X86_XOR_64_H */