Diffstat (limited to 'arch/x86/include/asm/xor_64.h')
-rw-r--r--  arch/x86/include/asm/xor_64.h  27
1 file changed, 27 insertions, 0 deletions
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h
new file mode 100644
index 000000000..546f1e3b8
--- /dev/null
+++ b/arch/x86/include/asm/xor_64.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_X86_XOR_64_H
+#define _ASM_X86_XOR_64_H
+
+static struct xor_block_template xor_block_sse = {
+	.name = "generic_sse",
+	.do_2 = xor_sse_2,
+	.do_3 = xor_sse_3,
+	.do_4 = xor_sse_4,
+	.do_5 = xor_sse_5,
+};
+
+
+/* Also try the AVX routines */
+#include <asm/xor_avx.h>
+
+/* We force the use of the SSE xor block because it can write around L2.
+ We may also be able to load into the L1 only depending on how the cpu
+ deals with a load to a line that is being prefetched. */
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES			\
+do {						\
+	AVX_XOR_SPEED;				\
+	xor_speed(&xor_block_sse_pf64);		\
+	xor_speed(&xor_block_sse);		\
+} while (0)
+
+#endif /* _ASM_X86_XOR_64_H */
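
For context on the mechanism this header plugs into: struct xor_block_template is declared in include/linux/raid/xor.h, and XOR_TRY_TEMPLATES is expanded by calibrate_xor_blocks() in crypto/xor.c, which runs xor_speed() on each candidate routine and keeps the fastest one. The standalone user-space sketch below mirrors that calibrate-and-pick pattern; it also includes an SSE2 candidate built on non-temporal stores to illustrate the "write around L2" behaviour the comment in the header refers to. The struct layout, routine names, and clock()-based timing are illustrative stand-ins of my own, not kernel code.

/*
 * Sketch of the XOR_TRY_TEMPLATES calibration pattern: benchmark
 * every candidate xor routine, keep the fastest.  Illustrative
 * stand-in only; the real machinery lives in crypto/xor.c.
 *
 * Build: cc -O2 -msse2 -o xor_calibrate xor_calibrate.c
 */
#include <emmintrin.h>		/* SSE2, including _mm_stream_si128() */
#include <stdio.h>
#include <time.h>

#define BLOCK_BYTES	4096
#define WORDS		(BLOCK_BYTES / sizeof(unsigned long))
#define ITERS		20000

static _Alignas(16) unsigned long a[WORDS], b[WORDS];

struct xor_template {		/* simplified xor_block_template stand-in */
	const char *name;
	void (*do_2)(unsigned long *dst, const unsigned long *src);
	double mbps;		/* filled in by measure() below */
};

/* Baseline: one word at a time, everything through the cache. */
static void xor_2_naive(unsigned long *dst, const unsigned long *src)
{
	for (size_t i = 0; i < WORDS; i++)
		dst[i] ^= src[i];
}

/*
 * SSE2 variant using non-temporal stores: the result is written
 * around the cache hierarchy, which is the "write around L2"
 * behaviour the header's comment describes.
 */
static void xor_2_nt(unsigned long *dst, const unsigned long *src)
{
	__m128i *d = (__m128i *)dst;
	const __m128i *s = (const __m128i *)src;

	for (size_t i = 0; i < BLOCK_BYTES / sizeof(__m128i); i++) {
		__m128i x = _mm_xor_si128(_mm_load_si128(&d[i]),
					  _mm_load_si128(&s[i]));
		_mm_stream_si128(&d[i], x);	/* bypass L1/L2 on the store */
	}
	_mm_sfence();		/* make the NT stores globally visible */
}

/* Analogue of xor_speed(): time one template over a fixed workload. */
static void measure(struct xor_template *t)
{
	clock_t start = clock();

	for (int i = 0; i < ITERS; i++)
		t->do_2(a, b);
	t->mbps = (double)ITERS * BLOCK_BYTES / 1e6 /
		  (((double)(clock() - start) / CLOCKS_PER_SEC) + 1e-9);
}

int main(void)
{
	struct xor_template candidates[] = {
		{ .name = "naive",   .do_2 = xor_2_naive },
		{ .name = "sse2_nt", .do_2 = xor_2_nt },
	};
	struct xor_template *best = &candidates[0];

	/* This loop plays the role of the XOR_TRY_TEMPLATES expansion:
	 * every candidate goes through the benchmark, fastest wins. */
	for (size_t i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		measure(&candidates[i]);
		printf("%-8s: %8.0f MB/s\n", candidates[i].name,
		       candidates[i].mbps);
		if (candidates[i].mbps > best->mbps)
			best = &candidates[i];
	}
	printf("using '%s'\n", best->name);
	return 0;
}

Note that on a 4 KiB buffer the non-temporal variant will usually lose to the plain loop, since the working set fits in L1; streaming stores pay off once the data is large enough that cached writes would evict useful lines, which is the situation the header's comment is about. That is also why the kernel benchmarks its templates at initialization rather than hard-coding a winner.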