mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-26 05:05:13 +07:00
fe5cbc6e06
v3: s-o-b comment, explanation of performance and decision for the start/stop implementation Implementing rmw functionality for RAID6 requires optimized syndrome calculation. Up to now we can only generate a complete syndrome. The target P/Q pages are always overwritten. With this patch we provide a framework for in-place P/Q modification. In the first place simply fill those functions with NULL values. xor_syndrome() has two additional parameters: start & stop. These will indicate the first and last page that are changing during a rmw run. That makes it possible to avoid several unnecessary loops and speed up calculation. The caller needs to implement the following logic to make the functions work. 1) xor_syndrome(disks, start, stop, ...): "Remove" all data of source blocks inside P/Q between (and including) start and end. 2) modify any block with start <= block <= stop 3) xor_syndrome(disks, start, stop, ...): "Reinsert" all data of source blocks into P/Q between (and including) start and end. Pages between start and stop that won't be changed should be filled with a pointer to the kernel zero page. The reasons for not taking NULL pages are: 1) Algorithms cross the whole source data line by line. Thus avoid additional branches. 2) Having a NULL page avoids calculating the XOR P parity but still needs calculation steps for the Q parity. Depending on the algorithm unrolling that might be only a difference of 2 instructions per loop. The benchmark numbers of the gen_syndrome() functions are displayed in the kernel log. Do the same for the xor_syndrome() functions. This will help to analyze performance problems and give a rough estimate how well the algorithm works. The choice of the fastest algorithm will still depend on the gen_syndrome() performance. With the start/stop page implementation the speed can vary a lot in real life. E.g. a change of page 0 & page 15 on a stripe will be harder to compute than the case where page 0 & page 1 are XOR candidates.
To avoid being too enthusiastic about the expected speeds we will run a worst-case test that simulates a change on the upper half of the stripe. So we do: 1) calculation of P/Q for the upper pages 2) continuation of Q for the lower (empty) pages Signed-off-by: Markus Stockhausen <stockhausen@collogia.de> Signed-off-by: NeilBrown <neilb@suse.de>
88 lines
2.2 KiB
Ucode
88 lines
2.2 KiB
Ucode
/* -*- linux-c -*- ------------------------------------------------------- *
|
|
*
|
|
* Copyright 2002 H. Peter Anvin - All Rights Reserved
|
|
* Copyright 2012 Tilera Corporation - All Rights Reserved
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License as published by
|
|
* the Free Software Foundation, Inc., 53 Temple Place Ste 330,
|
|
* Boston MA 02111-1307, USA; either version 2 of the License, or
|
|
* (at your option) any later version; incorporated herein by reference.
|
|
*
|
|
* ----------------------------------------------------------------------- */
|
|
|
|
/*
|
|
* tilegx$#.c
|
|
*
|
|
* $#-way unrolled TILE-Gx SIMD for RAID-6 math.
|
|
*
|
|
* This file is postprocessed using unroll.awk.
|
|
*
|
|
*/
|
|
|
|
#include <linux/raid/pq.h>
|
|
|
|
/* NBYTES(x): replicate the constant byte x into all 8 bytes of a u64,
 * using the Vector One Byte Add Immediate instruction against zero. */
# define NBYTES(x) (__insn_v1addi(0, x))
/* NSIZE: width in bytes of one 64-bit machine word processed per step. */
# define NSIZE 8
|
|
|
|
/*
|
|
* The SHLBYTE() operation shifts each byte left by 1, *not*
|
|
* rolling over into the next byte
|
|
*/
|
|
static inline __attribute_const__ u64 SHLBYTE(u64 v)
|
|
{
|
|
/* Vector One Byte Shift Left Immediate. */
|
|
return __insn_v1shli(v, 1);
|
|
}
|
|
|
|
/*
|
|
* The MASK() operation returns 0xFF in any byte for which the high
|
|
* bit is 1, 0x00 for any byte for which the high bit is 0.
|
|
*/
|
|
static inline __attribute_const__ u64 MASK(u64 v)
|
|
{
|
|
/* Vector One Byte Shift Right Signed Immediate. */
|
|
return __insn_v1shrsi(v, 7);
|
|
}
|
|
|
|
|
|
/*
 * Generate the RAID-6 P (XOR parity) and Q (Reed-Solomon syndrome)
 * pages for one stripe using TILE-Gx byte-vector instructions.
 *
 * This file is $#-way unrolled by unroll.awk: `$#` expands to the
 * unroll factor and every line containing `$$` is duplicated once per
 * unroll index, so wd$$/wq$$/wp$$/w1$$/w2$$ each become $# variables.
 *
 * disks: total device count (data disks plus P plus Q)
 * bytes: length of each page in bytes
 * ptrs:  page pointers; ptrs[0..disks-3] are data pages,
 *        ptrs[disks-2] is P and ptrs[disks-1] is Q.
 */
void raid6_tilegx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u64 *p, *q;
	int d, z, z0;

	u64 wd$$, wq$$, wp$$, w1$$, w2$$;
	/* 0x1d in every byte: the GF(2^8) reduction polynomial used by
	 * the RAID-6 Q computation. */
	u64 x1d = NBYTES(0x1d);
	u64 * z0ptr;

	z0 = disks - 3;			/* Highest data disk */
	p = (u64 *)dptr[z0+1];		/* XOR parity */
	q = (u64 *)dptr[z0+2];		/* RS syndrome */

	/* Walk the stripe, one unrolled group of 64-bit words at a time. */
	z0ptr = (u64 *)&dptr[z0][0];
	for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
		/* Seed both P and Q with the highest data disk's word. */
		wq$$ = wp$$ = *z0ptr++;
		/* Horner's rule over the remaining disks, highest first. */
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			wd$$ = *(u64 *)&dptr[z][d+$$*NSIZE];
			/* P is the plain XOR of all data words. */
			wp$$ = wp$$ ^ wd$$;
			/* Q = Q * {02} + data in GF(2^8): shift each byte
			 * left one bit, XOR 0x1d into bytes whose high bit
			 * overflowed, then XOR in the new data word. */
			w2$$ = MASK(wq$$);
			w1$$ = SHLBYTE(wq$$);
			w2$$ = w2$$ & x1d;
			w1$$ = w1$$ ^ w2$$;
			wq$$ = w1$$ ^ wd$$;
		}
		*p++ = wp$$;
		*q++ = wq$$;
	}
}
|
|
|
|
const struct raid6_calls raid6_tilegx$# = {
|
|
raid6_tilegx$#_gen_syndrome,
|
|
NULL, /* XOR not yet implemented */
|
|
NULL,
|
|
"tilegx$#",
|
|
0
|
|
};
|