mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
commit aa5b395b69
Patch series "S390 hardware support for kernel zlib", v3.

With the IBM z15 mainframe the new DFLTCC instruction is available. It implements the deflate algorithm in hardware (Nest Acceleration Unit - NXU) with compression and decompression performance estimated to be orders of magnitude faster than the current software zlib.

This patchset adds s390 hardware compression support to kernel zlib. The code is based on the userspace zlib implementation:

    https://github.com/madler/zlib/pull/410

The coding style is also preserved for future maintainability. Only a limited set of userspace zlib functions is represented in the kernel. Apart from that, all memory allocation has to be performed in advance. Thus, the workarea structures are extended with the parameter lists required for the DEFLATE CONVERSION CALL instruction. Since kernel zlib itself does not support gzip headers, only the Adler-32 checksum is processed (it can also be produced by the DFLTCC facility).

As in the userspace implementation, kernel zlib will compress in hardware on level 1, and in software on all other levels. Decompression will always happen in hardware (when enabled).

Two DFLTCC compression calls produce the same results only when they are both made on machines of the same generation, and when the respective buffers have the same offset relative to the start of the page. Therefore, care should be taken when using hardware compression if reproducible results are desired. However, it always produces standard-conforming output which can be inflated in any case.

The new kernel command line parameter 'dfltcc' is introduced to configure s390 zlib hardware support:

    Format: { on | off | def_only | inf_only | always }
     on:       s390 zlib hardware support for compression on
               level 1 and decompression (default)
     off:      no s390 zlib hardware support
     def_only: s390 zlib hardware support for deflate
               only (compression on level 1)
     inf_only: s390 zlib hardware support for inflate
               only (decompression)
     always:   same as 'on' but ignores the selected compression
               level, always using hardware support (used for debugging)

The main purpose of the integration of the NXU support into the kernel zlib is the use of hardware deflate in the btrfs filesystem with on-the-fly compression enabled. Apart from that, hardware support can also be used during boot for decompressing the kernel or the ramdisk image.

With the patch for btrfs expanding the zlib buffer from 1 to 4 pages (patch 6), the following performance results have been achieved using a ramdisk with btrfs. These are relative numbers based on throughput rate and compression ratio for zlib level 1:

    Input data              Deflate rate    Inflate rate    Compression ratio
                            NXU/Software    NXU/Software    NXU/Software
    stream of zeroes            1.46            1.02             1.00
    random ASCII data          10.44            3.00             0.96
    ASCII text (dickens)        6.21            3.33             0.94
    binary data (vmlinux)       8.37            3.90             1.02

This means that s390 hardware deflate can provide up to 10 times faster compression (on level 1) and up to 4 times faster decompression (across all compression levels) for btrfs zlib.

Disclaimer: performance results are based on IBM internal tests using the dd command-line utility on btrfs on a Fedora 30 based internal driver in native LPAR on a z15 system. Results may vary based on individual workload, configuration and software levels.

This patch (of 9): Create the zlib_dfltcc library with the s390 DEFLATE CONVERSION CALL implementation and related compression functions.
Update the zlib_deflate functions with hooks for s390 hardware support and adjust the workspace structures with the extra parameter lists required for hardware deflate.

Link: http://lkml.kernel.org/r/20200103223334.20669-2-zaslonko@linux.ibm.com
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Mikhail Zaslonko <zaslonko@linux.ibm.com>
Co-developed-by: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Chris Mason <clm@fb.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: David Sterba <dsterba@suse.com>
Cc: Eduard Shishkin <edward6@linux.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Richard Purdie <rpurdie@rpsys.net>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
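
The 'dfltcc' switch described above is a kernel command line parameter. The snippet below is a minimal sketch of how such an option can be parsed into a mode flag using early_param(); the enum, variable and function names (DFLTCC_SKETCH_*, dfltcc_mode_sketch, dfltcc_setup_sketch) are hypothetical, and only the accepted values and their meaning come from the commit message. The real parsing in the patch series lives elsewhere in the s390/zlib_dfltcc code and uses its own names.

#include <linux/init.h>
#include <linux/string.h>

/* Hypothetical mode values mirroring the documented 'dfltcc=' options. */
enum zlib_dfltcc_mode_sketch {
	DFLTCC_SKETCH_ON,	/* HW deflate on level 1 + HW inflate (default) */
	DFLTCC_SKETCH_OFF,	/* no hardware support */
	DFLTCC_SKETCH_DEF_ONLY,	/* hardware deflate only (level 1) */
	DFLTCC_SKETCH_INF_ONLY,	/* hardware inflate only */
	DFLTCC_SKETCH_ALWAYS,	/* like 'on' but ignore the compression level */
};

static enum zlib_dfltcc_mode_sketch dfltcc_mode_sketch = DFLTCC_SKETCH_ON;

static int __init dfltcc_setup_sketch(char *str)
{
	if (!str)
		return 0;
	if (!strcmp(str, "off"))
		dfltcc_mode_sketch = DFLTCC_SKETCH_OFF;
	else if (!strcmp(str, "def_only"))
		dfltcc_mode_sketch = DFLTCC_SKETCH_DEF_ONLY;
	else if (!strcmp(str, "inf_only"))
		dfltcc_mode_sketch = DFLTCC_SKETCH_INF_ONLY;
	else if (!strcmp(str, "always"))
		dfltcc_mode_sketch = DFLTCC_SKETCH_ALWAYS;
	else	/* "on" and unknown values keep the default */
		dfltcc_mode_sketch = DFLTCC_SKETCH_ON;
	return 0;
}
early_param("dfltcc", dfltcc_setup_sketch);

The deflate path would then consult this mode together with the requested level (hardware only on level 1 unless 'always' is selected), while the inflate path would check it once per stream.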
442 lines, 15 KiB, C

#ifndef DEFUTIL_H
#define DEFUTIL_H

#include <linux/zutil.h>

#define Assert(err, str)
#define Trace(dummy)
#define Tracev(dummy)
#define Tracecv(err, dummy)
#define Tracevv(dummy)

#define LENGTH_CODES 29
/* number of length codes, not counting the special END_BLOCK code */

#define LITERALS  256
/* number of literal bytes 0..255 */

#define L_CODES (LITERALS+1+LENGTH_CODES)
/* number of Literal or Length codes, including the END_BLOCK code */

#define D_CODES   30
/* number of distance codes */

#define BL_CODES  19
/* number of codes used to transfer the bit lengths */

#define HEAP_SIZE (2*L_CODES+1)
/* maximum heap size */

#define MAX_BITS 15
/* All codes must not exceed MAX_BITS bits */

#define INIT_STATE    42
#define BUSY_STATE   113
#define FINISH_STATE 666
/* Stream status */
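
/* For reference, the definitions above expand to: L_CODES = 256 + 1 + 29
 * = 286 and HEAP_SIZE = 2*286 + 1 = 573, while the distance and bit-length
 * trees declared further down hold 2*30 + 1 = 61 and 2*19 + 1 = 39 entries
 * respectively.
 */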

/* Data structure describing a single value and its code string. */
typedef struct ct_data_s {
    union {
        ush  freq;       /* frequency count */
        ush  code;       /* bit string */
    } fc;
    union {
        ush  dad;        /* father node in Huffman tree */
        ush  len;        /* length of bit string */
    } dl;
} ct_data;

#define Freq fc.freq
#define Code fc.code
#define Dad  dl.dad
#define Len  dl.len
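
/* Usage sketch (illustrative): the fc union is read as a frequency while a
 * block is being scanned and as a Huffman bit string once the tree has been
 * built, e.g.
 *
 *     tree[c].Freq++;                             counting phase
 *     send_bits(s, tree[c].Code, tree[c].Len);    emission phase (trees.c)
 *
 * where "tree" stands for one of the dyn_ltree/dyn_dtree arrays declared in
 * deflate_state below.
 */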

typedef struct static_tree_desc_s  static_tree_desc;

typedef struct tree_desc_s {
    ct_data *dyn_tree;           /* the dynamic tree */
    int     max_code;            /* largest code with non zero frequency */
    static_tree_desc *stat_desc; /* the corresponding static tree */
} tree_desc;

typedef ush Pos;
typedef unsigned IPos;

/* A Pos is an index in the character window. We use short instead of int to
 * save space in the various tables. IPos is used only for parameter passing.
 */

typedef struct deflate_state {
    z_streamp strm;         /* pointer back to this zlib stream */
    int   status;           /* as the name implies */
    Byte *pending_buf;      /* output still pending */
    ulg   pending_buf_size; /* size of pending_buf */
    Byte *pending_out;      /* next pending byte to output to the stream */
    int   pending;          /* nb of bytes in the pending buffer */
    int   noheader;         /* suppress zlib header and adler32 */
    Byte  data_type;        /* UNKNOWN, BINARY or ASCII */
    Byte  method;           /* STORED (for zip only) or DEFLATED */
    int   last_flush;       /* value of flush param for previous deflate call */

                /* used by deflate.c: */

    uInt  w_size;           /* LZ77 window size (32K by default) */
    uInt  w_bits;           /* log2(w_size)  (8..16) */
    uInt  w_mask;           /* w_size - 1 */

    Byte *window;
    /* Sliding window. Input bytes are read into the second half of the window,
     * and move to the first half later to keep a dictionary of at least wSize
     * bytes. With this organization, matches are limited to a distance of
     * wSize-MAX_MATCH bytes, but this ensures that IO is always
     * performed with a length multiple of the block size. Also, it limits
     * the window size to 64K, which is quite useful on MSDOS.
     * To do: use the user input buffer as sliding window.
     */

    ulg window_size;
    /* Actual size of window: 2*wSize, except when the user input buffer
     * is directly used as sliding window.
     */

    Pos *prev;
    /* Link to older string with same hash index. To limit the size of this
     * array to 64K, this link is maintained only for the last 32K strings.
     * An index in this array is thus a window index modulo 32K.
     */

    Pos *head; /* Heads of the hash chains or NIL. */

    uInt  ins_h;          /* hash index of string to be inserted */
    uInt  hash_size;      /* number of elements in hash table */
    uInt  hash_bits;      /* log2(hash_size) */
    uInt  hash_mask;      /* hash_size-1 */

    uInt  hash_shift;
    /* Number of bits by which ins_h must be shifted at each input
     * step. It must be such that after MIN_MATCH steps, the oldest
     * byte no longer takes part in the hash key, that is:
     *   hash_shift * MIN_MATCH >= hash_bits
     */
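
    /* Worked example: with memLevel 8 (the usual default), hash_bits is
     * 8 + 7 = 15 (compare zlib_deflate_head_memsize() below) and MIN_MATCH
     * is 3, so the smallest shift satisfying the constraint is
     * hash_shift = 5, since 5 * 3 = 15 >= hash_bits.
     */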

    long block_start;
    /* Window position at the beginning of the current output block. Gets
     * negative when the window is moved backwards.
     */

    uInt match_length;   /* length of best match */
    IPos prev_match;     /* previous match */
    int match_available; /* set if previous match exists */
    uInt strstart;       /* start of string to insert */
    uInt match_start;    /* start of matching string */
    uInt lookahead;      /* number of valid bytes ahead in window */

    uInt prev_length;
    /* Length of the best match at previous step. Matches not greater than this
     * are discarded. This is used in the lazy match evaluation.
     */

    uInt max_chain_length;
    /* To speed up deflation, hash chains are never searched beyond this
     * length. A higher limit improves compression ratio but degrades the
     * speed.
     */

    uInt max_lazy_match;
    /* Attempt to find a better match only when the current match is strictly
     * smaller than this value. This mechanism is used only for compression
     * levels >= 4.
     */
#   define max_insert_length  max_lazy_match
    /* Insert new strings in the hash table only if the match length is not
     * greater than this length. This saves time but degrades compression.
     * max_insert_length is used only for compression levels <= 3.
     */

    int level;    /* compression level (1..9) */
    int strategy; /* favor or force Huffman coding */

    uInt good_match;
    /* Use a faster search when the previous match is longer than this */

    int nice_match; /* Stop searching when current match exceeds this */

                /* used by trees.c: */
    /* Didn't use ct_data typedef below to suppress compiler warning */
    struct ct_data_s dyn_ltree[HEAP_SIZE];   /* literal and length tree */
    struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
    struct ct_data_s bl_tree[2*BL_CODES+1];  /* Huffman tree for bit lengths */

    struct tree_desc_s l_desc;  /* desc. for literal tree */
    struct tree_desc_s d_desc;  /* desc. for distance tree */
    struct tree_desc_s bl_desc; /* desc. for bit length tree */

    ush bl_count[MAX_BITS+1];
    /* number of codes at each bit length for an optimal tree */

    int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
    int heap_len;          /* number of elements in the heap */
    int heap_max;          /* element of largest frequency */
    /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
     * The same heap array is used to build all trees.
     */

    uch depth[2*L_CODES+1];
    /* Depth of each subtree used as tie breaker for trees of equal frequency
     */

    uch *l_buf; /* buffer for literals or lengths */

    uInt lit_bufsize;
    /* Size of match buffer for literals/lengths.  There are 4 reasons for
     * limiting lit_bufsize to 64K:
     *   - frequencies can be kept in 16 bit counters
     *   - if compression is not successful for the first block, all input
     *     data is still in the window so we can still emit a stored block even
     *     when input comes from standard input.  (This can also be done for
     *     all blocks if lit_bufsize is not greater than 32K.)
     *   - if compression is not successful for a file smaller than 64K, we can
     *     even emit a stored file instead of a stored block (saving 5 bytes).
     *     This is applicable only for zip (not gzip or zlib).
     *   - creating new Huffman trees less frequently may not provide fast
     *     adaptation to changes in the input data statistics. (Take for
     *     example a binary file with poorly compressible code followed by
     *     a highly compressible string table.) Smaller buffer sizes give
     *     fast adaptation but have of course the overhead of transmitting
     *     trees more frequently.
     *   - I can't count above 4
     */

    uInt last_lit; /* running index in l_buf */

    ush *d_buf;
    /* Buffer for distances. To simplify the code, d_buf and l_buf have
     * the same number of elements. To use different lengths, an extra flag
     * array would be necessary.
     */

    ulg opt_len;        /* bit length of current block with optimal trees */
    ulg static_len;     /* bit length of current block with static trees */
    ulg compressed_len; /* total bit length of compressed file */
    uInt matches;       /* number of string matches in current block */
    int last_eob_len;   /* bit length of EOB code for last block */

#ifdef DEBUG_ZLIB
    ulg bits_sent;      /* bit length of the compressed data */
#endif

    ush bi_buf;
    /* Output buffer. bits are inserted starting at the bottom (least
     * significant bits).
     */
    int bi_valid;
    /* Number of valid bits in bi_buf.  All bits above the last valid bit
     * are always zero.
     */

} deflate_state;
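
/* The ins_h/hash_shift/hash_mask fields above drive deflate's rolling hash.
 * The helper below is an illustrative sketch only (the name is ours, not part
 * of the original header); the real update is done by macros in deflate.c.
 */
static inline uInt defutil_update_hash_sketch(const deflate_state *s,
					      uInt h, Byte c)
{
	/* Shift in the new byte; after MIN_MATCH updates the oldest byte has
	 * been shifted out of the masked hash_bits window, which is exactly
	 * the hash_shift constraint documented above.
	 */
	return ((h << s->hash_shift) ^ c) & s->hash_mask;
}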

#ifdef CONFIG_ZLIB_DFLTCC
#define zlib_deflate_window_memsize(windowBits) \
	(2 * (1 << (windowBits)) * sizeof(Byte) + PAGE_SIZE)
#else
#define zlib_deflate_window_memsize(windowBits) \
	(2 * (1 << (windowBits)) * sizeof(Byte))
#endif
#define zlib_deflate_prev_memsize(windowBits) \
	((1 << (windowBits)) * sizeof(Pos))
#define zlib_deflate_head_memsize(memLevel) \
	((1 << ((memLevel)+7)) * sizeof(Pos))
#define zlib_deflate_overlay_memsize(memLevel) \
	((1 << ((memLevel)+6)) * (sizeof(ush)+2))
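
/* Worked example: for the usual windowBits of 15,
 * zlib_deflate_window_memsize(15) is 2 * 32768 = 65536 bytes without
 * CONFIG_ZLIB_DFLTCC and 65536 + PAGE_SIZE (69632 bytes with 4 KiB pages)
 * with it. The extra page presumably leaves room to place the window at a
 * page-aligned address, which matters to DFLTCC given the page-offset
 * sensitivity noted in the commit message above.
 */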

/* Output a byte on the stream.
 * IN assertion: there is enough room in pending_buf.
 */
#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}

#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/* Minimum amount of lookahead, except at the end of the input file.
 * See deflate.c for comments about the MIN_MATCH+1.
 */

#define MAX_DIST(s)  ((s)->w_size-MIN_LOOKAHEAD)
/* In order to simplify the code, particularly on 16 bit machines, match
 * distances are limited to MAX_DIST instead of WSIZE.
 */
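
/* Worked example: deflate's MAX_MATCH is 258 and MIN_MATCH is 3 (pulled in
 * via <linux/zutil.h>), so MIN_LOOKAHEAD is 262 and, for the default 32K
 * window, MAX_DIST(s) is 32768 - 262 = 32506.
 */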

        /* in trees.c */
void zlib_tr_init (deflate_state *s);
int  zlib_tr_tally (deflate_state *s, unsigned dist, unsigned lc);
ulg  zlib_tr_flush_block (deflate_state *s, char *buf, ulg stored_len,
			  int eof);
void zlib_tr_align (deflate_state *s);
void zlib_tr_stored_block (deflate_state *s, char *buf, ulg stored_len,
			   int eof);
void zlib_tr_stored_type_only (deflate_state *);

/* ===========================================================================
 * Output a short LSB first on the stream.
 * IN assertion: there is enough room in pendingBuf.
 */
#define put_short(s, w) { \
    put_byte(s, (uch)((w) & 0xff)); \
    put_byte(s, (uch)((ush)(w) >> 8)); \
}
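
/* Example: put_short(s, 0x1234) appends the bytes 0x34 and then 0x12 to
 * pending_buf, i.e. least significant byte first.
 */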

/* ===========================================================================
 * Reverse the first len bits of a code, using straightforward code (a faster
 * method would use a table)
 * IN assertion: 1 <= len <= 15
 */
static inline unsigned bi_reverse(
    unsigned code, /* the value to invert */
    int len        /* its bit length */
)
{
    register unsigned res = 0;
    do {
        res |= code & 1;
        code >>= 1, res <<= 1;
    } while (--len > 0);
    return res >> 1;
}
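
/* Example: bi_reverse(0x06, 5) == 0x0c; the 5-bit string 00110 read back to
 * front is 01100. The reversal is needed because Huffman codes must be
 * transmitted starting with their most significant bit, while send_bits()
 * below packs the least significant bits of its argument first.
 */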

/* ===========================================================================
 * Flush the bit buffer, keeping at most 7 bits in it.
 */
static inline void bi_flush(deflate_state *s)
{
    if (s->bi_valid == 16) {
        put_short(s, s->bi_buf);
        s->bi_buf = 0;
        s->bi_valid = 0;
    } else if (s->bi_valid >= 8) {
        put_byte(s, (Byte)s->bi_buf);
        s->bi_buf >>= 8;
        s->bi_valid -= 8;
    }
}

/* ===========================================================================
 * Flush the bit buffer and align the output on a byte boundary
 */
static inline void bi_windup(deflate_state *s)
{
    if (s->bi_valid > 8) {
        put_short(s, s->bi_buf);
    } else if (s->bi_valid > 0) {
        put_byte(s, (Byte)s->bi_buf);
    }
    s->bi_buf = 0;
    s->bi_valid = 0;
#ifdef DEBUG_ZLIB
    s->bits_sent = (s->bits_sent+7) & ~7;
#endif
}
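
/* Worked example: with bi_valid == 13, bi_flush() emits one byte (the low
 * 8 bits of bi_buf) and keeps the remaining 5 bits, whereas bi_windup()
 * emits all 13 bits as a full 16-bit short (the top 3 bits are zero
 * padding) and resets the bit buffer.
 */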

typedef enum {
    need_more,      /* block not completed, need more input or more output */
    block_done,     /* block flush performed */
    finish_started, /* finish started, need only more output at next deflate */
    finish_done     /* finish done, accept no more input or output */
} block_state;

#define Buf_size (8 * 2*sizeof(char))
/* Number of bits used within bi_buf. (bi_buf might be implemented on
 * more than 16 bits on some systems.)
 */

/* ===========================================================================
 * Send a value on a given number of bits.
 * IN assertion: length <= 16 and value fits in length bits.
 */
#ifdef DEBUG_ZLIB
static void send_bits (deflate_state *s, int value, int length);

static void send_bits(
    deflate_state *s,
    int value,  /* value to send */
    int length  /* number of bits */
)
{
    Tracevv((stderr," l %2d v %4x ", length, value));
    Assert(length > 0 && length <= 15, "invalid length");
    s->bits_sent += (ulg)length;

    /* If not enough room in bi_buf, use (valid) bits from bi_buf and
     * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
     * unused bits in value.
     */
    if (s->bi_valid > (int)Buf_size - length) {
        s->bi_buf |= (value << s->bi_valid);
        put_short(s, s->bi_buf);
        s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
        s->bi_valid += length - Buf_size;
    } else {
        s->bi_buf |= value << s->bi_valid;
        s->bi_valid += length;
    }
}
#else /* !DEBUG_ZLIB */

#define send_bits(s, value, length) \
{ int len = length;\
  if (s->bi_valid > (int)Buf_size - len) {\
    int val = value;\
    s->bi_buf |= (val << s->bi_valid);\
    put_short(s, s->bi_buf);\
    s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
    s->bi_valid += len - Buf_size;\
  } else {\
    s->bi_buf |= (value) << s->bi_valid;\
    s->bi_valid += len;\
  }\
}
#endif /* DEBUG_ZLIB */
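
/* Worked example: suppose bi_valid == 10 and a 9-bit value is sent. Since
 * 10 > 16 - 9, the low 6 bits of the value fill bi_buf to 16 bits, the full
 * short is flushed with put_short(), the remaining 3 bits are kept in
 * bi_buf, and bi_valid becomes 10 + 9 - 16 = 3.
 */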

static inline void zlib_tr_send_bits(
    deflate_state *s,
    int value,
    int length
)
{
    send_bits(s, value, length);
}

/* =========================================================================
 * Flush as much pending output as possible. All deflate() output goes
 * through this function so some applications may wish to modify it
 * to avoid allocating a large strm->next_out buffer and copying into it.
 * (See also read_buf()).
 */
static inline void flush_pending(
	z_streamp strm
)
{
	deflate_state *s = (deflate_state *) strm->state;
	unsigned len = s->pending;

	if (len > strm->avail_out) len = strm->avail_out;
	if (len == 0) return;

	if (strm->next_out != NULL) {
		memcpy(strm->next_out, s->pending_out, len);
		strm->next_out += len;
	}
	s->pending_out += len;
	strm->total_out += len;
	strm->avail_out -= len;
	s->pending -= len;
	if (s->pending == 0) {
		s->pending_out = s->pending_buf;
	}
}
#endif /* DEFUTIL_H */