// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

#include "debug.h"
#include "ntfs_fs.h"
/* Src buffer is zero. */
#define LZNT_ERROR_ALL_ZEROS	1
#define LZNT_CHUNK_SIZE		0x1000

struct lznt_hash {
	const u8 *p1;
	const u8 *p2;
};

struct lznt {
	const u8 *unc;
	const u8 *unc_end;
	bool std;

	size_t max_len;
	const u8 *best_match;

	struct lznt_hash hash[LZNT_CHUNK_SIZE];
};
static inline size_t get_match_len(const u8 *ptr, const u8 *end, const u8 *prev,
				   size_t max_len)
{
	size_t len = 0;

	while (ptr + len < end && ptr[len] == prev[len] && ++len < max_len)
		;
	return len;
}
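/*
 * Illustrative note (not part of the original file): with ptr pointing at
 * "abcabcq", prev at "abcabcd", end past the end of ptr and max_len == 8,
 * get_match_len() returns 6 - the first six bytes agree and the seventh
 * differs, so the scan stops before reaching max_len.
 */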
static size_t longest_match_std(const u8 *src, struct lznt *ctx)
{
	size_t hash_index;
	size_t len1 = 0, len2 = 0;
	const u8 **hash;

	hash_index =
		((40543U * ((((src[0] << 4) ^ src[1]) << 4) ^ src[2])) >> 4) &
		(LZNT_CHUNK_SIZE - 1);

	hash = &(ctx->hash[hash_index].p1);

	if (hash[0] >= ctx->unc && hash[0] < src && hash[0][0] == src[0] &&
	    hash[0][1] == src[1] && hash[0][2] == src[2]) {
		len1 = 3;
		if (ctx->max_len > 3)
			len1 += get_match_len(src + 3, ctx->unc_end,
					      hash[0] + 3, ctx->max_len - 3);
	}

	if (hash[1] >= ctx->unc && hash[1] < src && hash[1][0] == src[0] &&
	    hash[1][1] == src[1] && hash[1][2] == src[2]) {
		len2 = 3;
		if (ctx->max_len > 3)
			len2 += get_match_len(src + 3, ctx->unc_end,
					      hash[1] + 3, ctx->max_len - 3);
	}

	/* Compare two matches and select the best one. */
	if (len1 < len2) {
		ctx->best_match = hash[1];
		len1 = len2;
	} else {
		ctx->best_match = hash[0];
	}

	hash[1] = hash[0];
	hash[0] = src;
	return len1;
}
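/*
 * Illustrative note (not part of the original file): the standard match
 * finder hashes the three bytes at @src into one of LZNT_CHUNK_SIZE buckets
 * and keeps the two most recent positions per bucket, so at most two
 * candidate matches are compared per input position.  If "abc" was last
 * seen at two earlier points of the chunk, hash[0] and hash[1] point there
 * and the longer of the two matches wins.
 */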
static size_t longest_match_best(const u8 *src, struct lznt *ctx)
{
	size_t max_len;
	const u8 *ptr;

	if (ctx->unc >= src || !ctx->max_len)
		return 0;

	max_len = 0;
	for (ptr = ctx->unc; ptr < src; ++ptr) {
		size_t len =
			get_match_len(src, ctx->unc_end, ptr, ctx->max_len);
		if (len >= max_len) {
			max_len = len;
			ctx->best_match = ptr;
		}
	}

	return max_len >= 3 ? max_len : 0;
}
static const size_t s_max_len[] = {
	0x1002, 0x802, 0x402, 0x202, 0x102, 0x82, 0x42, 0x22, 0x12,
};

static const size_t s_max_off[] = {
	0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
};
static inline u16 make_pair(size_t offset, size_t len, size_t index)
{
	return ((offset - 1) << (12 - index)) |
	       ((len - 3) & (((1 << (12 - index)) - 1)));
}

static inline size_t parse_pair(u16 pair, size_t *offset, size_t index)
{
	*offset = 1 + (pair >> (12 - index));
	return 3 + (pair & ((1 << (12 - index)) - 1));
}
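/*
 * Illustrative note (not part of the original file): a match is packed into
 * a 16-bit value whose split between offset and length bits depends on how
 * far into the chunk the match starts.  At @index the offset may use up to
 * (4 + index) bits (s_max_off[index] == 0x10 << index) and the length the
 * remaining (12 - index) bits (s_max_len[index] == (1 << (12 - index)) + 2).
 * For example, with index == 0, offset == 5 and len == 10:
 *
 *	make_pair(5, 10, 0) == ((5 - 1) << 12) | (10 - 3) == 0x4007
 *	parse_pair(0x4007, &offset, 0) sets offset = 5 and returns 10
 */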
/*
 * compress_chunk
 *
 * Return:
 * * 0	- Ok, @cmpr contains @cmpr_chunk_size bytes of compressed data.
 * * 1	- Input buffer is full zero.
 * * -2 - The compressed buffer is too small to hold the compressed data.
 */
static inline int compress_chunk(size_t (*match)(const u8 *, struct lznt *),
				 const u8 *unc, const u8 *unc_end, u8 *cmpr,
				 u8 *cmpr_end, size_t *cmpr_chunk_size,
				 struct lznt *ctx)
{
	size_t cnt = 0;
	size_t idx = 0;
	const u8 *up = unc;
	u8 *cp = cmpr + 3;
	u8 *cp2 = cmpr + 2;
	u8 not_zero = 0;
	/* Control byte of 8-bit values: ( 0 - means byte as is, 1 - short pair ). */
	u8 ohdr = 0;
	u8 *last;
	u16 t16;

	if (unc + LZNT_CHUNK_SIZE < unc_end)
		unc_end = unc + LZNT_CHUNK_SIZE;

	last = min(cmpr + LZNT_CHUNK_SIZE + sizeof(short), cmpr_end);

	ctx->unc = unc;
	ctx->unc_end = unc_end;
	ctx->max_len = s_max_len[0];

	while (up < unc_end) {
		size_t max_len;

		while (unc + s_max_off[idx] < up)
			ctx->max_len = s_max_len[++idx];

		/* Find match. */
		max_len = up + 3 <= unc_end ? (*match)(up, ctx) : 0;

		if (!max_len) {
			if (cp >= last)
				goto NotCompressed;
			not_zero |= *cp++ = *up++;
		} else if (cp + 1 >= last) {
			goto NotCompressed;
		} else {
			t16 = make_pair(up - ctx->best_match, max_len, idx);
			*cp++ = t16;
			*cp++ = t16 >> 8;

			ohdr |= 1 << cnt;
			up += max_len;
		}

		cnt = (cnt + 1) & 7;
		if (!cnt) {
			*cp2 = ohdr;
			ohdr = 0;
			cp2 = cp;
			cp += 1;
		}
	}

	if (cp2 < last)
		*cp2 = ohdr;
	else
		cp -= 1;

	*cmpr_chunk_size = cp - cmpr;

	t16 = (*cmpr_chunk_size - 3) | 0xB000;
	cmpr[0] = t16;
	cmpr[1] = t16 >> 8;

	return not_zero ? 0 : LZNT_ERROR_ALL_ZEROS;

NotCompressed:

	if ((cmpr + LZNT_CHUNK_SIZE + sizeof(short)) > last)
		return -2;

	/*
	 * Copy non cmpr data.
	 * 0x3FFF == ((LZNT_CHUNK_SIZE + 2 - 3) | 0x3000)
	 */
	cmpr[0] = 0xff;
	cmpr[1] = 0x3f;

	memcpy(cmpr + sizeof(short), unc, LZNT_CHUNK_SIZE);
	*cmpr_chunk_size = LZNT_CHUNK_SIZE + sizeof(short);

	return 0;
}
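/*
 * Illustrative note (not part of the original file): each output chunk
 * starts with a little-endian u16 header.  Bit 15 set means the chunk is
 * compressed, bits 14..12 hold the signature value 3, and bits 11..0 hold
 * the chunk size minus three, which is why the compressed header above is
 * (*cmpr_chunk_size - 3) | 0xB000 and a raw 4096-byte chunk is stored
 * under 0x3FFF == ((LZNT_CHUNK_SIZE + 2 - 3) | 0x3000).
 */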
static inline ssize_t decompress_chunk(u8 *unc, u8 *unc_end, const u8 *cmpr,
				       const u8 *cmpr_end)
{
	u8 *up = unc;
	u8 ch = *cmpr++;
	size_t bit = 0;
	size_t index = 0;
	u16 pair;
	size_t offset, length;

	/* Do decompression until pointers are inside range. */
	while (up < unc_end && cmpr < cmpr_end) {
		// Return err if more than LZNT_CHUNK_SIZE bytes are written.
		if (up - unc > LZNT_CHUNK_SIZE)
			return -EINVAL;

		/* Correct index. */
		while (unc + s_max_off[index] < up)
			index += 1;

		/* Check the current flag for zero. */
		if (!(ch & (1 << bit))) {
			/* Just copy byte. */
			*up++ = *cmpr++;
			goto next;
		}

		/* Check for boundary. */
		if (cmpr + 1 >= cmpr_end)
			return -EINVAL;

		/* Read a short from little endian stream. */
		pair = cmpr[1];
		pair <<= 8;
		pair |= cmpr[0];

		cmpr += 2;

		/* Translate packed information into offset and length. */
		length = parse_pair(pair, &offset, index);

		/* Check offset for boundary. */
		if (unc + offset > up)
			return -EINVAL;

		/* Truncate the length if necessary. */
		if (up + length >= unc_end)
			length = unc_end - up;

		/* Now we copy bytes. This is the heart of LZ algorithm. */
		for (; length > 0; length--, up++)
			*up = *(up - offset);

next:
		/* Advance flag bit value. */
		bit = (bit + 1) & 7;

		if (!bit) {
			if (cmpr >= cmpr_end)
				break;
			ch = *cmpr++;
		}
	}

	/* Return the size of uncompressed data. */
	return up - unc;
}
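/*
 * Illustrative note (not part of the original file): the byte-by-byte copy
 * above deliberately allows @offset to be smaller than @length, so the loop
 * reads bytes it has just written.  For example, offset == 1 and
 * length == 8 replicate the single preceding byte eight times, which is how
 * LZ77-style coding expresses runs.
 */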
/*
 * get_lznt_ctx
 * @level: 0 - Standard compression.
 *	   !0 - Best compression, requires a lot of cpu.
 */
struct lznt *get_lznt_ctx(int level)
{
	struct lznt *r = kzalloc(level ? offsetof(struct lznt, hash) :
					 sizeof(struct lznt),
				 GFP_NOFS);

	if (r)
		r->std = !level;
	return r;
}
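/*
 * Illustrative note (not part of the original file): only the standard
 * match finder uses the hash table, so a "best compression" context is
 * allocated without it (offsetof(struct lznt, hash) bytes instead of the
 * full sizeof(struct lznt)).
 */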
/*
 * compress_lznt - Compresses @unc into @cmpr
 *
 * Return:
 * * +x - Ok, @cmpr contains 'final_compressed_size' bytes of compressed data.
 * * 0 - Input buffer is full zero.
 */
size_t compress_lznt(const void *unc, size_t unc_size, void *cmpr,
		     size_t cmpr_size, struct lznt *ctx)
{
	int err;
	size_t (*match)(const u8 *src, struct lznt *ctx);
	u8 *p = cmpr;
	u8 *end = p + cmpr_size;
	const u8 *unc_chunk = unc;
	const u8 *unc_end = unc_chunk + unc_size;
	bool is_zero = true;

	if (ctx->std) {
		match = &longest_match_std;
		memset(ctx->hash, 0, sizeof(ctx->hash));
	} else {
		match = &longest_match_best;
	}

	/* Compression cycle. */
	for (; unc_chunk < unc_end; unc_chunk += LZNT_CHUNK_SIZE) {
		cmpr_size = 0;
		err = compress_chunk(match, unc_chunk, unc_end, p, end,
				     &cmpr_size, ctx);
		if (err < 0)
			return unc_size;

		if (is_zero && err != LZNT_ERROR_ALL_ZEROS)
			is_zero = false;

		p += cmpr_size;
	}

	if (p <= end - 2)
		*(u16 *)p = 0;

	return is_zero ? 0 : PtrOffset(cmpr, p);
}
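/*
 * Usage sketch (illustrative only, not part of the original file; the
 * buffer names unc_buf/cmpr_buf and the wrapper example_compress() are
 * placeholders).  A context from get_lznt_ctx() may be reused for many
 * calls and is freed with kfree().
 */
#if 0
static size_t example_compress(const void *unc_buf, size_t unc_len,
			       void *cmpr_buf, size_t cmpr_max)
{
	struct lznt *ctx = get_lznt_ctx(0);	/* 0 == standard compression */
	size_t cmpr_len;

	if (!ctx)
		return unc_len; /* Treat allocation failure as "not compressible". */

	cmpr_len = compress_lznt(unc_buf, unc_len, cmpr_buf, cmpr_max, ctx);
	kfree(ctx);

	return cmpr_len; /* 0 means the input was all zero bytes. */
}
#endif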
/*
 * decompress_lznt - Decompress @cmpr into @unc.
 */
ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
			size_t unc_size)
{
	const u8 *cmpr_chunk = cmpr;
	const u8 *cmpr_end = cmpr_chunk + cmpr_size;
	u8 *unc_chunk = unc;
	u8 *unc_end = unc_chunk + unc_size;
	u16 chunk_hdr;

	if (cmpr_size < sizeof(short))
		return -EINVAL;

	/* Read chunk header. */
	chunk_hdr = cmpr_chunk[1];
	chunk_hdr <<= 8;
	chunk_hdr |= cmpr_chunk[0];

	/* Loop through decompressing chunks. */
	for (;;) {
		size_t chunk_size_saved;
		size_t unc_use;
		size_t cmpr_use = 3 + (chunk_hdr & (LZNT_CHUNK_SIZE - 1));

		/* Check that the chunk actually fits the supplied buffer. */
		if (cmpr_chunk + cmpr_use > cmpr_end)
			return -EINVAL;

		/* First make sure the chunk contains compressed data. */
		if (chunk_hdr & 0x8000) {
			/* Decompress a chunk and return if we get an error. */
			ssize_t err =
				decompress_chunk(unc_chunk, unc_end,
						 cmpr_chunk + sizeof(chunk_hdr),
						 cmpr_chunk + cmpr_use);
			if (err < 0)
				return err;
			unc_use = err;
		} else {
			/* This chunk does not contain compressed data. */
			unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end ?
					  unc_end - unc_chunk :
					  LZNT_CHUNK_SIZE;

			if (cmpr_chunk + sizeof(chunk_hdr) + unc_use >
			    cmpr_end) {
				return -EINVAL;
			}

			memcpy(unc_chunk, cmpr_chunk + sizeof(chunk_hdr),
			       unc_use);
		}

		/* Advance pointers. */
		cmpr_chunk += cmpr_use;
		unc_chunk += unc_use;

		/* Check for the end of unc buffer. */
		if (unc_chunk >= unc_end)
			break;

		/* Proceed the next chunk. */
		if (cmpr_chunk > cmpr_end - 2)
			break;

		chunk_size_saved = LZNT_CHUNK_SIZE;

		/* Read chunk header. */
		chunk_hdr = cmpr_chunk[1];
		chunk_hdr <<= 8;
		chunk_hdr |= cmpr_chunk[0];

		/* Check for the end of compressed stream. */
		if (!chunk_hdr)
			break;

		/* Check the size of unc buffer. */
		if (unc_use < chunk_size_saved) {
			size_t t1 = chunk_size_saved - unc_use;
			u8 *t2 = unc_chunk + t1;

			/* 'Zero' memory. */
			if (t2 >= unc_end)
				break;

			memset(unc_chunk, 0, t1);
			unc_chunk = t2;
		}
	}

	/* Check compression boundary. */
	if (cmpr_chunk > cmpr_end)
		return -EINVAL;

	/*
	 * The unc size is just a difference between current
	 * pointer and original one.
	 */
	return PtrOffset(unc, unc_chunk);
}
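/*
 * Usage sketch (illustrative only, not part of the original file; the
 * buffer names and the wrapper example_decompress() are placeholders).
 * Decompression needs no struct lznt context.
 */
#if 0
static ssize_t example_decompress(const void *cmpr_buf, size_t cmpr_len,
				  void *unc_buf, size_t unc_max)
{
	/* Returns the number of bytes written to unc_buf, or -EINVAL. */
	return decompress_lznt(cmpr_buf, cmpr_len, unc_buf, unc_max);
}
#endif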