const std = @import("std");
const assert = std.debug.assert;
const fmt = std.fmt;
const io = std.io;
const math = std.math;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const deflate_const = @import("deflate_const.zig");
const fast = @import("deflate_fast.zig");
const hm_bw = @import("huffman_bit_writer.zig");
const mu = @import("mem_utils.zig");
const token = @import("token.zig");
pub const Compression = enum(i5) {
    /// huffman_only disables Lempel-Ziv match searching and only performs
    /// Huffman entropy encoding. This mode is useful in compressing data
    /// that has already been compressed with an LZ-style algorithm (e.g.
    /// Snappy or LZ4) that lacks an entropy encoder. Compression gains are
    /// achieved when certain bytes in the input stream occur more frequently
    /// than others.
    ///
    /// Note that huffman_only produces a compressed output that is
    /// RFC 1951 compliant; any valid DEFLATE decompressor can decompress it.
    huffman_only = -2,

    /// Same as level_6.
    default_compression = -1,

    /// Does not attempt any compression; only adds the necessary DEFLATE framing.
    no_compression = 0,

    /// Prioritizes speed over output size.
    best_speed = 1,
    level_2 = 2,
    level_3 = 3,
    level_4 = 4,
    level_5 = 5,
    level_6 = 6,
    level_7 = 7,
    level_8 = 8,

    /// Prioritizes output size over speed.
    best_compression = 9,
};
const log_window_size = 15;
const window_size = 1 << log_window_size;
const window_mask = window_size - 1;
const base_match_length = deflate_const.base_match_length; // smallest match length per RFC 1951, section 3.2.5
const min_match_length = 4; // smallest match length that this compressor actually emits
const max_match_length = deflate_const.max_match_length;
const base_match_offset = deflate_const.base_match_offset; // smallest match offset
const max_match_offset = deflate_const.max_match_offset; // largest match offset
const max_flate_block_tokens = 1 << 14;
const max_store_block_size = deflate_const.max_store_block_size;
const hash_bits = 17; // after 17 bits, performance degrades
const hash_size = 1 << hash_bits;
const hash_mask = (1 << hash_bits) - 1;
const max_hash_offset = 1 << 24;
const skip_never = math.maxInt(u32);
const CompressionLevel = struct {
    good: u16, // match length at which to reduce chain search effort
    lazy: u16, // match length below which lazy matching is still attempted
    nice: u16, // match length that is "good enough" to stop searching
    chain: u16, // how many hash-chain links to inspect
    fast_skip_hashing: u32,
};
fn levels(compression: Compression) CompressionLevel {
    switch (compression) {
        .no_compression,
        .best_speed, // best_speed uses a custom algorithm; see deflate_fast.zig
        .huffman_only,
        => return .{
            .good = 0,
            .lazy = 0,
            .nice = 0,
            .chain = 0,
            .fast_skip_hashing = 0,
        },

        // For levels 2-3 we don't bother trying with lazy matches.
        .level_2 => return .{
            .good = 4,
            .lazy = 0,
            .nice = 16,
            .chain = 8,
            .fast_skip_hashing = 5,
        },
        .level_3 => return .{
            .good = 4,
            .lazy = 0,
            .nice = 32,
            .chain = 32,
            .fast_skip_hashing = 6,
        },

        // Levels 4-9 use increasingly more lazy matching and increasingly
        // stringent conditions for "good enough".
        .level_4 => return .{
            .good = 4,
            .lazy = 4,
            .nice = 16,
            .chain = 16,
            .fast_skip_hashing = skip_never,
        },
        .level_5 => return .{
            .good = 8,
            .lazy = 16,
            .nice = 32,
            .chain = 32,
            .fast_skip_hashing = skip_never,
        },
        .default_compression,
        .level_6,
        => return .{
            .good = 8,
            .lazy = 16,
            .nice = 128,
            .chain = 128,
            .fast_skip_hashing = skip_never,
        },
        .level_7 => return .{
            .good = 8,
            .lazy = 32,
            .nice = 128,
            .chain = 256,
            .fast_skip_hashing = skip_never,
        },
        .level_8 => return .{
            .good = 32,
            .lazy = 128,
            .nice = 258,
            .chain = 1024,
            .fast_skip_hashing = skip_never,
        },
        .best_compression => return .{
            .good = 32,
            .lazy = 258,
            .nice = 258,
            .chain = 4096,
            .fast_skip_hashing = skip_never,
        },
    }
}
// matchLen returns the number of matching bytes in a and b, up to length max.
// Both slices must be at least max bytes in size.
fn matchLen(a: []u8, b: []u8, max: u32) u32 {
    var bounded_a = a[0..max];
    var bounded_b = b[0..max];
    for (bounded_a) |av, i| {
        if (bounded_b[i] != av) {
            return @intCast(u32, i);
        }
    }
    return max;
}
const hash_mul = 0x1e35a7bd;

// hash4 returns a hash representation of the first 4 bytes of the slice.
fn hash4(b: []u8) u32 {
    return ((@as(u32, b[3]) |
        @as(u32, b[2]) << 8 |
        @as(u32, b[1]) << 16 |
        @as(u32, b[0]) << 24) *% hash_mul) >> (32 - hash_bits);
}
// bulkHash4 computes, for each 4-byte window of b, the same hash as hash4,
// storing the results in dst. It returns the last raw (unhashed) 32-bit value.
fn bulkHash4(b: []u8, dst: []u32) u32 {
    if (b.len < min_match_length) {
        return 0;
    }
    var hb =
        @as(u32, b[3]) |
        @as(u32, b[2]) << 8 |
        @as(u32, b[1]) << 16 |
        @as(u32, b[0]) << 24;
    dst[0] = (hb *% hash_mul) >> (32 - hash_bits);
    var end = b.len - min_match_length + 1;
    var i: u32 = 1;
    while (i < end) : (i += 1) {
        hb = (hb << 8) | @as(u32, b[i + 3]);
        dst[i] = (hb *% hash_mul) >> (32 - hash_bits);
    }
    return hb;
}
pub const CompressorOptions = struct {
    /// The compression level to use; .default_compression maps to level_6.
    level: Compression = .default_compression,
    /// Optional preset dictionary used to prime the sliding window.
    dictionary: ?[]const u8 = null,
};
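/// Creates a compressor that deflates everything written to it and passes the
/// compressed bytes on to `writer`. Call close() to finish the stream and
/// deinit() to release memory.
///
/// Illustrative usage sketch (assumes `gpa` is some allocator and uses a
/// std.ArrayList(u8) as the sink):
///
///     var output = std.ArrayList(u8).init(gpa);
///     defer output.deinit();
///     var comp = try compressor(gpa, output.writer(), .{ .level = .best_speed });
///     defer comp.deinit();
///     _ = try comp.write("hello, deflate");
///     try comp.close();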
pub fn compressor(
    allocator: Allocator,
    writer: anytype,
    options: CompressorOptions,
) !Compressor(@TypeOf(writer)) {
    return Compressor(@TypeOf(writer)).init(allocator, writer, options);
}
pub fn Compressor(comptime WriterType: type) type {
    return struct {
        const Self = @This();
        /// io.Writer interface: data written to it is compressed before being
        /// passed on to the underlying writer.
        pub const Writer = io.Writer(*Self, Error, write);
        /// Returns a Writer that compresses everything written to it.
        pub fn writer(self: *Self) Writer {
            return .{ .context = self };
        }
        pub const Error = WriterType.Error;
        allocator: Allocator,
        compression: Compression,
        compression_level: CompressionLevel,

        // Underlying bit writer that produces the DEFLATE stream.
        hm_bw: hm_bw.HuffmanBitWriter(WriterType) = undefined,
        bulk_hasher: std.meta.FnPtr(fn ([]u8, []u32) u32),
        sync: bool, // requesting flush
        best_speed_enc: *fast.DeflateFast, // encoder used for best_speed

        // Input hash chains:
        // hash_head[hash_value] contains the largest input index with the
        // specified hash value. If hash_head[hash_value] is within the current
        // window, then hash_prev[hash_head[hash_value] & window_mask] contains
        // the previous index with the same hash value.
        chain_head: u32,
        hash_head: []u32, // heads of the hash chains
        hash_prev: []u32, // link to previous index with the same hash
        hash_offset: u32,

        // input window: unprocessed data is window[index..window_end]
        index: u32,
        window: []u8,
        window_end: usize,
        block_start: usize, // window index where current tokens start
        byte_available: bool, // if true, still need to process window[index - 1]

        // queued output tokens
        tokens: []token.Token,
        tokens_count: u16,

        // deflate state
        length: u32,
        offset: u32,
        hash: u32,
        max_insert_index: usize,
        err: bool,

        // hash_match must be able to hold hashes for the maximum match length
        hash_match: []u32,

        // optional preset dictionary applied on init and reset
        dictionary: ?[]const u8,
        fn fillDeflate(self: *Self, b: []const u8) u32 {
            if (self.index >= 2 * window_size - (min_match_length + max_match_length)) {
                // shift the window by window_size
                mem.copy(u8, self.window, self.window[window_size .. 2 * window_size]);
                self.index -= window_size;
                self.window_end -= window_size;
                if (self.block_start >= window_size) {
                    self.block_start -= window_size;
                } else {
                    self.block_start = math.maxInt(u32);
                }
                self.hash_offset += window_size;
                if (self.hash_offset > max_hash_offset) {
                    var delta = self.hash_offset - 1;
                    self.hash_offset -= delta;
                    self.chain_head -|= delta;
                    // Iterate over slices instead of arrays to avoid copying
                    // the entire table onto the stack.
                    for (self.hash_prev) |v, i| {
                        if (v > delta) {
                            self.hash_prev[i] = @intCast(u32, v - delta);
                        } else {
                            self.hash_prev[i] = 0;
                        }
                    }
                    for (self.hash_head) |v, i| {
                        if (v > delta) {
                            self.hash_head[i] = @intCast(u32, v - delta);
                        } else {
                            self.hash_head[i] = 0;
                        }
                    }
                }
            }
            var n = mu.copy(self.window[self.window_end..], b);
            self.window_end += n;
            return @intCast(u32, n);
        }
        fn writeBlock(self: *Self, tokens: []token.Token, index: usize) !void {
            if (index > 0) {
                var window: ?[]u8 = null;
                if (self.block_start <= index) {
                    window = self.window[self.block_start..index];
                }
                self.block_start = index;
                try self.hm_bw.writeBlock(tokens, false, window);
                return;
            }
            return;
        }
        // fillWindow will fill the current window with the supplied dictionary
        // and calculate all hashes. This is much faster than doing a full
        // encode, and should only be used when a dictionary is present.
        fn fillWindow(self: *Self, in_b: []const u8) void {
            var b = in_b;
            // Store-only, huffman_only, and best_speed modes do not use this window.
            if (self.compression == .no_compression or
                self.compression == .huffman_only or
                self.compression == .best_speed)
            {
                return;
            }
            // fillWindow must not be called with stale data in the window.
            assert(self.index == 0 and self.window_end == 0);
            // If we are given too much, cut it.
            if (b.len > window_size) {
                b = b[b.len - window_size ..];
            }
            // Add all to window.
            mem.copy(u8, self.window, b);
            var n = b.len;
            // Calculate hashes 256 bytes at a time (better L1 cache locality).
            var loops = (n + 256 - min_match_length) / 256;
            var j: usize = 0;
            while (j < loops) : (j += 1) {
                var index = j * 256;
                var end = index + 256 + min_match_length - 1;
                if (end > n) {
                    end = n;
                }
                var to_check = self.window[index..end];
                var dst_size = to_check.len - min_match_length + 1;
                if (dst_size <= 0) {
                    continue;
                }
                var dst = self.hash_match[0..dst_size];
                _ = self.bulk_hasher(to_check, dst);
                var new_h: u32 = 0;
                for (dst) |val, i| {
                    var di = i + index;
                    new_h = val;
                    var hh = &self.hash_head[new_h & hash_mask];
                    // Get the previous value with the same hash;
                    // our chain should point to it.
                    self.hash_prev[di & window_mask] = hh.*;
                    // Set the head of the hash chain to us.
                    hh.* = @intCast(u32, di + self.hash_offset);
                }
                self.hash = new_h;
            }
            // Update window information.
            self.window_end = n;
            self.index = @intCast(u32, n);
        }
        const Match = struct {
            length: u32,
            offset: u32,
            ok: bool,
        };
        // Try to find a match starting at pos whose length is greater than
        // prev_length. We only look at compression_level.chain possibilities
        // before giving up.
        fn findMatch(
            self: *Self,
            pos: u32,
            prev_head: u32,
            prev_length: u32,
            lookahead: u32,
        ) Match {
            var length: u32 = 0;
            var offset: u32 = 0;
            var ok: bool = false;
            var min_match_look: u32 = max_match_length;
            if (lookahead < min_match_look) {
                min_match_look = lookahead;
            }
            var win = self.window[0 .. pos + min_match_look];
            // We quit when we get a match that's at least nice long.
            var nice = win.len - pos;
            if (self.compression_level.nice < nice) {
                nice = self.compression_level.nice;
            }
            // If we've got a match that's good enough, only look in 1/4 of the chain.
            var tries = self.compression_level.chain;
            length = prev_length;
            if (length >= self.compression_level.good) {
                tries >>= 2;
            }
            var w_end = win[pos + length];
            var w_pos = win[pos..];
            var min_index = pos -| window_size;
            var i = prev_head;
            while (tries > 0) : (tries -= 1) {
                if (w_end == win[i + length]) {
                    var n = matchLen(win[i..], w_pos, min_match_look);
                    if (n > length and (n > min_match_length or pos - i <= 4096)) {
                        length = n;
                        offset = pos - i;
                        ok = true;
                        if (n >= nice) {
                            // The match is good enough that we don't try to find a better one.
                            break;
                        }
                        w_end = win[pos + n];
                    }
                }
                if (i == min_index) {
                    // hash_prev[i & window_mask] has already been overwritten, so stop now.
                    break;
                }
                if (@intCast(u32, self.hash_prev[i & window_mask]) < self.hash_offset) {
                    break;
                }
                i = @intCast(u32, self.hash_prev[i & window_mask]) - self.hash_offset;
                if (i < min_index) {
                    break;
                }
            }
            return Match{ .length = length, .offset = offset, .ok = ok };
        }
        fn writeStoredBlock(self: *Self, buf: []u8) !void {
            try self.hm_bw.writeStoredHeader(buf.len, false);
            try self.hm_bw.writeBytes(buf);
        }
        // encSpeed will compress and store the currently added data
        // if enough has been accumulated or we are at the end of the stream.
        fn encSpeed(self: *Self) !void {
            // We only compress once a full store block has accumulated, or on flush/close.
            if (self.window_end < max_store_block_size) {
                if (!self.sync) {
                    return;
                }
                // Handle small sizes specially.
                if (self.window_end < 128) {
                    switch (self.window_end) {
                        0 => return,
                        1...16 => {
                            try self.writeStoredBlock(self.window[0..self.window_end]);
                        },
                        else => {
                            try self.hm_bw.writeBlockHuff(false, self.window[0..self.window_end]);
                            self.err = self.hm_bw.err;
                        },
                    }
                    self.window_end = 0;
                    self.best_speed_enc.reset();
                    return;
                }
            }
            // Encode the block.
            self.tokens_count = 0;
            self.best_speed_enc.encode(
                self.tokens,
                &self.tokens_count,
                self.window[0..self.window_end],
            );
            // If tokenization saved less than 1/16th of the input size,
            // Huffman-compress the raw bytes instead.
            if (self.tokens_count > self.window_end - (self.window_end >> 4)) {
                try self.hm_bw.writeBlockHuff(false, self.window[0..self.window_end]);
            } else {
                try self.hm_bw.writeBlockDynamic(
                    self.tokens[0..self.tokens_count],
                    false,
                    self.window[0..self.window_end],
                );
            }
            self.err = self.hm_bw.err;
            self.window_end = 0;
        }
        fn initDeflate(self: *Self) !void {
            self.window = try self.allocator.alloc(u8, 2 * window_size);
            self.hash_offset = 1;
            self.tokens = try self.allocator.alloc(token.Token, max_flate_block_tokens);
            self.tokens_count = 0;
            mem.set(token.Token, self.tokens, 0);
            self.length = min_match_length - 1;
            self.offset = 0;
            self.byte_available = false;
            self.index = 0;
            self.hash = 0;
            self.chain_head = 0;
            self.bulk_hasher = bulkHash4;
        }
        fn deflate(self: *Self) !void {
            if (self.window_end - self.index < min_match_length + max_match_length and !self.sync) {
                return;
            }
            self.max_insert_index = self.window_end -| (min_match_length - 1);
            if (self.index < self.max_insert_index) {
                self.hash = hash4(self.window[self.index .. self.index + min_match_length]);
            }
            while (true) {
                assert(self.index <= self.window_end);
                var lookahead = self.window_end -| self.index;
                if (lookahead < min_match_length + max_match_length) {
                    if (!self.sync) {
                        break;
                    }
                    assert(self.index <= self.window_end);
                    if (lookahead == 0) {
                        // Flush current output block if any.
                        if (self.byte_available) {
                            // There is still one pending token that needs to be flushed.
                            self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[self.index - 1]));
                            self.tokens_count += 1;
                            self.byte_available = false;
                        }
                        if (self.tokens_count > 0) {
                            try self.writeBlock(self.tokens[0..self.tokens_count], self.index);
                            self.tokens_count = 0;
                        }
                        break;
                    }
                }
                if (self.index < self.max_insert_index) {
                    // Update the hash and insert this position into the chain.
                    self.hash = hash4(self.window[self.index .. self.index + min_match_length]);
                    var hh = &self.hash_head[self.hash & hash_mask];
                    self.chain_head = @intCast(u32, hh.*);
                    self.hash_prev[self.index & window_mask] = @intCast(u32, self.chain_head);
                    hh.* = @intCast(u32, self.index + self.hash_offset);
                }
                var prev_length = self.length;
                var prev_offset = self.offset;
                self.length = min_match_length - 1;
                self.offset = 0;
                var min_index = self.index -| window_size;
                if (self.hash_offset <= self.chain_head and
                    self.chain_head - self.hash_offset >= min_index and
                    (self.compression_level.fast_skip_hashing != skip_never and
                    lookahead > min_match_length - 1 or
                    self.compression_level.fast_skip_hashing == skip_never and
                    lookahead > prev_length and
                    prev_length < self.compression_level.lazy))
                {
                    {
                        var fmatch = self.findMatch(
                            self.index,
                            self.chain_head -| self.hash_offset,
                            min_match_length - 1,
                            @intCast(u32, lookahead),
                        );
                        if (fmatch.ok) {
                            self.length = fmatch.length;
                            self.offset = fmatch.offset;
                        }
                    }
                }
                if (self.compression_level.fast_skip_hashing != skip_never and
                    self.length >= min_match_length or
                    self.compression_level.fast_skip_hashing == skip_never and
                    prev_length >= min_match_length and
                    self.length <= prev_length)
                {
                    // There was a match at the previous step, and the current match is
                    // not better. Output the previous match.
                    if (self.compression_level.fast_skip_hashing != skip_never) {
                        self.tokens[self.tokens_count] = token.matchToken(
                            @intCast(u32, self.length - base_match_length),
                            @intCast(u32, self.offset - base_match_offset),
                        );
                        self.tokens_count += 1;
                    } else {
                        self.tokens[self.tokens_count] = token.matchToken(
                            @intCast(u32, prev_length - base_match_length),
                            @intCast(u32, prev_offset -| base_match_offset),
                        );
                        self.tokens_count += 1;
                    }
                    // Insert in the hash table all strings up to the end of the match.
                    // index and index-1 are already inserted. If there is not enough
                    // lookahead, the last two strings are not inserted into the hash
                    // table.
                    if (self.length <= self.compression_level.fast_skip_hashing) {
                        var new_index: u32 = 0;
                        if (self.compression_level.fast_skip_hashing != skip_never) {
                            new_index = self.index + self.length;
                        } else {
                            new_index = self.index + prev_length - 1;
                        }
                        var index = self.index;
                        index += 1;
                        while (index < new_index) : (index += 1) {
                            if (index < self.max_insert_index) {
                                self.hash = hash4(self.window[index .. index + min_match_length]);
                                // Get the previous value with the same hash;
                                // our chain should point to it.
                                var hh = &self.hash_head[self.hash & hash_mask];
                                self.hash_prev[index & window_mask] = hh.*;
                                // Set the head of the hash chain to us.
                                hh.* = @intCast(u32, index + self.hash_offset);
                            }
                        }
                        self.index = index;
                        if (self.compression_level.fast_skip_hashing == skip_never) {
                            self.byte_available = false;
                            self.length = min_match_length - 1;
                        }
                    } else {
                        // For matches this long, we don't bother inserting each
                        // individual item into the table.
                        self.index += self.length;
                        if (self.index < self.max_insert_index) {
                            self.hash = hash4(self.window[self.index .. self.index + min_match_length]);
                        }
                    }
                    if (self.tokens_count == max_flate_block_tokens) {
                        // The block includes the current character.
                        try self.writeBlock(self.tokens[0..self.tokens_count], self.index);
                        self.tokens_count = 0;
                    }
                } else {
                    if (self.compression_level.fast_skip_hashing != skip_never or self.byte_available) {
                        var i = self.index -| 1;
                        if (self.compression_level.fast_skip_hashing != skip_never) {
                            i = self.index;
                        }
                        self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[i]));
                        self.tokens_count += 1;
                        if (self.tokens_count == max_flate_block_tokens) {
                            try self.writeBlock(self.tokens[0..self.tokens_count], i + 1);
                            self.tokens_count = 0;
                        }
                    }
                    self.index += 1;
                    if (self.compression_level.fast_skip_hashing == skip_never) {
                        self.byte_available = true;
                    }
                }
            }
        }
        fn fillStore(self: *Self, b: []const u8) u32 {
            var n = mu.copy(self.window[self.window_end..], b);
            self.window_end += n;
            return @intCast(u32, n);
        }
        fn store(self: *Self) !void {
            if (self.window_end > 0 and (self.window_end == max_store_block_size or self.sync)) {
                try self.writeStoredBlock(self.window[0..self.window_end]);
                self.window_end = 0;
            }
        }
        // storeHuff compresses and stores the currently added data
        // when the window is full or we are at the end of the stream.
        fn storeHuff(self: *Self) !void {
            if (self.window_end < self.window.len and !self.sync or self.window_end == 0) {
                return;
            }
            try self.hm_bw.writeBlockHuff(false, self.window[0..self.window_end]);
            self.err = self.hm_bw.err;
            self.window_end = 0;
        }
        pub fn bytesWritten(self: *Self) usize {
            return self.hm_bw.bytes_written;
        }
        /// Writes the compressed form of input to the underlying writer.
        pub fn write(self: *Self, input: []const u8) !usize {
            var buf = input;
            // Deflate until the whole input buffer has been consumed.
            while (buf.len > 0) {
                try self.step();
                var filled = self.fill(buf);
                buf = buf[filled..];
            }
            return input.len;
        }
        /// Flushes any pending data to the underlying writer. It is useful
        /// mainly in compressed network protocols, to ensure that a remote
        /// reader has enough data to reconstruct a packet. flush() does not
        /// return until the data has been written. Calling flush() when there
        /// is no pending data still causes a sync marker of at least 4 bytes
        /// to be emitted. If the underlying writer returns an error, flush()
        /// returns that error.
        ///
        /// In zlib terminology, flush() is equivalent to Z_SYNC_FLUSH.
        pub fn flush(self: *Self) !void {
            self.sync = true;
            try self.step();
            try self.hm_bw.writeStoredHeader(0, false);
            try self.hm_bw.flush();
            self.sync = false;
            return;
        }
        fn step(self: *Self) !void {
            switch (self.compression) {
                .no_compression => return self.store(),
                .huffman_only => return self.storeHuff(),
                .best_speed => return self.encSpeed(),
                .default_compression,
                .level_2,
                .level_3,
                .level_4,
                .level_5,
                .level_6,
                .level_7,
                .level_8,
                .best_compression,
                => return self.deflate(),
            }
        }
        fn fill(self: *Self, b: []const u8) u32 {
            switch (self.compression) {
                .no_compression,
                .huffman_only,
                .best_speed,
                => return self.fillStore(b),
                .default_compression,
                .level_2,
                .level_3,
                .level_4,
                .level_5,
                .level_6,
                .level_7,
                .level_8,
                .best_compression,
                => return self.fillDeflate(b),
            }
        }
        fn init(
            allocator: Allocator,
            in_writer: WriterType,
            options: CompressorOptions,
        ) !Self {
            var s = Self{
                .allocator = undefined,
                .compression = undefined,
                .compression_level = undefined,
                .hm_bw = undefined, 
                .bulk_hasher = undefined,
                .sync = false,
                .best_speed_enc = undefined, 
                .chain_head = 0,
                .hash_head = undefined,
                .hash_prev = undefined, 
                .hash_offset = 0,
                .index = 0,
                .window = undefined,
                .window_end = 0,
                .block_start = 0,
                .byte_available = false,
                .tokens = undefined,
                .tokens_count = 0,
                .length = 0,
                .offset = 0,
                .hash = 0,
                .max_insert_index = 0,
                .err = false, 
                .hash_match = undefined,
                .dictionary = options.dictionary,
            };
            s.hm_bw = try hm_bw.huffmanBitWriter(allocator, in_writer);
            s.allocator = allocator;
            s.hash_head = try allocator.alloc(u32, hash_size);
            s.hash_prev = try allocator.alloc(u32, window_size);
            s.hash_match = try allocator.alloc(u32, max_match_length - 1);
            mem.set(u32, s.hash_head, 0);
            mem.set(u32, s.hash_prev, 0);
            mem.set(u32, s.hash_match, 0);
            switch (options.level) {
                // Store-only and huffman_only modes share the same buffers.
                .no_compression,
                .huffman_only,
                => {
                    s.compression = options.level;
                    s.compression_level = levels(options.level);
                    s.window = try allocator.alloc(u8, max_store_block_size);
                    s.tokens = try allocator.alloc(token.Token, 0);
                },
                .best_speed => {
                    s.compression = options.level;
                    s.compression_level = levels(options.level);
                    s.window = try allocator.alloc(u8, max_store_block_size);
                    s.tokens = try allocator.alloc(token.Token, max_store_block_size);
                    s.best_speed_enc = try allocator.create(fast.DeflateFast);
                    s.best_speed_enc.* = fast.deflateFast();
                    try s.best_speed_enc.init(allocator);
                },
                .default_compression => {
                    s.compression = .level_6;
                    s.compression_level = levels(.level_6);
                    try s.initDeflate();
                    if (options.dictionary != null) {
                        s.fillWindow(options.dictionary.?);
                    }
                },
                .level_2,
                .level_3,
                .level_4,
                .level_5,
                .level_6,
                .level_7,
                .level_8,
                .best_compression,
                => {
                    s.compression = options.level;
                    s.compression_level = levels(options.level);
                    try s.initDeflate();
                    if (options.dictionary != null) {
                        s.fillWindow(options.dictionary.?);
                    }
                },
            }
            return s;
        }
        /// Releases all allocated resources.
        pub fn deinit(self: *Self) void {
            self.hm_bw.deinit();
            self.allocator.free(self.window);
            self.allocator.free(self.tokens);
            self.allocator.free(self.hash_head);
            self.allocator.free(self.hash_prev);
            self.allocator.free(self.hash_match);
            if (self.compression == .best_speed) {
                self.best_speed_enc.deinit();
                self.allocator.destroy(self.best_speed_enc);
            }
        }
        /// Discards the compressor's state and makes it equivalent to the
        /// result of init(), but writing to new_writer instead.
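        /// Illustrative reuse sketch (`gpa`, `out1`, and `out2` are placeholder
        /// names for an allocator and two ArrayList(u8) sinks):
        ///
        ///     var comp = try compressor(gpa, out1.writer(), .{});
        ///     defer comp.deinit();
        ///     _ = try comp.write(payload);
        ///     try comp.close();
        ///     comp.reset(out2.writer()); // fresh state, new sink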
        pub fn reset(self: *Self, new_writer: WriterType) void {
            self.hm_bw.reset(new_writer);
            self.sync = false;
            switch (self.compression) {
                // Store-only mode keeps no state beyond the buffered window.
                .no_compression => self.window_end = 0,
                // best_speed has its own encoder state to reset.
                .best_speed => {
                    self.window_end = 0;
                    self.tokens_count = 0;
                    self.best_speed_enc.reset();
                },
                // Everything else resets the full deflate state.
                .huffman_only,
                .default_compression,
                .level_2,
                .level_3,
                .level_4,
                .level_5,
                .level_6,
                .level_7,
                .level_8,
                .best_compression,
                => {
                    self.chain_head = 0;
                    mem.set(u32, self.hash_head, 0);
                    mem.set(u32, self.hash_prev, 0);
                    self.hash_offset = 1;
                    self.index = 0;
                    self.window_end = 0;
                    self.block_start = 0;
                    self.byte_available = false;
                    self.tokens_count = 0;
                    self.length = min_match_length - 1;
                    self.offset = 0;
                    self.hash = 0;
                    self.max_insert_index = 0;
                    if (self.dictionary != null) {
                        self.fillWindow(self.dictionary.?);
                    }
                },
            }
        }
        /// Writes any pending data to the underlying writer and finishes the
        /// stream with a final (BFINAL) empty stored block.
        pub fn close(self: *Self) !void {
            self.sync = true;
            try self.step();
            try self.hm_bw.writeStoredHeader(0, true);
            try self.hm_bw.flush();
            return;
        }
    };
}
const expect = std.testing.expect;
const testing = std.testing;
const ArrayList = std.ArrayList;
const DeflateTest = struct {
    in: []const u8,
    level: Compression,
    out: []const u8,
};
const deflate_tests = [_]DeflateTest{
    // no_compression, empty input
    .{
        .in = &[_]u8{},
        .level = .no_compression,
        .out = &[_]u8{ 1, 0, 0, 255, 255 },
    },
    // default_compression and its alias, level_6
    .{
        .in = &[_]u8{0x11},
        .level = .default_compression,
        .out = &[_]u8{ 18, 4, 4, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{0x11},
        .level = .level_6,
        .out = &[_]u8{ 18, 4, 4, 0, 0, 255, 255 },
    },
    // level_4
    .{
        .in = &[_]u8{0x11},
        .level = .level_4,
        .out = &[_]u8{ 18, 4, 4, 0, 0, 255, 255 },
    },
    // no_compression with data: raw stored blocks
    .{
        .in = &[_]u8{0x11},
        .level = .no_compression,
        .out = &[_]u8{ 0, 1, 0, 254, 255, 17, 1, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x12 },
        .level = .no_compression,
        .out = &[_]u8{ 0, 2, 0, 253, 255, 17, 18, 1, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11 },
        .level = .no_compression,
        .out = &[_]u8{ 0, 8, 0, 247, 255, 17, 17, 17, 17, 17, 17, 17, 17, 1, 0, 0, 255, 255 },
    },
    // level_2
    .{
        .in = &[_]u8{},
        .level = .level_2,
        .out = &[_]u8{ 1, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{0x11},
        .level = .level_2,
        .out = &[_]u8{ 18, 4, 4, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x12 },
        .level = .level_2,
        .out = &[_]u8{ 18, 20, 2, 4, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11 },
        .level = .level_2,
        .out = &[_]u8{ 18, 132, 2, 64, 0, 0, 0, 255, 255 },
    },
    // best_compression
    .{
        .in = &[_]u8{},
        .level = .best_compression,
        .out = &[_]u8{ 1, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{0x11},
        .level = .best_compression,
        .out = &[_]u8{ 18, 4, 4, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x12 },
        .level = .best_compression,
        .out = &[_]u8{ 18, 20, 2, 4, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11 },
        .level = .best_compression,
        .out = &[_]u8{ 18, 132, 2, 64, 0, 0, 0, 255, 255 },
    },
};
test "deflate" {
    for (deflate_tests) |dt| {
        var output = ArrayList(u8).init(testing.allocator);
        defer output.deinit();
        var comp = try compressor(testing.allocator, output.writer(), .{ .level = dt.level });
        _ = try comp.write(dt.in);
        try comp.close();
        comp.deinit();
        try expect(mem.eql(u8, output.items, dt.out));
    }
}
test "bulkHash4" {
    for (deflate_tests) |x| {
        if (x.out.len < min_match_length) {
            continue;
        }
        // Double the input so hashes are also computed across the boundary.
        var out = try testing.allocator.alloc(u8, x.out.len * 2);
        defer testing.allocator.free(out);
        mem.copy(u8, out[0..x.out.len], x.out);
        mem.copy(u8, out[x.out.len..], x.out);
        var j: usize = 4;
        while (j < out.len) : (j += 1) {
            var y = out[0..j];
            var dst = try testing.allocator.alloc(u32, y.len - min_match_length + 1);
            defer testing.allocator.free(dst);
            _ = bulkHash4(y, dst);
            for (dst) |got, i| {
                var want = hash4(y[i..]);
                try expect(got == want);
            }
        }
    }
}
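// The tests below are illustrative additions. "matchLen" exercises the
// early-exit semantics of matchLen(); "flush" relies on a sync flush being an
// empty, non-final stored block (00 00 00 ff ff), the non-final counterpart of
// the `1, 0, 0, 255, 255` close() marker in the vectors above; "reset" only
// assumes that compression is deterministic.
test "matchLen" {
    var a = [_]u8{ 1, 2, 3, 4, 5 };
    var b = [_]u8{ 1, 2, 3, 9, 9 };
    // First mismatch is at index 3.
    try expect(matchLen(&a, &b, 5) == 3);
    // Equal within the bound: returns max.
    try expect(matchLen(&a, &b, 3) == 3);
}
test "flush" {
    var output = ArrayList(u8).init(testing.allocator);
    defer output.deinit();
    var comp = try compressor(testing.allocator, output.writer(), .{ .level = .no_compression });
    defer comp.deinit();
    try comp.flush();
    // BFINAL=0, BTYPE=00, padded to a byte boundary, then LEN=0 and NLEN=0xffff.
    try expect(mem.eql(u8, output.items, &[_]u8{ 0, 0, 0, 255, 255 }));
}
test "reset" {
    var out1 = ArrayList(u8).init(testing.allocator);
    defer out1.deinit();
    var out2 = ArrayList(u8).init(testing.allocator);
    defer out2.deinit();
    var comp = try compressor(testing.allocator, out1.writer(), .{});
    defer comp.deinit();
    _ = try comp.write("so much in common");
    try comp.close();
    comp.reset(out2.writer());
    _ = try comp.write("so much in common");
    try comp.close();
    try expect(mem.eql(u8, out1.items, out2.items));
}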