Mirror of https://github.com/NixOS/nix, synced 2025-06-28 05:21:16 +02:00
nfc(libutil): reformat files
Run clang-format on compression.{cc,hh} and tarfile.{cc,hh}. This way, follow-up patches will be formatted properly and have easier-to-read diffs.
commit 6d9bafb3b8
parent b72e1c79da
4 changed files with 63 additions and 49 deletions
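The diff below is the kind of edit produced by running clang-format in place over the affected files. As a minimal sketch (the exact command is not recorded in the commit; the src/libutil/ paths are assumed, and the style comes from whatever .clang-format file clang-format finds in an enclosing directory):

    clang-format -i src/libutil/compression.cc src/libutil/compression.hh \
        src/libutil/tarfile.cc src/libutil/tarfile.hh

With -i, clang-format rewrites each file in place, so once the tree conforms to the configured style, re-running the tool produces no further changes and follow-up patches stay cleanly formatted.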
compression.cc
@@ -12,8 +12,6 @@
 #include <brotli/decode.h>
 #include <brotli/encode.h>
 
-#include <iostream>
-
 namespace nix {
 
 static const int COMPRESSION_LEVEL_DEFAULT = -1;
@@ -40,20 +38,24 @@ struct ArchiveDecompressionSource : Source
 {
     std::unique_ptr<TarArchive> archive = 0;
     Source & src;
-    ArchiveDecompressionSource(Source & src) : src(src) {}
+    ArchiveDecompressionSource(Source & src)
+        : src(src)
+    {
+    }
     ~ArchiveDecompressionSource() override {}
-    size_t read(char * data, size_t len) override {
+    size_t read(char * data, size_t len) override
+    {
         struct archive_entry * ae;
         if (!archive) {
             archive = std::make_unique<TarArchive>(src, true);
-            this->archive->check(archive_read_next_header(this->archive->archive, &ae),
-                "failed to read header (%s)");
+            this->archive->check(archive_read_next_header(this->archive->archive, &ae), "failed to read header (%s)");
             if (archive_filter_count(this->archive->archive) < 2) {
                 throw CompressionError("input compression not recognized");
             }
         }
         ssize_t result = archive_read_data(this->archive->archive, data, len);
-        if (result > 0) return result;
+        if (result > 0)
+            return result;
         if (result == 0) {
             throw EndOfFile("reached end of compressed file");
         }
@@ -67,16 +69,19 @@ struct ArchiveCompressionSink : CompressionSink
     Sink & nextSink;
     struct archive * archive;
 
-    ArchiveCompressionSink(Sink & nextSink, std::string format, bool parallel, int level = COMPRESSION_LEVEL_DEFAULT) : nextSink(nextSink)
+    ArchiveCompressionSink(Sink & nextSink, std::string format, bool parallel, int level = COMPRESSION_LEVEL_DEFAULT)
+        : nextSink(nextSink)
     {
         archive = archive_write_new();
-        if (!archive) throw Error("failed to initialize libarchive");
+        if (!archive)
+            throw Error("failed to initialize libarchive");
         check(archive_write_add_filter_by_name(archive, format.c_str()), "couldn't initialize compression (%s)");
         check(archive_write_set_format_raw(archive));
         if (parallel)
             check(archive_write_set_filter_option(archive, format.c_str(), "threads", "0"));
         if (level != COMPRESSION_LEVEL_DEFAULT)
-            check(archive_write_set_filter_option(archive, format.c_str(), "compression-level", std::to_string(level).c_str()));
+            check(archive_write_set_filter_option(
+                archive, format.c_str(), "compression-level", std::to_string(level).c_str()));
         // disable internal buffering
         check(archive_write_set_bytes_per_block(archive, 0));
         // disable output padding
@@ -86,7 +91,8 @@ struct ArchiveCompressionSink : CompressionSink
 
     ~ArchiveCompressionSink() override
     {
-        if (archive) archive_write_free(archive);
+        if (archive)
+            archive_write_free(archive);
     }
 
     void finish() override
@@ -106,7 +112,8 @@ struct ArchiveCompressionSink : CompressionSink
     void writeUnbuffered(std::string_view data) override
     {
         ssize_t result = archive_write_data(archive, data.data(), data.length());
-        if (result <= 0) check(result);
+        if (result <= 0)
+            check(result);
     }
 
 private:
@@ -130,13 +137,20 @@ private:
 struct NoneSink : CompressionSink
 {
     Sink & nextSink;
-    NoneSink(Sink & nextSink, int level = COMPRESSION_LEVEL_DEFAULT) : nextSink(nextSink)
+    NoneSink(Sink & nextSink, int level = COMPRESSION_LEVEL_DEFAULT)
+        : nextSink(nextSink)
     {
         if (level != COMPRESSION_LEVEL_DEFAULT)
             warn("requested compression level '%d' not supported by compression method 'none'", level);
     }
-    void finish() override { flush(); }
-    void writeUnbuffered(std::string_view data) override { nextSink(data); }
+    void finish() override
+    {
+        flush();
+    }
+    void writeUnbuffered(std::string_view data) override
+    {
+        nextSink(data);
+    }
 };
 
 struct BrotliDecompressionSink : ChunkedCompressionSink
@@ -145,7 +159,8 @@ struct BrotliDecompressionSink : ChunkedCompressionSink
     BrotliDecoderState * state;
     bool finished = false;
 
-    BrotliDecompressionSink(Sink & nextSink) : nextSink(nextSink)
+    BrotliDecompressionSink(Sink & nextSink)
+        : nextSink(nextSink)
     {
         state = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr);
         if (!state)
@@ -173,10 +188,7 @@ struct BrotliDecompressionSink : ChunkedCompressionSink
         while (!finished && (!data.data() || avail_in)) {
             checkInterrupt();
 
-            if (!BrotliDecoderDecompressStream(state,
-                    &avail_in, &next_in,
-                    &avail_out, &next_out,
-                    nullptr))
+            if (!BrotliDecoderDecompressStream(state, &avail_in, &next_in, &avail_out, &next_out, nullptr))
                 throw CompressionError("error while decompressing brotli file");
 
             if (avail_out < sizeof(outbuf) || avail_in == 0) {
@@ -219,7 +231,8 @@ struct BrotliCompressionSink : ChunkedCompressionSink
     BrotliEncoderState * state;
     bool finished = false;
 
-    BrotliCompressionSink(Sink & nextSink) : nextSink(nextSink)
+    BrotliCompressionSink(Sink & nextSink)
+        : nextSink(nextSink)
     {
         state = BrotliEncoderCreateInstance(nullptr, nullptr, nullptr);
         if (!state)
@@ -247,11 +260,9 @@ struct BrotliCompressionSink : ChunkedCompressionSink
         while (!finished && (!data.data() || avail_in)) {
             checkInterrupt();
 
-            if (!BrotliEncoderCompressStream(state,
-                    data.data() ? BROTLI_OPERATION_PROCESS : BROTLI_OPERATION_FINISH,
-                    &avail_in, &next_in,
-                    &avail_out, &next_out,
-                    nullptr))
+            if (!BrotliEncoderCompressStream(
+                    state, data.data() ? BROTLI_OPERATION_PROCESS : BROTLI_OPERATION_FINISH, &avail_in, &next_in,
+                    &avail_out, &next_out, nullptr))
                 throw CompressionError("error while compressing brotli compression");
 
             if (avail_out < sizeof(outbuf) || avail_in == 0) {
@@ -267,9 +278,8 @@ struct BrotliCompressionSink : ChunkedCompressionSink
 
 ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel, int level)
 {
-    std::vector<std::string> la_supports = {
-        "bzip2", "compress", "grzip", "gzip", "lrzip", "lz4", "lzip", "lzma", "lzop", "xz", "zstd"
-    };
+    std::vector<std::string> la_supports = {"bzip2", "compress", "grzip", "gzip", "lrzip", "lz4",
+                                            "lzip", "lzma", "lzop", "xz", "zstd"};
     if (std::find(la_supports.begin(), la_supports.end(), method) != la_supports.end()) {
         return make_ref<ArchiveCompressionSink>(nextSink, method, parallel, level);
     }