1
0
Fork 0

Merging upstream version 1.8.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-02-24 04:16:09 +01:00
parent 95e76700ee
commit 3ab3342c4f
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
21 changed files with 729 additions and 460 deletions

View file

@@ -1,6 +1,6 @@
/* Plzip - Parallel compressor compatible with lzip
/* Plzip - Massively parallel implementation of lzip
Copyright (C) 2009 Laszlo Ersek.
Copyright (C) 2009-2018 Antonio Diaz Diaz.
Copyright (C) 2009-2019 Antonio Diaz Diaz.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -34,7 +34,7 @@
#include <lzlib.h>
#include "lzip.h"
#include "file_index.h"
#include "lzip_index.h"
namespace {
@@ -147,7 +147,7 @@ public:
struct Worker_arg
{
const File_index * file_index;
const Lzip_index * lzip_index;
Packet_courier * courier;
const Pretty_print * pp;
int worker_id;
@@ -160,8 +160,8 @@ struct Worker_arg
// give the produced packets to courier.
extern "C" void * dworker_o( void * arg )
{
const Worker_arg & tmp = *(Worker_arg *)arg;
const File_index & file_index = *tmp.file_index;
const Worker_arg & tmp = *(const Worker_arg *)arg;
const Lzip_index & lzip_index = *tmp.lzip_index;
Packet_courier & courier = *tmp.courier;
const Pretty_print & pp = *tmp.pp;
const int worker_id = tmp.worker_id;
@@ -177,10 +177,10 @@ extern "C" void * dworker_o( void * arg )
{ pp( "Not enough memory." ); cleanup_and_fail(); }
int new_pos = 0;
for( long i = worker_id; i < file_index.members(); i += num_workers )
for( long i = worker_id; i < lzip_index.members(); i += num_workers )
{
long long member_pos = file_index.mblock( i ).pos();
long long member_rest = file_index.mblock( i ).size();
long long member_pos = lzip_index.mblock( i ).pos();
long long member_rest = lzip_index.mblock( i ).size();
while( member_rest > 0 )
{
@@ -229,7 +229,7 @@ extern "C" void * dworker_o( void * arg )
if( rd == 0 ) break;
}
}
show_progress( file_index.mblock( i ).size() );
show_progress( lzip_index.mblock( i ).size() );
}
delete[] ibuffer; delete[] new_data;
@@ -265,9 +265,8 @@ void muxer( Packet_courier & courier, const Pretty_print & pp, const int outfd )
// init the courier, then start the workers and call the muxer.
int dec_stdout( const int num_workers, const int infd, const int outfd,
const Pretty_print & pp, const int debug_level,
const File_index & file_index )
const int out_slots, const Lzip_index & lzip_index )
{
const int out_slots = 32;
Packet_courier courier( num_workers, out_slots );
Worker_arg * worker_args = new( std::nothrow ) Worker_arg[num_workers];
@@ -276,7 +275,7 @@ int dec_stdout( const int num_workers, const int infd, const int outfd,
{ pp( "Not enough memory." ); cleanup_and_fail(); }
for( int i = 0; i < num_workers; ++i )
{
worker_args[i].file_index = &file_index;
worker_args[i].lzip_index = &lzip_index;
worker_args[i].courier = &courier;
worker_args[i].pp = &pp;
worker_args[i].worker_id = i;
@@ -301,9 +300,9 @@ int dec_stdout( const int num_workers, const int infd, const int outfd,
if( verbosity >= 2 )
{
if( verbosity >= 4 ) show_header( file_index.dictionary_size( 0 ) );
const unsigned long long in_size = file_index.cdata_size();
const unsigned long long out_size = file_index.udata_size();
if( verbosity >= 4 ) show_header( lzip_index.dictionary_size( 0 ) );
const unsigned long long in_size = lzip_index.cdata_size();
const unsigned long long out_size = lzip_index.udata_size();
if( out_size == 0 || in_size == 0 )
std::fputs( "no data compressed. ", stderr );
else