
Merging upstream version 1.1~pre1.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-24 04:01:28 +01:00
parent 19bde4a70f
commit 8871145941
Signed by: daniel (GPG key ID: FBB4F0E80A80222F)
17 changed files with 356 additions and 277 deletions


@@ -1,4 +1,4 @@
-/* Plzip - A parallel compressor compatible with lzip
+/* Plzip - Parallel compressor compatible with lzip
    Copyright (C) 2009 Laszlo Ersek.
    Copyright (C) 2009, 2010, 2011, 2012, 2013 Antonio Diaz Diaz.
 
@@ -171,7 +171,7 @@ extern "C" void * dworker_o( void * arg )
   LZ_Decoder * const decoder = LZ_decompress_open();
   if( !new_data || !ibuffer || !decoder ||
       LZ_decompress_errno( decoder ) != LZ_ok )
-    { pp( "Not enough memory" ); fatal(); }
+    { pp( "Not enough memory" ); cleanup_and_fail(); }
   int new_pos = 0;
 
   for( int i = worker_id; i < file_index.members(); i += num_workers )
@@ -188,7 +188,7 @@ extern "C" void * dworker_o( void * arg )
       if( size > 0 )
         {
         if( preadblock( infd, ibuffer, size, member_pos ) != size )
-          { pp(); show_error( "Read error", errno ); fatal(); }
+          { pp(); show_error( "Read error", errno ); cleanup_and_fail(); }
         member_pos += size;
         member_rest -= size;
         if( LZ_decompress_write( decoder, ibuffer, size ) != size )
@@ -201,7 +201,7 @@ extern "C" void * dworker_o( void * arg )
       const int rd = LZ_decompress_read( decoder, new_data + new_pos,
                                          max_packet_size - new_pos );
       if( rd < 0 )
-        fatal( decompress_read_error( decoder, pp, worker_id ) );
+        cleanup_and_fail( decompress_read_error( decoder, pp, worker_id ) );
       new_pos += rd;
       if( new_pos > max_packet_size )
         internal_error( "opacket size exceeded in worker" );
@@ -216,7 +216,7 @@ extern "C" void * dworker_o( void * arg )
         courier.collect_packet( opacket, worker_id );
         new_pos = 0;
         new_data = new( std::nothrow ) uint8_t[max_packet_size];
-        if( !new_data ) { pp( "Not enough memory" ); fatal(); }
+        if( !new_data ) { pp( "Not enough memory" ); cleanup_and_fail(); }
         }
       if( LZ_decompress_finished( decoder ) == 1 )
         {
@@ -235,9 +235,9 @@
 
   delete[] ibuffer; delete[] new_data;
   if( LZ_decompress_member_position( decoder ) != 0 )
-    { pp( "Error, some data remains in decoder" ); fatal(); }
+    { pp( "Error, some data remains in decoder" ); cleanup_and_fail(); }
   if( LZ_decompress_close( decoder ) < 0 )
-    { pp( "LZ_decompress_close failed" ); fatal(); }
+    { pp( "LZ_decompress_close failed" ); cleanup_and_fail(); }
   courier.worker_finished();
   return 0;
   }
@@ -256,7 +256,7 @@ void muxer( Packet_courier & courier, const Pretty_print & pp, const int outfd )
       {
       const int wr = writeblock( outfd, opacket->data, opacket->size );
       if( wr != opacket->size )
-        { pp(); show_error( "Write error", errno ); fatal(); }
+        { pp(); show_error( "Write error", errno ); cleanup_and_fail(); }
       }
     delete[] opacket->data;
     delete opacket;
@@ -280,7 +280,7 @@ int dec_stdout( const int num_workers, const int infd, const int outfd,
   Worker_arg * worker_args = new( std::nothrow ) Worker_arg[num_workers];
   pthread_t * worker_threads = new( std::nothrow ) pthread_t[num_workers];
   if( !worker_args || !worker_threads )
-    { pp( "Not enough memory" ); fatal(); }
+    { pp( "Not enough memory" ); cleanup_and_fail(); }
   for( int i = 0; i < num_workers; ++i )
     {
     worker_args[i].file_index = &file_index;
@@ -292,7 +292,7 @@ int dec_stdout( const int num_workers, const int infd, const int outfd,
     const int errcode =
       pthread_create( &worker_threads[i], 0, dworker_o, &worker_args[i] );
     if( errcode )
-      { show_error( "Can't create worker threads", errcode ); fatal(); }
+      { show_error( "Can't create worker threads", errcode ); cleanup_and_fail(); }
     }
 
   muxer( courier, pp, outfd );
@@ -301,7 +301,7 @@ int dec_stdout( const int num_workers, const int infd, const int outfd,
     {
     const int errcode = pthread_join( worker_threads[i], 0 );
     if( errcode )
-      { show_error( "Can't join worker threads", errcode ); fatal(); }
+      { show_error( "Can't join worker threads", errcode ); cleanup_and_fail(); }
     }
   delete[] worker_threads;
   delete[] worker_args;
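
The change running through every hunk above is a rename of the fatal-error
handler from fatal() to cleanup_and_fail(), the name used by the other
lzip-family tools. Most call sites pass no argument, while one passes the
value of decompress_read_error(), so the handler evidently takes an exit
status with a default value. Below is a minimal sketch of such a handler,
with stand-in globals (output_filename, outfd, delete_output_on_interrupt)
assumed for illustration; it shows the pattern the new name describes, not
the upstream implementation.

// Hypothetical sketch, not plzip's actual code: remove the partially
// written output file before exiting, so a failed run cannot leave a
// truncated file behind.
#include <cstdio>	// std::fprintf, std::remove
#include <cstdlib>	// std::exit
#include <string>
#include <unistd.h>	// close

// Stand-ins for the tool's real globals (assumptions for this sketch).
std::string output_filename;
int outfd = -1;
bool delete_output_on_interrupt = false;

void show_error( const char * const msg )
  { std::fprintf( stderr, "plzip: %s\n", msg ); }

void cleanup_and_fail( const int retval = 1 )	// default arg matches the 0-arg calls
  {
  if( delete_output_on_interrupt )	// remove only a file this run created
    {
    delete_output_on_interrupt = false;
    show_error( "Deleting output file, if it exists." );
    if( outfd >= 0 ) { close( outfd ); outfd = -1; }
    if( std::remove( output_filename.c_str() ) != 0 )
      show_error( "WARNING: deletion of output file (apparently) failed." );
    }
  std::exit( retval );
  }

The rename makes the behavior visible at the call site: these error paths
do not merely abort, they first clean up partial output and then exit with
a failure status.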