/* Plzip - A parallel compressor compatible with lzip
   Copyright (C) 2009 Laszlo Ersek.
   Copyright (C) 2009, 2010, 2011, 2012 Antonio Diaz Diaz.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#define _FILE_OFFSET_BITS 64

#include <algorithm>
#include <cerrno>
#include <climits>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <queue>
#include <string>
#include <vector>
#include <inttypes.h>
#include <pthread.h>
#include <unistd.h>
#include <lzlib.h>

#include "plzip.h"


namespace {

enum { max_packet_size = 1 << 20 };
long long in_size = 0;
long long out_size = 0;


struct Packet			// data block
  {
  uint8_t * data;		// data == 0 means end of member
  int size;			// number of bytes in data (if any)
  };
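

// The Packet_courier mediates all packet traffic:
//   splitter --> receive_packet() --> per-worker input queues --> workers
//   workers  --> collect_packet() --> per-worker output queues --> muxer
// Members are assigned to workers round robin (the receiving queue advances
// on each end-of-member token), and the muxer drains the output queues in
// the same round-robin order, so members are written in their input order.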
class Packet_courier			// moves packets around
  {
public:
  unsigned long icheck_counter;
  unsigned long iwait_counter;
  unsigned long ocheck_counter;
  unsigned long owait_counter;
private:
  int receive_worker_id;	// worker queue currently receiving packets
  int deliver_worker_id;	// worker queue currently delivering packets
  Slot_tally slot_tally;	// limits the number of input packets
  std::vector< std::queue< Packet * > > ipacket_queues;
  std::vector< std::queue< Packet * > > opacket_queues;
  int num_working;		// number of workers still running
  const int num_workers;	// number of workers
  int num_free;			// remaining free output slots
  pthread_mutex_t imutex;
  pthread_cond_t iav_or_eof;	// input packet available or splitter done
  pthread_mutex_t omutex;
  pthread_cond_t oav_or_exit;	// output packet available or all workers exited
  pthread_cond_t slot_av;	// free output slot available
  bool eof;			// splitter done

  Packet_courier( const Packet_courier & );	// declared as private
  void operator=( const Packet_courier & );	// declared as private

public:
  Packet_courier( const int workers, const int slots )
    : icheck_counter( 0 ), iwait_counter( 0 ),
      ocheck_counter( 0 ), owait_counter( 0 ),
      receive_worker_id( 0 ), deliver_worker_id( 0 ),
      slot_tally( slots ), ipacket_queues( workers ),
      opacket_queues( workers ), num_working( workers ),
      num_workers( workers ), num_free( 8 * slots ), eof( false )
    {
    xinit( &imutex ); xinit( &iav_or_eof );
    xinit( &omutex ); xinit( &oav_or_exit ); xinit( &slot_av );
    }

  ~Packet_courier()
    {
    xdestroy( &slot_av ); xdestroy( &oav_or_exit ); xdestroy( &omutex );
    xdestroy( &iav_or_eof ); xdestroy( &imutex );
    }

  const Slot_tally & tally() const { return slot_tally; }

  // make a packet with data received from splitter
  // if data == 0, move to next queue
  void receive_packet( uint8_t * const data, const int size )
    {
    Packet * ipacket = new Packet;
    ipacket->data = data;
    ipacket->size = size;
    if( data != 0 )
      { in_size += size; slot_tally.get_slot(); }	// wait for a free slot
    xlock( &imutex );
    ipacket_queues[receive_worker_id].push( ipacket );
    xbroadcast( &iav_or_eof );
    xunlock( &imutex );
    if( data == 0 && ++receive_worker_id >= num_workers )
      receive_worker_id = 0;
    }

  // distribute a packet to a worker
  Packet * distribute_packet( const int worker_id )
    {
    Packet * ipacket = 0;
    xlock( &imutex );
    ++icheck_counter;
    while( ipacket_queues[worker_id].empty() && !eof )
      {
      ++iwait_counter;
      xwait( &iav_or_eof, &imutex );
      ++icheck_counter;
      }
    if( !ipacket_queues[worker_id].empty() )
      {
      ipacket = ipacket_queues[worker_id].front();
      ipacket_queues[worker_id].pop();
      }
    xunlock( &imutex );
    if( ipacket != 0 )
      { if( ipacket->data != 0 ) slot_tally.leave_slot(); }
    else
      {
      // notify muxer when last worker exits
      xlock( &omutex );
      if( --num_working == 0 ) xsignal( &oav_or_exit );
      xunlock( &omutex );
      }
    return ipacket;
    }

  // collect a packet from a worker
  void collect_packet( Packet * const opacket, const int worker_id )
    {
    xlock( &omutex );
    if( opacket->data != 0 )
      {
      while( worker_id != deliver_worker_id && num_free <= 0 )
        xwait( &slot_av, &omutex );
      --num_free;
      }
    opacket_queues[worker_id].push( opacket );
    if( worker_id == deliver_worker_id ) xsignal( &oav_or_exit );
    xunlock( &omutex );
    }

  // deliver a packet to muxer
  // if packet data == 0, move to next queue and wait again
  Packet * deliver_packet()
    {
    Packet * opacket = 0;
    xlock( &omutex );
    ++ocheck_counter;
    while( true )
      {
      while( opacket_queues[deliver_worker_id].empty() && num_working > 0 )
        {
        ++owait_counter;
        xwait( &oav_or_exit, &omutex );
        ++ocheck_counter;
        }
      if( opacket_queues[deliver_worker_id].empty() ) break;
      opacket = opacket_queues[deliver_worker_id].front();
      opacket_queues[deliver_worker_id].pop();
      if( opacket->data != 0 )
        {
        if( ++num_free == 1 ) xsignal( &slot_av );
        break;
        }
      if( ++deliver_worker_id >= num_workers ) deliver_worker_id = 0;
      xbroadcast( &slot_av );	// restart deliver_worker_id thread
      delete opacket; opacket = 0;
      }
    xunlock( &omutex );
    return opacket;
    }

  void finish()			// splitter has no more packets to send
    {
    xlock( &imutex );
    eof = true;
    xbroadcast( &iav_or_eof );
    xunlock( &imutex );
    }

  bool finished()		// all packets delivered to muxer
    {
    if( !slot_tally.all_free() || !eof || num_working != 0 ) return false;
    for( int i = 0; i < num_workers; ++i )
      if( !ipacket_queues[i].empty() ) return false;
    for( int i = 0; i < num_workers; ++i )
      if( !opacket_queues[i].empty() ) return false;
    return true;
    }
  };


// Search forward from 'pos' for "LZIP" (Boyer-Moore algorithm)
// Return pos of found string or 'pos+size' if not found.
//
int find_magic( const uint8_t * const buffer, const int pos, const int size )
  {
  const uint8_t table[256] = {
    4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
    4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
    4,4,4,4,4,4,4,4,4,1,4,4,3,4,4,4,4,4,4,4,4,4,4,4,4,4,2,4,4,4,4,4,
    4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
    4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
    4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
    4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
    4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4 };
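
  // Horspool-style skip table keyed on buffer[i+3] (the byte aligned with
  // the final 'P' of "LZIP"): shift 1 if it is 'I', 2 if 'Z', 3 if 'L',
  // and 4 (the full pattern length) for any other byte value.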
  for( int i = pos; i <= pos + size - 4; i += table[buffer[i+3]] )
    if( buffer[i] == 'L' && buffer[i+1] == 'Z' &&
        buffer[i+2] == 'I' && buffer[i+3] == 'P' )
      return i;			// magic string found
  return pos + size;
  }


struct Splitter_arg
  {
  Packet_courier * courier;
  const Pretty_print * pp;
  int infd;
  };


// split data from input file into chunks and pass them to
// courier for packaging and distribution to workers.
extern "C" void * dsplitter( void * arg )
  {
  const Splitter_arg & tmp = *(Splitter_arg *)arg;
  Packet_courier & courier = *tmp.courier;
  const Pretty_print & pp = *tmp.pp;
  const int infd = tmp.infd;
  const int hsize = 6;		// header size
  const int tsize = 20;		// trailer size
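  // lzip member layout: the 6-byte header is the "LZIP" magic, a version
  // byte and the coded dictionary size; the 20-byte trailer holds the CRC32
  // of the uncompressed data plus the data size and member size, each a
  // little-endian 64-bit field.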
  const int buffer_size = max_packet_size;
  const int base_buffer_size = tsize + buffer_size + hsize;
  uint8_t * const base_buffer = new( std::nothrow ) uint8_t[base_buffer_size];
  if( base_buffer == 0 ) { pp( "Not enough memory" ); fatal(); }
  uint8_t * const buffer = base_buffer + tsize;

  int size = readblock( infd, buffer, buffer_size + hsize ) - hsize;
  bool at_stream_end = ( size < buffer_size );
  if( size != buffer_size && errno )
    { pp(); show_error( "Read error", errno ); fatal(); }
  if( size <= tsize || find_magic( buffer, 0, 4 ) != 0 )
    { pp( "Bad magic number (file not in lzip format)" ); fatal(); }

  long long partial_member_size = 0;
  while( true )
    {
    int pos = 0;
    for( int newpos = 1; newpos <= size; ++newpos )
      {
      newpos = find_magic( buffer, newpos, size + 4 - newpos );
      if( newpos <= size )
        {
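        // The 8 bytes just before a "LZIP" magic are the member-size field
        // of the previous member's trailer; accept the candidate as a real
        // member boundary only if that stored size matches the number of
        // bytes seen since the previous boundary.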
        long long member_size = 0;
        for( int i = 1; i <= 8; ++i )
          { member_size <<= 8; member_size += base_buffer[tsize+newpos-i]; }
        if( partial_member_size + newpos - pos == member_size )
          {			// header found
          uint8_t * const data = new( std::nothrow ) uint8_t[newpos - pos];
          if( data == 0 ) { pp( "Not enough memory" ); fatal(); }
          std::memcpy( data, buffer + pos, newpos - pos );
          courier.receive_packet( data, newpos - pos );
          courier.receive_packet( 0, 0 );	// end of member token
          partial_member_size = 0;
          pos = newpos;
          }
        }
      }

    if( at_stream_end )
      {
      uint8_t * data = new( std::nothrow ) uint8_t[size + hsize - pos];
      if( data == 0 ) { pp( "Not enough memory" ); fatal(); }
      std::memcpy( data, buffer + pos, size + hsize - pos );
      courier.receive_packet( data, size + hsize - pos );
      courier.receive_packet( 0, 0 );		// end of member token
      break;
      }
    if( pos < buffer_size )
      {
      partial_member_size += buffer_size - pos;
      uint8_t * data = new( std::nothrow ) uint8_t[buffer_size - pos];
      if( data == 0 ) { pp( "Not enough memory" ); fatal(); }
      std::memcpy( data, buffer + pos, buffer_size - pos );
      courier.receive_packet( data, buffer_size - pos );
      }
    std::memcpy( base_buffer, base_buffer + buffer_size, tsize + hsize );
    size = readblock( infd, buffer + hsize, buffer_size );
    at_stream_end = ( size < buffer_size );
    if( size != buffer_size && errno )
      { pp(); show_error( "Read error", errno ); fatal(); }
    }
  delete[] base_buffer;
  courier.finish();		// no more packets to send
  return 0;
  }


struct Worker_arg
  {
  Packet_courier * courier;
  const Pretty_print * pp;
  int worker_id;
  };


// consume packets from courier, decompress their contents, and
// give the produced packets to courier.
extern "C" void * dworker( void * arg )
  {
  const Worker_arg & tmp = *(Worker_arg *)arg;
  Packet_courier & courier = *tmp.courier;
  const Pretty_print & pp = *tmp.pp;
  const int worker_id = tmp.worker_id;
  const int new_data_size = max_packet_size;

  uint8_t * new_data = new( std::nothrow ) uint8_t[new_data_size];
  LZ_Decoder * const decoder = LZ_decompress_open();
  if( !new_data || !decoder || LZ_decompress_errno( decoder ) != LZ_ok )
    { pp( "Not enough memory" ); fatal(); }
  int new_pos = 0;

  while( true )
    {
    const Packet * const ipacket = courier.distribute_packet( worker_id );
    if( ipacket == 0 ) break;		// no more packets to process
    if( ipacket->data == 0 ) LZ_decompress_finish( decoder );
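
    // Feed the packet's compressed bytes to the decoder and drain the
    // decompressed output, emitting an output packet whenever the buffer
    // fills or a member ends; a data == 0 token follows each finished
    // member so the courier can advance to the next output queue in order.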
    int written = 0;
    while( true )
      {
      if( LZ_decompress_write_size( decoder ) > 0 && written < ipacket->size )
        {
        const int wr = LZ_decompress_write( decoder, ipacket->data + written,
                                            ipacket->size - written );
        if( wr < 0 ) internal_error( "library error (LZ_decompress_write)" );
        written += wr;
        if( written > ipacket->size )
          internal_error( "ipacket size exceeded in worker" );
        }
      while( true )		// read and pack decompressed data
        {
        const int rd = LZ_decompress_read( decoder, new_data + new_pos,
                                           new_data_size - new_pos );
        if( rd < 0 )
          {
          pp();
          if( verbosity >= 0 )
            std::fprintf( stderr, "LZ_decompress_read error in worker %d: %s.\n",
                          worker_id, LZ_strerror( LZ_decompress_errno( decoder ) ) );
          fatal();
          }
        new_pos += rd;
        if( new_pos > new_data_size )
          internal_error( "opacket size exceeded in worker" );
        if( new_pos == new_data_size || LZ_decompress_finished( decoder ) == 1 )
          {
          if( new_pos > 0 )		// make data packet
            {
            Packet * opacket = new Packet;
            opacket->data = new_data;
            opacket->size = new_pos;
            courier.collect_packet( opacket, worker_id );
            new_pos = 0;
            new_data = new( std::nothrow ) uint8_t[new_data_size];
            if( new_data == 0 ) { pp( "Not enough memory" ); fatal(); }
            }
          if( LZ_decompress_finished( decoder ) == 1 )
            {
            LZ_decompress_reset( decoder );	// prepare for new ipacket
            Packet * opacket = new Packet;	// end of member token
            opacket->data = 0;
            opacket->size = 0;
            courier.collect_packet( opacket, worker_id );
            break;
            }
          }
        if( rd == 0 ) break;
        }
      if( ipacket->data == 0 ) { delete ipacket; break; }
      if( written == ipacket->size )
        { delete[] ipacket->data; delete ipacket; break; }
      }
    }

  delete[] new_data;
  if( LZ_decompress_member_position( decoder ) != 0 )
    { pp( "Error, some data remains in decoder" ); fatal(); }
  if( LZ_decompress_close( decoder ) < 0 )
    { pp( "LZ_decompress_close failed" ); fatal(); }
  return 0;
  }


// get from courier the processed and sorted packets, and write
// their contents to the output file.
void muxer( Packet_courier & courier, const Pretty_print & pp, const int outfd )
  {
  while( true )
    {
    Packet * opacket = courier.deliver_packet();
    if( opacket == 0 ) break;	// queue is empty. all workers exited

    out_size += opacket->size;

    if( outfd >= 0 )
      {
      const int wr = writeblock( outfd, opacket->data, opacket->size );
      if( wr != opacket->size )
        { pp(); show_error( "Write error", errno ); fatal(); }
      }
    delete[] opacket->data;
    delete opacket;
    }
  }

} // end namespace


// init the courier, then start the splitter and the workers and
// call the muxer.
int decompress( const int num_workers, const int infd, const int outfd,
                const Pretty_print & pp, const int debug_level,
                const bool testing )
  {
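  // Two input slots per worker bound the number of packets the splitter
  // may keep in flight; the INT_MAX test guards against overflow of
  // num_workers * slots_per_worker.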
  const int slots_per_worker = 2;
  const int num_slots = ( ( INT_MAX / num_workers >= slots_per_worker ) ?
                          num_workers * slots_per_worker : INT_MAX );
  in_size = 0;
  out_size = 0;
  Packet_courier courier( num_workers, num_slots );

  Splitter_arg splitter_arg;
  splitter_arg.courier = &courier;
  splitter_arg.pp = &pp;
  splitter_arg.infd = infd;

  pthread_t splitter_thread;
  int errcode = pthread_create( &splitter_thread, 0, dsplitter, &splitter_arg );
  if( errcode )
    { show_error( "Can't create splitter thread", errcode ); fatal(); }

  Worker_arg * worker_args = new( std::nothrow ) Worker_arg[num_workers];
  pthread_t * worker_threads = new( std::nothrow ) pthread_t[num_workers];
  if( worker_args == 0 || worker_threads == 0 )
    { pp( "Not enough memory" ); fatal(); }
  for( int i = 0; i < num_workers; ++i )
    {
    worker_args[i].courier = &courier;
    worker_args[i].pp = &pp;
    worker_args[i].worker_id = i;
    errcode = pthread_create( &worker_threads[i], 0, dworker, &worker_args[i] );
    if( errcode )
      { show_error( "Can't create worker threads", errcode ); fatal(); }
    }

  muxer( courier, pp, outfd );

  for( int i = num_workers - 1; i >= 0; --i )
    {
    errcode = pthread_join( worker_threads[i], 0 );
    if( errcode )
      { show_error( "Can't join worker threads", errcode ); fatal(); }
    }
  delete[] worker_threads; worker_threads = 0;
  delete[] worker_args; worker_args = 0;

  errcode = pthread_join( splitter_thread, 0 );
  if( errcode )
    { show_error( "Can't join splitter thread", errcode ); fatal(); }

  if( verbosity >= 3 && out_size > 0 && in_size > 0 )
    std::fprintf( stderr, "%6.3f:1, %6.3f bits/byte, %5.2f%% saved. ",
                  (double)out_size / in_size,
                  ( 8.0 * in_size ) / out_size,
                  100.0 * ( 1.0 - ( (double)in_size / out_size ) ) );
  if( verbosity >= 2 )
    std::fprintf( stderr, "decompressed size %9lld, size %9lld. ",
                  out_size, in_size );

  if( verbosity >= 1 )
    { if( testing ) std::fprintf( stderr, "ok\n" );
      else std::fprintf( stderr, "done\n" ); }

  if( debug_level & 1 )
    std::fprintf( stderr,
                  "splitter tried to send a packet %8lu times\n"
                  "splitter had to wait %8lu times\n"
                  "any worker tried to consume from splitter %8lu times\n"
                  "any worker had to wait %8lu times\n"
                  "muxer tried to consume from workers %8lu times\n"
                  "muxer had to wait %8lu times\n",
                  courier.tally().check_counter,
                  courier.tally().wait_counter,
                  courier.icheck_counter,
                  courier.iwait_counter,
                  courier.ocheck_counter,
                  courier.owait_counter );

  if( !courier.finished() ) internal_error( "courier not finished" );
  return 0;
  }