/* Plzip - A parallel compressor compatible with lzip
   Copyright (C) 2009 Laszlo Ersek.
   Copyright (C) 2009, 2010 Antonio Diaz Diaz.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#define _FILE_OFFSET_BITS 64

#include <algorithm>
#include <cerrno>
#include <climits>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <queue>
#include <string>
#include <vector>
#include <pthread.h>
#include <stdint.h>
#include <unistd.h>
#include <lzlib.h>

#include "plzip.h"
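
/* Decompression pipeline of this translation unit: a splitter thread
   cuts the input into one packet per member (large members span several
   packets), a pool of worker threads decompresses the packets in
   parallel, and the muxer, run on the calling thread, writes the
   decompressed packets to the output file in the original order.

     splitter --> courier --> workers --> courier --> muxer
*/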

namespace {

long long in_size = 0;
long long out_size = 0;


struct Packet                   // data block
  {
  uint8_t * data;               // data == 0 means end of member
  int size;                     // number of bytes in data (if any)
  };

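
/* The courier keeps one input queue and one output queue per worker.
   The splitter appends each member's packets to the current input queue
   and advances to the next queue (round robin) at each end-of-member
   token; the muxer drains the output queues in the same round-robin
   order, which keeps the output sorted. Slot_tally (declared in
   plzip.h) is used here as a counting semaphore that limits the number
   of input packets in circulation, so the splitter cannot outrun the
   workers without bound. */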
class Packet_courier            // moves packets around
  {
public:
  unsigned long icheck_counter;
  unsigned long iwait_counter;
  unsigned long ocheck_counter;
  unsigned long owait_counter;
private:
  int receive_worker_id;        // worker queue currently receiving packets
  int deliver_worker_id;        // worker queue currently delivering packets
  Slot_tally slot_tally;
  std::vector< std::queue< Packet * > > ipacket_queues;
  std::vector< std::queue< Packet * > > opacket_queues;
  int num_working;              // Number of workers still running
  const int num_workers;        // Number of workers
  const int num_slots;          // max packets in circulation
  pthread_mutex_t imutex;
  pthread_cond_t iav_or_eof;    // input packet available or splitter done
  pthread_mutex_t omutex;
  pthread_cond_t oav_or_exit;   // output packet available or all workers exited
  bool eof;                     // splitter done

public:
  Packet_courier( const int workers, const int slots )
    : icheck_counter( 0 ), iwait_counter( 0 ),
      ocheck_counter( 0 ), owait_counter( 0 ),
      receive_worker_id( 0 ), deliver_worker_id( 0 ),
      slot_tally( slots ), ipacket_queues( workers ),
      opacket_queues( workers ), num_working( workers ),
      num_workers( workers ), num_slots( slots ), eof( false )
    { xinit( &iav_or_eof, &imutex ); xinit( &oav_or_exit, &omutex ); }

  ~Packet_courier()
    { xdestroy( &iav_or_eof, &imutex ); xdestroy( &oav_or_exit, &omutex ); }

  // make a packet with data received from splitter
  // if data == 0, move to next queue
  void receive_packet( uint8_t * const data, const int size )
    {
    Packet * ipacket = new Packet;
    ipacket->data = data;
    ipacket->size = size;
    if( data != 0 )
      { in_size += size; slot_tally.get_slot(); }  // wait for a free slot
    xlock( &imutex );
    ipacket_queues[receive_worker_id].push( ipacket );
    xbroadcast( &iav_or_eof );
    xunlock( &imutex );
    if( data == 0 && ++receive_worker_id >= num_workers )
      receive_worker_id = 0;
    }

  // distribute a packet to a worker
  Packet * distribute_packet( const int worker_id )
    {
    Packet * ipacket = 0;
    xlock( &imutex );
    ++icheck_counter;
    while( ipacket_queues[worker_id].empty() && !eof )
      {
      ++iwait_counter;
      xwait( &iav_or_eof, &imutex );
      }
    if( !ipacket_queues[worker_id].empty() )
      {
      ipacket = ipacket_queues[worker_id].front();
      ipacket_queues[worker_id].pop();
      }
    xunlock( &imutex );
    if( ipacket != 0 )
      { if( ipacket->data != 0 ) slot_tally.leave_slot(); }
    else
      {
      // Notify muxer when last worker exits
      xlock( &omutex );
      if( --num_working == 0 )
        xsignal( &oav_or_exit );
      xunlock( &omutex );
      }
    return ipacket;
    }

  // collect a packet from a worker
  void collect_packet( Packet * const opacket, const int worker_id )
    {
    xlock( &omutex );
    opacket_queues[worker_id].push( opacket );
    if( worker_id == deliver_worker_id ) xsignal( &oav_or_exit );
    xunlock( &omutex );
    }
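
  /* Ordering invariant: receive_worker_id advances only on end-of-member
     tokens, and deliver_worker_id advances only when the matching token
     reappears in an output queue, so members leave the courier in the
     order the splitter found them in the input. */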
  // deliver a packet to muxer
  // if packet data == 0, move to next queue and wait again
  Packet * deliver_packet()
    {
    Packet * opacket = 0;
    xlock( &omutex );
    ++ocheck_counter;
    while( true )
      {
      while( opacket_queues[deliver_worker_id].empty() && num_working > 0 )
        {
        ++owait_counter;
        xwait( &oav_or_exit, &omutex );
        }
      if( opacket_queues[deliver_worker_id].empty() ) break;
      opacket = opacket_queues[deliver_worker_id].front();
      opacket_queues[deliver_worker_id].pop();
      if( opacket->data != 0 ) break;
      else
        {
        if( ++deliver_worker_id >= num_workers ) deliver_worker_id = 0;
        delete opacket; opacket = 0;
        }
      }
    xunlock( &omutex );
    return opacket;
    }

  void finish()                 // splitter has no more packets to send
    {
    xlock( &imutex );
    eof = true;
    xbroadcast( &iav_or_eof );
    xunlock( &imutex );
    }

  bool finished()               // all packets delivered to muxer
    {
    if( !slot_tally.all_free() || !eof || num_working != 0 ) return false;
    for( int i = 0; i < num_workers; ++i )
      if( !ipacket_queues[i].empty() ) return false;
    for( int i = 0; i < num_workers; ++i )
      if( !opacket_queues[i].empty() ) return false;
    return true;
    }

  const Slot_tally & tally() const { return slot_tally; }
  };

struct Splitter_arg
  {
  Packet_courier * courier;
  const Pretty_print * pp;
  int infd;
  int packet_size;
  };

// split data from input file into chunks and pass them to
// courier for packaging and distribution to workers.
void * splitter( void * arg )
  {
  const Splitter_arg & tmp = *(Splitter_arg *)arg;
  Packet_courier & courier = *tmp.courier;
  const Pretty_print & pp = *tmp.pp;
  const int infd = tmp.infd;
  const int hsize = 6;                  // header size
  const int tsize = 20;                 // trailer size
  const int buffer_size = tmp.packet_size;
  const int base_buffer_size = tsize + buffer_size + hsize;
  uint8_t * const base_buffer = new( std::nothrow ) uint8_t[base_buffer_size];
  if( base_buffer == 0 ) { pp( "not enough memory" ); fatal(); }
  uint8_t * const buffer = base_buffer + tsize;
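
  /* An lzip member is "LZIP" + version byte + coded dictionary size
     (6 header bytes), the LZMA stream, and a 20-byte trailer whose last
     8 bytes hold the member size. The buffer keeps tsize bytes of the
     previous block in front of it so that, when a header is found near
     the start of a block, the member-size field of the preceding
     trailer is still addressable:

       base_buffer:  [ tsize | buffer_size | hsize ]
                              ^buffer
  */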
  int size = readblock( infd, buffer, buffer_size + hsize ) - hsize;
  bool at_stream_end = ( size < buffer_size );
  if( size != buffer_size && errno )
    { pp(); show_error( "read error", errno ); fatal(); }
  if( size <= tsize || buffer[0] != 'L' || buffer[1] != 'Z' ||
      buffer[2] != 'I' || buffer[3] != 'P' )
    { pp( "bad magic number (file not in lzip format)" ); fatal(); }

  long long partial_member_size = 0;
  while( true )
    {
    int pos = 0;
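    /* Scan for member boundaries: a candidate "LZIP" magic only counts
       as a real header if the member size stored just before it (in the
       previous member's trailer) equals the number of bytes seen since
       the last boundary. This rejects "LZIP" strings that occur by
       chance inside the compressed data. */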
    for( int newpos = 1; newpos <= size; ++newpos )
      if( buffer[newpos] == 'L' && buffer[newpos+1] == 'Z' &&
          buffer[newpos+2] == 'I' && buffer[newpos+3] == 'P' )
        {
        long long member_size = 0;
        for( int i = 1; i <= 8; ++i )
          { member_size <<= 8; member_size += base_buffer[tsize+newpos-i]; }
        if( partial_member_size + newpos - pos == member_size )
          {                             // header found
          uint8_t * data = new( std::nothrow ) uint8_t[newpos - pos];
          if( data == 0 ) { pp( "not enough memory" ); fatal(); }
          std::memcpy( data, buffer + pos, newpos - pos );
          courier.receive_packet( data, newpos - pos );
          courier.receive_packet( 0, 0 );       // end of member token
          partial_member_size = 0;
          pos = newpos;
          }
        }

    if( at_stream_end )
      {
      uint8_t * data = new( std::nothrow ) uint8_t[size + hsize - pos];
      if( data == 0 ) { pp( "not enough memory" ); fatal(); }
      std::memcpy( data, buffer + pos, size + hsize - pos );
      courier.receive_packet( data, size + hsize - pos );
      courier.receive_packet( 0, 0 );           // end of member token
      break;
      }
    if( pos < buffer_size )
      {
      partial_member_size += buffer_size - pos;
      uint8_t * data = new( std::nothrow ) uint8_t[buffer_size - pos];
      if( data == 0 ) { pp( "not enough memory" ); fatal(); }
      std::memcpy( data, buffer + pos, buffer_size - pos );
      courier.receive_packet( data, buffer_size - pos );
      }
    std::memcpy( base_buffer, base_buffer + buffer_size, tsize + hsize );
    size = readblock( infd, buffer + hsize, buffer_size );
    at_stream_end = ( size < buffer_size );
    if( size != buffer_size && errno )
      { pp(); show_error( "read error", errno ); fatal(); }
    }
  delete[] base_buffer;
  courier.finish();                     // no more packets to send
  return 0;
  }

struct Worker_arg
  {
  Packet_courier * courier;
  const Pretty_print * pp;
  int worker_id;
  int packet_size;
  };

// consume packets from courier, decompress their contents, and
// give the produced packets to courier.
void * worker( void * arg )
  {
  const Worker_arg & tmp = *(Worker_arg *)arg;
  Packet_courier & courier = *tmp.courier;
  const Pretty_print & pp = *tmp.pp;
  const int worker_id = tmp.worker_id;
  const int new_data_size = tmp.packet_size;

  uint8_t * new_data = new( std::nothrow ) uint8_t[new_data_size];
  LZ_Decoder * const decoder = LZ_decompress_open();
  if( !new_data || !decoder || LZ_decompress_errno( decoder ) != LZ_ok )
    { pp( "not enough memory" ); fatal(); }
  int new_pos = 0;
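
  /* lzlib streaming loop: feed compressed bytes while the decoder
     accepts them (LZ_decompress_write_size() > 0), then drain as much
     decompressed data as it yields. An end-of-member token (data == 0)
     triggers LZ_decompress_finish(); once the decoder reports
     LZ_decompress_finished(), it is reset so the next member can be
     decoded with the same decoder object. */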
  while( true )
    {
    Packet * ipacket = courier.distribute_packet( worker_id );
    if( ipacket == 0 ) break;           // no more packets to process
    if( ipacket->data == 0 ) LZ_decompress_finish( decoder );

    int written = 0;
    while( true )
      {
      if( LZ_decompress_write_size( decoder ) > 0 && written < ipacket->size )
        {
        const int wr = LZ_decompress_write( decoder, ipacket->data + written,
                                            ipacket->size - written );
        if( wr < 0 ) internal_error( "library error (LZ_decompress_write)" );
        written += wr;
        if( written > ipacket->size )
          internal_error( "ipacket size exceeded in worker" );
        }
      while( true )                     // read and pack decompressed data
        {
        const int rd = LZ_decompress_read( decoder, new_data + new_pos,
                                           new_data_size - new_pos );
        if( rd < 0 )
          {
          pp();
          if( verbosity >= 0 )
            std::fprintf( stderr, "LZ_decompress_read error in worker %d: %s.\n",
                          worker_id, LZ_strerror( LZ_decompress_errno( decoder ) ) );
          fatal();
          }
        new_pos += rd;
        if( new_pos > new_data_size )
          internal_error( "opacket size exceeded in worker" );
        if( new_pos == new_data_size || LZ_decompress_finished( decoder ) == 1 )
          {
          if( new_pos > 0 )             // make data packet
            {
            Packet * opacket = new Packet;
            opacket->data = new_data;
            opacket->size = new_pos;
            courier.collect_packet( opacket, worker_id );
            new_pos = 0;
            new_data = new( std::nothrow ) uint8_t[new_data_size];
            if( new_data == 0 ) { pp( "not enough memory" ); fatal(); }
            }
          if( LZ_decompress_finished( decoder ) == 1 )
            {
            LZ_decompress_reset( decoder );
            Packet * opacket = new Packet;      // end of member token
            opacket->data = 0;
            opacket->size = 0;
            courier.collect_packet( opacket, worker_id );
            break;
            }
          }
        if( rd == 0 ) break;
        }
      if( ipacket->data == 0 ) { delete ipacket; break; }
      if( written == ipacket->size )
        { delete[] ipacket->data; delete ipacket; break; }
      }
    }

  delete[] new_data;
  if( LZ_decompress_total_in_size( decoder ) != 0 )
    { pp( "error, remaining data in decoder" ); fatal(); }
  LZ_decompress_close( decoder );
  return 0;
  }
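
/* The muxer is the single writer. A negative outfd (apparently used
   when only testing integrity, given the `testing` flag of decompress()
   below) makes it count the decompressed bytes in out_size and discard
   the packets without writing. */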
// get from courier the processed and sorted packets, and write
// their contents to the output file.
void muxer( Packet_courier & courier, const Pretty_print & pp, const int outfd )
  {
  while( true )
    {
    Packet * opacket = courier.deliver_packet();
    if( opacket == 0 ) break;           // queue is empty. all workers exited

    out_size += opacket->size;

    if( outfd >= 0 )
      {
      const int wr = writeblock( outfd, opacket->data, opacket->size );
      if( wr != opacket->size )
        { pp(); show_error( "write error", errno ); fatal(); }
      }
    delete[] opacket->data;
    delete opacket;
    }
  }

} // end namespace

// init the courier, then start the splitter and the workers and
// call the muxer.
int decompress( const int num_workers, const int num_slots,
                const int infd, const int outfd, const Pretty_print & pp,
                const int debug_level, const bool testing )
  {
  in_size = 0;
  out_size = 0;
  const int packet_size = 1 << 20;
  Packet_courier courier( num_workers, num_slots );
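  /* Packets are 1 MiB chunks, so the slot tally bounds the compressed
     input held in the courier to roughly num_slots MiB; output packets
     are not slot-limited and rely on the muxer keeping up. */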

  Splitter_arg splitter_arg;
  splitter_arg.courier = &courier;
  splitter_arg.pp = &pp;
  splitter_arg.infd = infd;
  splitter_arg.packet_size = packet_size;

  pthread_t splitter_thread;
  xcreate( &splitter_thread, splitter, &splitter_arg );

  Worker_arg * worker_args = new( std::nothrow ) Worker_arg[num_workers];
  pthread_t * worker_threads = new( std::nothrow ) pthread_t[num_workers];
  if( worker_args == 0 || worker_threads == 0 )
    { pp( "not enough memory" ); fatal(); }
  for( int i = 0; i < num_workers; ++i )
    {
    worker_args[i].courier = &courier;
    worker_args[i].pp = &pp;
    worker_args[i].worker_id = i;
    worker_args[i].packet_size = packet_size;
    xcreate( &worker_threads[i], worker, &worker_args[i] );
    }
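
  /* muxer() runs on the calling thread and returns only after all
     workers have exited and their output queues have been delivered;
     only then are the worker threads and the splitter joined. */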
  muxer( courier, pp, outfd );

  for( int i = num_workers - 1; i >= 0; --i )
    xjoin( worker_threads[i] );
  delete[] worker_threads; worker_threads = 0;
  delete[] worker_args; worker_args = 0;

  xjoin( splitter_thread );

  if( verbosity >= 2 )
    std::fprintf( stderr, "decompressed size %9lld, compressed size %9lld. ",
                  out_size, in_size );

  if( verbosity >= 1 )
    { if( testing ) std::fprintf( stderr, "ok\n" );
      else std::fprintf( stderr, "done\n" ); }

  if( debug_level & 1 )
    std::fprintf( stderr,
                  "splitter tried to send a packet           %8lu times\n"
                  "splitter had to wait                      %8lu times\n"
                  "any worker tried to consume from splitter %8lu times\n"
                  "any worker had to wait                    %8lu times\n"
                  "muxer tried to consume from workers       %8lu times\n"
                  "muxer had to wait                         %8lu times\n",
                  courier.tally().check_counter,
                  courier.tally().wait_counter,
                  courier.icheck_counter,
                  courier.iwait_counter,
                  courier.ocheck_counter,
                  courier.owait_counter );

  if( !courier.finished() ) internal_error( "courier not finished" );
  return 0;
  }