
Merging upstream version 0.12.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-17 21:12:33 +01:00
parent 8a1b7bb819
commit fae36bf8d8
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
18 changed files with 427 additions and 212 deletions


@ -1,3 +1,13 @@
2019-02-22 Antonio Diaz Diaz <antonio@gnu.org>
* Version 0.12 released.
* create.cc (fill_headers): Use st_rdev instead of st_dev to fill devmajor/devminor.
* Save just numerical uid/gid if user or group not in database.
* extract.cc (format_member_name): Print devmajor and devminor.
* Added new option '-d, --diff'.
* Added new option '--ignore-ids'.
* extract.cc: Fast '-t, --list' on seekable uncompressed archives.
2019-02-13 Antonio Diaz Diaz <antonio@gnu.org>
* Version 0.11 released.


@ -143,6 +143,7 @@ dist : doc
$(DISTNAME)/testsuite/t155.tar.lz \
$(DISTNAME)/testsuite/test3_bad[1-6].tar.lz \
$(DISTNAME)/testsuite/dotdot[1-5].tar.lz \
$(DISTNAME)/testsuite/ug32767.tar.lz \
$(DISTNAME)/testsuite/ug32chars.tar.lz \
$(DISTNAME)/testsuite/eof.tar.lz
rm -f $(DISTNAME)

NEWS

@ -1,15 +1,23 @@
Changes in version 0.11:
Changes in version 0.12:
An endless loop happening when trying to list or extract from an empty
tar.lz archive has been fixed.
When dumping a character special file or a block special file, the devmajor
and devminor fields were incorrectly filled with the device numbers of the
device containing the special file instead of those of the special file
itself. This has been fixed.
Multi-threaded '-c, --create' and '-r, --append' have been implemented.
If, when creating an archive, tarlz can't find a user or group name in the
database, it now saves just the numerical uid/gid instead of exiting with
error status.
The default compression granularity has been changed from '--no-solid'
(per-file compression) to '--bsolid' (per-block compression).
When verbosely listing a character special file or a block special file,
the devmajor and devminor values are now shown.
The message "Removing leading '<prefix>' from member names." is now shown
once for each <prefix>.
The new option '-d, --diff', which reports differences between archive and
file system, has been added.
The new chapter 'Minimum archive sizes required for multi-threaded block
compression' has been added to the manual.
The new option '--ignore-ids', which tells '-d, --diff' to ignore
differences in owner and group IDs, has been added. It is useful when
comparing an '--anonymous' archive; see the example below.
Listing of large seekable uncompressed archives is now much faster because
tarlz skips over member data instead of reading it.
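
A short usage sketch of the new options (archive and directory names are
illustrative):

  tarlz -cf backup.tar.lz project        # creation now defaults to '--bsolid'
  tarlz -df backup.tar.lz                # report differences against the file system
  tarlz --anonymous -cf anon.tar.lz project
  tarlz -d --ignore-ids -f anon.tar.lz   # ignore owner/group ID differences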

README

@ -11,7 +11,7 @@ it like any other tar.lz archive. Tarlz can append files to the end of such
compressed archives.
Tarlz can create tar archives with five levels of compression granularity;
per file, per block, per directory, appendable solid, and solid.
per file, per block (default), per directory, appendable solid, and solid.
Of course, compressing each file (or each directory) individually can't
achieve a compression ratio as high as compressing solidly the whole tar

configure

@ -6,7 +6,7 @@
# to copy, distribute and modify it.
pkgname=tarlz
pkgversion=0.11
pkgversion=0.12
progname=tarlz
srctrigger=doc/${pkgname}.texi


@ -25,6 +25,7 @@
#include <cstring>
#include <string>
#include <vector>
#include <pthread.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/stat.h>
@ -91,14 +92,15 @@ bool option_C_after_relative_filename( const Arg_parser & parser )
}
int seek_read( const int fd, uint8_t * const buf, const int size,
const long long pos )
bool writeblock_wrapper( const int outfd, const uint8_t * const buffer,
const int size )
{
if( lseek( fd, pos, SEEK_SET ) == pos )
return readblock( fd, buf, size );
return 0;
if( writeblock( outfd, buffer, size ) != size )
{ show_file_error( archive_namep, "Write error", errno ); return false; }
return true;
}
// infd and outfd can refer to the same file if copying to a lower file
// position or if source and destination blocks don't overlap.
// max_size < 0 means no size limit.
@ -120,10 +122,7 @@ bool copy_file( const int infd, const int outfd, const long long max_size = -1 )
{ show_error( "Error reading input file", errno ); error = true; break; }
if( rd > 0 )
{
const int wr = writeblock( outfd, buffer, rd );
if( wr != rd )
{ show_error( "Error writing output file", errno );
error = true; break; }
if( !writeblock_wrapper( outfd, buffer, rd ) ) { error = true; break; }
copied_size += rd;
}
if( rd < size ) break; // EOF
@ -193,7 +192,7 @@ bool archive_write( const uint8_t * const buf, const int size )
if( size <= 0 && flushed ) return true;
flushed = ( size <= 0 );
if( !encoder ) // uncompressed
return ( writeblock( goutfd, buf, size ) == size );
return writeblock_wrapper( goutfd, buf, size );
enum { obuf_size = 65536 };
uint8_t obuf[obuf_size];
int sz = 0;
@ -207,7 +206,7 @@ bool archive_write( const uint8_t * const buf, const int size )
const int rd = LZ_compress_read( encoder, obuf, obuf_size );
if( rd < 0 ) internal_error( "library error (LZ_compress_read)." );
if( rd == 0 && sz >= size ) break;
if( writeblock( goutfd, obuf, rd ) != rd ) return false;
if( !writeblock_wrapper( goutfd, obuf, rd ) ) return false;
}
if( LZ_compress_finished( encoder ) == 1 &&
LZ_compress_restart_member( encoder, LLONG_MAX ) < 0 )
@ -270,13 +269,10 @@ int add_member( const char * const filename, const struct stat *,
if( encoder && solidity == bsolid &&
block_is_full( extended, file_size, partial_data_size ) &&
!archive_write( 0, 0 ) )
{ show_error( "Error flushing encoder", errno ); return 1; }
!archive_write( 0, 0 ) ) return 1;
if( !write_extended( extended ) )
{ show_error( "Error writing extended header", errno ); return 1; }
if( !archive_write( header, header_size ) )
{ show_error( "Error writing ustar header", errno ); return 1; }
if( !write_extended( extended ) || !archive_write( header, header_size ) )
return 1;
if( file_size )
{
enum { bufsize = 32 * header_size };
@ -301,15 +297,12 @@ int add_member( const char * const filename, const struct stat *,
{ const int padding = header_size - rem;
std::memset( buf + size, 0, padding ); size += padding; }
}
if( !archive_write( buf, size ) )
{ show_error( "Error writing archive", errno ); close( infd );
return 1; }
if( !archive_write( buf, size ) ) { close( infd ); return 1; }
}
if( close( infd ) != 0 )
{ show_file_error( filename, "Error closing file", errno ); return 1; }
}
if( encoder && solidity == no_solid && !archive_write( 0, 0 ) )
{ show_error( "Error flushing encoder", errno ); return 1; }
if( encoder && solidity == no_solid && !archive_write( 0, 0 ) ) return 1;
if( verbosity >= 1 ) std::fprintf( stderr, "%s\n", filename );
return 0;
}
@ -378,7 +371,7 @@ bool fill_headers( const char * const filename, Extended & extended,
set_error_status( 1 ); return false; }
print_octal( header + uid_o, uid_l - 1, uid );
print_octal( header + gid_o, gid_l - 1, gid );
const long long mtime = st.st_mtime; // shut up gcc
const long long mtime = st.st_mtime; // shut up gcc about time_t
if( mtime < 0 || mtime >= 1LL << 33 )
{ show_file_error( filename, "mtime is out of ustar range [0, 8_589_934_591]." );
set_error_status( 1 ); return false; }
@ -413,28 +406,28 @@ bool fill_headers( const char * const filename, Extended & extended,
else if( S_ISCHR( mode ) || S_ISBLK( mode ) )
{
typeflag = S_ISCHR( mode ) ? tf_chardev : tf_blockdev;
if( major( st.st_dev ) >= 2 << 20 || minor( st.st_dev ) >= 2 << 20 )
if( major( st.st_rdev ) >= 2 << 20 || minor( st.st_rdev ) >= 2 << 20 )
{ show_file_error( filename, "devmajor or devminor is larger than 2_097_151." );
set_error_status( 1 ); return false; }
print_octal( header + devmajor_o, devmajor_l - 1, major( st.st_dev ) );
print_octal( header + devminor_o, devminor_l - 1, minor( st.st_dev ) );
print_octal( header + devmajor_o, devmajor_l - 1, major( st.st_rdev ) );
print_octal( header + devminor_o, devminor_l - 1, minor( st.st_rdev ) );
}
else if( S_ISFIFO( mode ) ) typeflag = tf_fifo;
else { show_file_error( filename, "Unknown file type." );
set_error_status( 2 ); return false; }
header[typeflag_o] = typeflag;
errno = 0;
// errno = 0;
const struct passwd * const pw = getpwuid( uid );
if( pw && pw->pw_name )
std::strncpy( (char *)header + uname_o, pw->pw_name, uname_l - 1 );
else { show_file_error( filename, "Can't read user name from database", errno );
set_error_status( 1 ); }
errno = 0;
/* else { show_file_error( filename, "Can't read user name from database", errno );
set_error_status( 1 ); } */ // numerical only
// errno = 0;
const struct group * const gr = getgrgid( gid );
if( gr && gr->gr_name )
std::strncpy( (char *)header + gname_o, gr->gr_name, gname_l - 1 );
else { show_file_error( filename, "Can't read group name from database", errno );
set_error_status( 1 ); }
/* else { show_file_error( filename, "Can't read group name from database", errno );
set_error_status( 1 ); } */ // numerical only
if( file_size >= 1ULL << 33 )
{ extended.file_size( file_size ); force_extended_name = true; }
else print_octal( header + size_o, size_l - 1, file_size );
@ -469,10 +462,11 @@ void set_error_status( const int retval )
xunlock( &mutex );
}
int final_exit_status( int retval )
int final_exit_status( int retval, const bool show_msg )
{
if( !retval && error_status )
{ show_error( "Exiting with failure status due to previous errors." );
{ if( show_msg )
show_error( "Exiting with failure status due to previous errors." );
retval = error_status; }
return retval;
}
@ -511,9 +505,7 @@ int concatenate( const std::string & archive_name, const Arg_parser & parser,
if( parser.argument( i ).empty() ) continue; // skip empty names
const char * const filename = parser.argument( i ).c_str();
const int infd = open_instream( filename );
if( infd < 0 )
{ show_file_error( filename, "Can't open input file", errno );
retval = 1; break; }
if( infd < 0 ) { retval = 1; break; }
if( !check_appendable( infd, false ) )
{ show_file_error( filename, "Not an appendable tar.lz archive." );
close( infd ); retval = 2; break; }
@ -640,7 +632,7 @@ int encode( const std::string & archive_name, const Arg_parser & parser,
else if( ( retval = nftw( filename, add_member, 16, FTW_PHYS ) ) != 0 )
break; // write error
else if( encoder && solidity == dsolid && !archive_write( 0, 0 ) )
{ show_error( "Error flushing encoder", errno ); retval = 1; }
retval = 1;
}
if( !retval ) // write End-Of-Archive records
@ -650,12 +642,9 @@ int encode( const std::string & archive_name, const Arg_parser & parser,
std::memset( buf, 0, bufsize );
if( encoder &&
( solidity == asolid || ( solidity == bsolid && partial_data_size ) ) &&
!archive_write( 0, 0 ) )
{ show_error( "Error flushing encoder", errno ); retval = 1; }
!archive_write( 0, 0 ) ) retval = 1; // flush encoder
else if( !archive_write( buf, bufsize ) ||
( encoder && !archive_write( 0, 0 ) ) ) // flush encoder
{ show_error( "Error writing end-of-archive blocks", errno );
retval = 1; }
( encoder && !archive_write( 0, 0 ) ) ) retval = 1;
}
if( encoder && LZ_compress_close( encoder ) < 0 )
{ show_error( "LZ_compress_close failed." ); retval = 1; }


@ -45,6 +45,42 @@ Packet_courier * courierp = 0; // local vars needed by add_member
unsigned long long partial_data_size = 0; // size of current block
class Slot_tally
{
const int num_slots; // total slots
int num_free; // remaining free slots
pthread_mutex_t mutex;
pthread_cond_t slot_av; // slot available
Slot_tally( const Slot_tally & ); // declared as private
void operator=( const Slot_tally & ); // declared as private
public:
explicit Slot_tally( const int slots )
: num_slots( slots ), num_free( slots )
{ xinit_mutex( &mutex ); xinit_cond( &slot_av ); }
~Slot_tally() { xdestroy_cond( &slot_av ); xdestroy_mutex( &mutex ); }
bool all_free() { return ( num_free == num_slots ); }
void get_slot() // wait for a free slot
{
xlock( &mutex );
while( num_free <= 0 ) xwait( &slot_av, &mutex );
--num_free;
xunlock( &mutex );
}
void leave_slot() // return a slot to the tally
{
xlock( &mutex );
if( ++num_free == 1 ) xsignal( &slot_av ); // num_free was 0
xunlock( &mutex );
}
};
struct Ipacket // filename, file size and headers
{
const unsigned long long file_size;
@ -458,7 +494,7 @@ extern "C" void * cworker( void * arg )
/* Get from courier the processed and sorted packets, and write
their contents to the output archive. */
bool muxer( Packet_courier & courier, const char * const archive_name,
void muxer( Packet_courier & courier, const char * const archive_name,
const int outfd )
{
while( true )
@ -466,13 +502,12 @@ bool muxer( Packet_courier & courier, const char * const archive_name,
const Opacket * const opacket = courier.deliver_packet();
if( !opacket ) break; // queue is empty. all workers exited
const int wr = writeblock( outfd, opacket->data, opacket->size );
if( wr != opacket->size )
{ show_file_error( archive_name, "Write error", errno ); return false; }
if( writeblock( outfd, opacket->data, opacket->size ) != opacket->size )
{ show_file_error( archive_name, "Write error", errno );
cleanup_and_fail(); }
delete[] opacket->data;
delete opacket;
}
return true;
}
} // end namespace
@ -488,6 +523,8 @@ int encode_lz( const char * const archive_name, const Arg_parser & parser,
num_workers * in_slots : INT_MAX;
const int out_slots = 64;
/* If an error happens after any threads have been started, exit must be
called before courier goes out of scope. */
Packet_courier courier( num_workers, total_in_slots, out_slots );
courierp = &courier; // needed by add_member
@ -498,11 +535,12 @@ int encode_lz( const char * const archive_name, const Arg_parser & parser,
pthread_t grouper_thread;
int errcode = pthread_create( &grouper_thread, 0, grouper, &grouper_arg );
if( errcode )
{ show_error( "Can't create grouper thread", errcode ); return 1; }
{ show_error( "Can't create grouper thread", errcode ); cleanup_and_fail(); }
Worker_arg * worker_args = new( std::nothrow ) Worker_arg[num_workers];
pthread_t * worker_threads = new( std::nothrow ) pthread_t[num_workers];
if( !worker_args || !worker_threads ) { show_error( mem_msg ); return 1; }
if( !worker_args || !worker_threads )
{ show_error( mem_msg ); cleanup_and_fail(); }
for( int i = 0; i < num_workers; ++i )
{
worker_args[i].courier = &courier;
@ -511,23 +549,23 @@ int encode_lz( const char * const archive_name, const Arg_parser & parser,
worker_args[i].worker_id = i;
errcode = pthread_create( &worker_threads[i], 0, cworker, &worker_args[i] );
if( errcode )
{ show_error( "Can't create worker threads", errcode ); return 1; }
{ show_error( "Can't create worker threads", errcode ); cleanup_and_fail(); }
}
if( !muxer( courier, archive_name, outfd ) ) return 1;
muxer( courier, archive_name, outfd );
for( int i = num_workers - 1; i >= 0; --i )
{
errcode = pthread_join( worker_threads[i], 0 );
if( errcode )
{ show_error( "Can't join worker threads", errcode ); return 1; }
{ show_error( "Can't join worker threads", errcode ); cleanup_and_fail(); }
}
delete[] worker_threads;
delete[] worker_args;
errcode = pthread_join( grouper_thread, 0 );
if( errcode )
{ show_error( "Can't join grouper thread", errcode ); return 1; }
{ show_error( "Can't join grouper thread", errcode ); cleanup_and_fail(); }
// write End-Of-Archive records
int retval = 0;


@ -1,5 +1,5 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.46.1.
.TH TARLZ "1" "February 2019" "tarlz 0.11" "User Commands"
.TH TARLZ "1" "February 2019" "tarlz 0.12" "User Commands"
.SH NAME
tarlz \- creates tar archives with multimember lzip compression
.SH SYNOPSIS
@ -42,6 +42,12 @@ create a new archive
\fB\-C\fR, \fB\-\-directory=\fR<dir>
change to directory <dir>
.TP
\fB\-d\fR, \fB\-\-diff\fR
find differences between archive and file system
.TP
\fB\-\-ignore\-ids\fR
ignore differences in owner and group IDs
.TP
\fB\-f\fR, \fB\-\-file=\fR<archive>
use archive file <archive>
.TP
@ -66,6 +72,9 @@ extract files from an archive
\fB\-0\fR .. \fB\-9\fR
set compression level [default 6]
.TP
\fB\-\-uncompressed\fR
don't compress the archive created
.TP
\fB\-\-asolid\fR
create solidly compressed appendable archive
.TP
@ -95,9 +104,6 @@ don't delete partially extracted files
.TP
\fB\-\-missing\-crc\fR
exit with error status if missing extended CRC
.TP
\fB\-\-uncompressed\fR
don't compress the archive created
.PP
Exit status: 0 for a normal exit, 1 for environmental problems (file
not found, invalid flags, I/O errors, etc), 2 to indicate a corrupt or


@ -11,7 +11,7 @@ File: tarlz.info, Node: Top, Next: Introduction, Up: (dir)
Tarlz Manual
************
This manual is for Tarlz (version 0.11, 13 February 2019).
This manual is for Tarlz (version 0.12, 22 February 2019).
* Menu:
@ -48,8 +48,8 @@ tar tools like GNU tar, which treat it like any other tar.lz archive.
Tarlz can append files to the end of such compressed archives.
Tarlz can create tar archives with five levels of compression
granularity; per file, per block, per directory, appendable solid, and
solid.
granularity; per file, per block (default), per directory, appendable
solid, and solid.
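
For example, each granularity level corresponds to one of the solidity
options (archive and directory names are illustrative):

     tarlz --no-solid -cf archive.tar.lz dir    # per file
     tarlz --bsolid -cf archive.tar.lz dir      # per block (the default)
     tarlz --dsolid -cf archive.tar.lz dir      # per directory
     tarlz --asolid -cf archive.tar.lz dir      # appendable solid
     tarlz --solid -cf archive.tar.lz dir       # solid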
Of course, compressing each file (or each directory) individually can't
achieve a compression ratio as high as compressing solidly the whole tar
@ -130,9 +130,9 @@ equivalent to '-1 --solid'
'-B BYTES'
'--data-size=BYTES'
Set target size of input data blocks for the '--bsolid' option.
Valid values range from 8 KiB to 1 GiB. Default value is two times
the dictionary size, except for option '-0' where it defaults to
1 MiB. *Note Minimum archive sizes::.
*Note --bsolid::. Valid values range from 8 KiB to 1 GiB. Default
value is two times the dictionary size, except for option '-0'
where it defaults to 1 MiB. *Note Minimum archive sizes::.
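For example, an illustrative invocation setting a target block size of
4 MiB (any value from 8 KiB to 1 GiB is accepted):

     tarlz --bsolid --data-size=4194304 -cf archive.tar.lz dir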
'-c'
'--create'
@ -154,6 +154,20 @@ equivalent to '-1 --solid'
archive if a '-C' option appears after a relative filename in the
command line.
'-d'
'--diff'
Find differences between archive and file system. For each tar
member in the archive, verify that the corresponding file exists
and is of the same type (regular file, directory, etc). Report the
differences found in type, mode (permissions), owner and group
IDs, modification time, file size, file contents (of regular
files), target (of symlinks) and device number (of block/character
special files).
'--ignore-ids'
Make '--diff' ignore differences in owner and group IDs. This
option is useful when comparing an '--anonymous' archive.
'-f ARCHIVE'
'--file=ARCHIVE'
Use archive file ARCHIVE. '-' used as an ARCHIVE argument reads
@ -224,6 +238,10 @@ equivalent to '-1 --solid'
-8 24 MiB 132 bytes
-9 32 MiB 273 bytes
'--uncompressed'
With '--create', don't compress the created tar archive. Create an
uncompressed tar archive instead.
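For example (names are illustrative); note that listing such an archive is
fast because tarlz can seek over the member data:

     tarlz --uncompressed -cf archive.tar dir
     tarlz -tvf archive.tar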
'--asolid'
When creating or appending to a compressed archive, use appendable
solid compression. All the files being added to the archive are
@ -296,10 +314,6 @@ equivalent to '-1 --solid'
the posix pax format; i.e., the lack of a mandatory check sequence
in the extended records. *Note crc32::.
'--uncompressed'
With '--create', don't compress the created tar archive. Create an
uncompressed tar archive instead.
Exit status: 0 for a normal exit, 1 for environmental problems (file
not found, invalid flags, I/O errors, etc), 2 to indicate a corrupt or
@ -822,18 +836,19 @@ Concept index
Tag Table:
Node: Top223
Node: Introduction1089
Node: Invoking tarlz3218
Ref: --data-size5097
Node: File format12673
Ref: key_crc3217493
Node: Amendments to pax format22910
Ref: crc3223434
Ref: flawed-compat24459
Node: Multi-threaded tar26826
Node: Minimum archive sizes29365
Node: Examples31495
Node: Problems33164
Node: Concept index33690
Node: Invoking tarlz3228
Ref: --data-size5107
Ref: --bsolid10054
Node: File format13298
Ref: key_crc3218118
Node: Amendments to pax format23535
Ref: crc3224059
Ref: flawed-compat25084
Node: Multi-threaded tar27451
Node: Minimum archive sizes29990
Node: Examples32120
Node: Problems33789
Node: Concept index34315

End Tag Table


@ -6,8 +6,8 @@
@finalout
@c %**end of header
@set UPDATED 13 February 2019
@set VERSION 0.11
@set UPDATED 22 February 2019
@set VERSION 0.12
@dircategory Data Compression
@direntry
@ -69,7 +69,7 @@ tar, which treat it like any other tar.lz archive. Tarlz can append files to
the end of such compressed archives.
Tarlz can create tar archives with five levels of compression granularity;
per file, per block, per directory, appendable solid, and solid.
per file, per block (default), per directory, appendable solid, and solid.
@noindent
Of course, compressing each file (or each directory) individually can't
@ -130,7 +130,7 @@ appended or concatenated, and skips it.
On extraction and listing, tarlz removes leading @samp{./} strings from
member names in the archive or given in the command line, so that
@w{@code{tarlz -xf foo ./bar baz}} extracts members @samp{bar} and
@w{@samp{tarlz -xf foo ./bar baz}} extracts members @samp{bar} and
@samp{./baz} from archive @samp{foo}.
If several compression levels or @samp{--*solid} options are given, the last
@ -162,10 +162,10 @@ specified. Tarlz can't concatenate uncompressed tar archives.
@anchor{--data-size}
@item -B @var{bytes}
@itemx --data-size=@var{bytes}
Set target size of input data blocks for the @samp{--bsolid} option. Valid
values range from @w{8 KiB} to @w{1 GiB}. Default value is two times the
dictionary size, except for option @samp{-0} where it defaults to @w{1 MiB}.
@xref{Minimum archive sizes}.
Set target size of input data blocks for the @samp{--bsolid} option.
@xref{--bsolid}. Valid values range from @w{8 KiB} to @w{1 GiB}. Default
value is two times the dictionary size, except for option @samp{-0} where it
defaults to @w{1 MiB}. @xref{Minimum archive sizes}.
@item -c
@itemx --create
@ -186,6 +186,19 @@ Note that a process can only have one current working directory (CWD).
Therefore multi-threading can't be used to create an archive if a @samp{-C}
option appears after a relative filename in the command line.
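
For example, keeping @samp{-C} before any relative filename preserves
multi-threaded creation (directory and file names are illustrative):

@example
tarlz -cf archive.tar.lz -C /some/dir foo bar
@end example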
@item -d
@itemx --diff
Find differences between archive and file system. For each tar member in the
archive, verify that the corresponding file exists and is of the same type
(regular file, directory, etc). Report the differences found in type, mode
(permissions), owner and group IDs, modification time, file size, file
contents (of regular files), target (of symlinks) and device number (of
block/character special files).
@item --ignore-ids
Make @samp{--diff} ignore differences in owner and group IDs. This option is
useful when comparing an @samp{--anonymous} archive.
@item -f @var{archive}
@itemx --file=@var{archive}
Use archive file @var{archive}. @samp{-} used as an @var{archive}
@ -205,7 +218,7 @@ Multi-threaded extraction of files from an archive is not yet implemented.
Note that the number of usable threads is limited during compression to
@w{ceil( uncompressed_size / data_size )} (@pxref{Minimum archive sizes}),
and during decompression to the number of lzip members in the tar.lz
archive, which you can find by running @w{@code{lzip -lv archive.tar.lz}}.
archive, which you can find by running @w{@samp{lzip -lv archive.tar.lz}}.
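
For example (sizes are illustrative): with the default data size of
@w{16 MiB} at level @samp{-6}, creating an archive from @w{100 MiB} of input
data can use at most @w{ceil( 100 / 16 ) = 7} compression threads, no matter
how many are requested:

@example
tarlz -n 8 -cf archive.tar.lz bigdir
lzip -lv archive.tar.lz    # shows the number of lzip members created
@end example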
@item -q
@itemx --quiet
@ -255,6 +268,10 @@ it creates, reducing the amount of memory required for decompression.
@item -9 @tab 32 MiB @tab 273 bytes
@end multitable
@item --uncompressed
With @samp{--create}, don't compress the created tar archive. Create an
uncompressed tar archive instead.
@item --asolid
When creating or appending to a compressed archive, use appendable solid
compression. All the files being added to the archive are compressed into a
@ -262,6 +279,7 @@ single lzip member, but the end-of-file blocks are compressed into a
separate lzip member. This creates a solidly compressed appendable archive.
Solid archives can't be created nor decoded in parallel.
@anchor{--bsolid}
@item --bsolid
When creating or appending to a compressed archive, use block compression.
Tar members are compressed together in a lzip member until they approximate
@ -329,10 +347,6 @@ headers preceding a ustar header, or several records with the same
keyword appearing in the same block of extended records.
@end ignore
@item --uncompressed
With @samp{--create}, don't compress the created tar archive. Create an
uncompressed tar archive instead.
@end table
Exit status: 0 for a normal exit, 1 for environmental problems (file not
@ -871,7 +885,7 @@ for all eternity, if not longer.
If you find a bug in tarlz, please send electronic mail to
@email{lzip-bug@@nongnu.org}. Include the version number, which you can
find by running @w{@code{tarlz --version}}.
find by running @w{@samp{tarlz --version}}.
@node Concept index


@ -26,13 +26,14 @@
#include <cstring>
#include <string>
#include <vector>
#include <pthread.h>
#include <stdint.h>
#include <unistd.h>
#include <utime.h>
#include <sys/stat.h>
#include <sys/types.h>
#if defined(__GNU_LIBRARY__)
#include <sys/sysmacros.h> // for makedev
#include <sys/sysmacros.h> // for major, minor, makedev
#endif
#include <lzlib.h>
@ -44,15 +45,17 @@
namespace {
Resizable_buffer grbuf( initial_line_length );
bool archive_is_uncompressed_seekable = false;
bool has_lz_ext; // global var for archive_read
void skip_warn( const bool reset = false ) // avoid duplicate warnings
bool skip_warn( const bool reset = false ) // avoid duplicate warnings
{
static bool skipping = false;
if( reset ) skipping = false;
else if( !skipping )
{ skipping = true; show_error( "Skipping to next header." ); }
{ skipping = true; show_error( "Skipping to next header." ); return true; }
return false;
}
@ -122,7 +125,8 @@ int archive_read( const int infd, uint8_t * const buf, const int size,
}
if( !islz ) // uncompressed
{ if( rd == size ) return 0; fatal = true; return 2; }
decoder = LZ_decompress_open(); // compressed
archive_is_uncompressed_seekable = false; // compressed
decoder = LZ_decompress_open();
if( !decoder || LZ_decompress_errno( decoder ) != LZ_ok )
{ show_error( mem_msg );
LZ_decompress_close( decoder ); fatal = true; return 2; }
@ -251,7 +255,7 @@ void format_member_name( const Extended & extended, const Tar_header header,
format_mode_string( header, rbuf() );
const int group_string_len =
format_user_group_string( header, rbuf() + mode_string_size );
const int offset = mode_string_size + group_string_len;
int offset = mode_string_size + group_string_len;
const time_t mtime = parse_octal( header + mtime_o, mtime_l ); // 33 bits
struct tm tms;
const struct tm * tm = localtime_r( &mtime, &tms );
@ -261,13 +265,20 @@ void format_member_name( const Extended & extended, const Tar_header header,
const bool islink = ( typeflag == tf_link || typeflag == tf_symlink );
const char * const link_string = !islink ? "" :
( ( typeflag == tf_link ) ? " link to " : " -> " );
if( typeflag == tf_chardev || typeflag == tf_blockdev )
offset += snprintf( rbuf() + offset, rbuf.size() - offset, " %5u,%u",
(unsigned)parse_octal( header + devmajor_o, devmajor_l ),
(unsigned)parse_octal( header + devminor_o, devminor_l ) );
else
offset += snprintf( rbuf() + offset, rbuf.size() - offset, " %9llu",
extended.file_size() );
for( int i = 0; i < 2; ++i )
{
const int len = snprintf( rbuf() + offset, rbuf.size() - offset,
" %9llu %4d-%02u-%02u %02u:%02u %s%s%s\n",
extended.file_size(), 1900 + tm->tm_year, 1 + tm->tm_mon,
tm->tm_mday, tm->tm_hour, tm->tm_min, extended.path().c_str(),
link_string, !islink ? "" : extended.linkpath().c_str() );
" %4d-%02u-%02u %02u:%02u %s%s%s\n",
1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, extended.path().c_str(),
link_string, islink ? extended.linkpath().c_str() : "" );
if( (int)rbuf.size() > len + offset || !rbuf.resize( len + offset + 1 ) )
break;
}
@ -292,16 +303,15 @@ void show_member_name( const Extended & extended, const Tar_header header,
}
int list_member( const int infd, const Extended & extended,
const Tar_header header, const bool skip )
int skip_member( const int infd, const Extended & extended )
{
if( !skip ) show_member_name( extended, header, 0, grbuf );
const unsigned bufsize = 32 * header_size;
uint8_t buf[bufsize];
unsigned long long rest = extended.file_size();
const int rem = rest % header_size;
const int padding = rem ? header_size - rem : 0;
if( archive_is_uncompressed_seekable &&
lseek( infd, rest + padding, SEEK_CUR ) > 0 ) return 0;
const unsigned bufsize = 32 * header_size;
uint8_t buf[bufsize];
while( rest > 0 )
{
const int rsize = ( rest >= bufsize ) ? bufsize : rest + padding;
@ -314,6 +324,130 @@ int list_member( const int infd, const Extended & extended,
}
void show_file_diff( const char * const filename, const char * const msg )
{
if( verbosity >= 0 ) std::fprintf( stderr, "%s: %s\n", filename, msg );
}
int compare_member( const int infd1, const Extended & extended,
const Tar_header header, const bool ignore_ids )
{
show_member_name( extended, header, 1, grbuf );
unsigned long long rest = extended.file_size();
const char * const filename = extended.path().c_str();
const Typeflag typeflag = (Typeflag)header[typeflag_o];
bool diff = false, size_differs = false, type_differs = true;
struct stat st;
if( lstat( filename, &st ) != 0 )
show_file_error( filename, "Warning: Can't stat", errno );
else if( ( typeflag == tf_regular || typeflag == tf_hiperf ) &&
!S_ISREG( st.st_mode ) )
show_file_diff( filename, "Is not a regular file" );
else if( typeflag == tf_symlink && !S_ISLNK( st.st_mode ) )
show_file_diff( filename, "Is not a symlink" );
else if( typeflag == tf_chardev && !S_ISCHR( st.st_mode ) )
show_file_diff( filename, "Is not a character device" );
else if( typeflag == tf_blockdev && !S_ISBLK( st.st_mode ) )
show_file_diff( filename, "Is not a block device" );
else if( typeflag == tf_directory && !S_ISDIR( st.st_mode ) )
show_file_diff( filename, "Is not a directory" );
else if( typeflag == tf_fifo && !S_ISFIFO( st.st_mode ) )
show_file_diff( filename, "Is not a FIFO" );
else
{
type_differs = false;
if( typeflag != tf_symlink )
{
const mode_t mode = parse_octal( header + mode_o, mode_l ); // 12 bits
if( mode != ( st.st_mode & ( S_ISUID | S_ISGID | S_ISVTX |
S_IRWXU | S_IRWXG | S_IRWXO ) ) )
{ show_file_diff( filename, "Mode differs" ); diff = true; }
}
if( !ignore_ids )
{
if( (uid_t)parse_octal( header + uid_o, uid_l ) != st.st_uid )
{ show_file_diff( filename, "Uid differs" ); diff = true; }
if( (gid_t)parse_octal( header + gid_o, gid_l ) != st.st_gid )
{ show_file_diff( filename, "Gid differs" ); diff = true; }
}
if( typeflag != tf_symlink )
{
const time_t mtime = parse_octal( header + mtime_o, mtime_l ); // 33 bits
if( mtime != st.st_mtime )
{ show_file_diff( filename, "Mod time differs" ); diff = true; }
if( ( typeflag == tf_regular || typeflag == tf_hiperf ) &&
(off_t)rest != st.st_size ) // don't compare contents
{ show_file_diff( filename, "Size differs" ); size_differs = true; }
if( ( typeflag == tf_chardev || typeflag == tf_blockdev ) &&
( parse_octal( header + devmajor_o, devmajor_l ) != major( st.st_rdev ) ||
parse_octal( header + devminor_o, devminor_l ) != minor( st.st_rdev ) ) )
{ show_file_diff( filename, "Device number differs" ); diff = true; }
}
else
{
char * const buf = new char[st.st_size+1];
long len = readlink( filename, buf, st.st_size );
bool e = ( len != st.st_size );
if( !e ) { buf[len] = 0; if( extended.linkpath() != buf ) e = true; }
delete[] buf;
if( e ) { show_file_diff( filename, "Symlink differs" ); diff = true; }
}
}
if( diff || size_differs || type_differs )
{ diff = false; set_error_status( 1 ); }
if( rest == 0 ) return 0;
if( ( typeflag != tf_regular && typeflag != tf_hiperf ) ||
size_differs || type_differs ) return skip_member( infd1, extended );
// else compare file contents
const int rem = rest % header_size;
const int padding = rem ? header_size - rem : 0;
const unsigned bufsize = 32 * header_size;
uint8_t buf1[bufsize];
uint8_t buf2[bufsize];
const int infd2 = open_instream( filename );
if( infd2 < 0 )
{ set_error_status( 1 ); return skip_member( infd1, extended ); }
int retval = 0;
while( rest > 0 )
{
const int rsize1 = ( rest >= bufsize ) ? bufsize : rest + padding;
const int rsize2 = ( rest >= bufsize ) ? bufsize : rest;
const int ret = archive_read( infd1, buf1, rsize1 );
if( ret != 0 ) { if( ret == 2 ) retval = 2; diff = true; break; }
if( !diff )
{
const int rd = readblock( infd2, buf2, rsize2 );
if( rd != rsize2 )
{
if( errno ) show_file_error( filename, "Read error", errno );
else show_file_diff( filename, "EOF found in file" );
diff = true;
}
else
{
int i = 0; while( i < rsize2 && buf1[i] == buf2[i] ) ++i;
if( i < rsize2 )
{ show_file_diff( filename, "Contents differ" ); diff = true; }
}
}
if( rest < bufsize ) break;
rest -= rsize1;
}
if( diff ) set_error_status( 1 );
close( infd2 );
return retval;
}
int list_member( const int infd, const Extended & extended,
const Tar_header header )
{
show_member_name( extended, header, 0, grbuf );
return skip_member( infd, extended );
}
bool contains_dotdot( const char * const filename )
{
for( int i = 0; filename[i]; ++i )
@ -331,7 +465,7 @@ int extract_member( const int infd, const Extended & extended,
if( contains_dotdot( filename ) )
{
show_file_error( filename, "Contains a '..' component, skipping." );
return list_member( infd, extended, header, true );
return skip_member( infd, extended );
}
const mode_t mode = parse_octal( header + mode_o, mode_l ); // 12 bits
const time_t mtime = parse_octal( header + mtime_o, mtime_l ); // 33 bits
@ -354,12 +488,6 @@ int extract_member( const int infd, const Extended & extended,
case tf_symlink:
{
const char * const linkname = extended.linkpath().c_str();
/* if( contains_dotdot( linkname ) )
{
show_file_error( filename,
"Link destination contains a '..' component, skipping." );
return list_member( infd, extended, header, false );
}*/
const bool hard = typeflag == tf_link;
if( ( hard && link( linkname, filename ) != 0 ) ||
( !hard && symlink( linkname, filename ) != 0 ) )
@ -545,20 +673,21 @@ unsigned long long parse_octal( const uint8_t * const ptr, const int size )
int decode( const std::string & archive_name, const Arg_parser & parser,
const int filenames, const int num_workers, const int debug_level,
const bool keep_damaged, const bool listing, const bool missing_crc,
const Program_mode program_mode, const bool ignore_ids,
const bool keep_damaged, const bool missing_crc,
const bool permissive )
{
const int infd = archive_name.size() ?
open_instream( archive_name ) : STDIN_FILENO;
if( infd < 0 ) return 1;
// Execute -C options and mark filenames to be extracted or listed.
// Execute -C options and mark filenames to be compared, extracted or listed.
// name_pending is of type char instead of bool to allow concurrent update.
std::vector< char > name_pending( parser.arguments(), false );
for( int i = 0; i < parser.arguments(); ++i )
{
const int code = parser.code( i );
if( code == 'C' && !listing )
if( code == 'C' && program_mode != m_list )
{
const char * const dir = parser.argument( i ).c_str();
if( chdir( dir ) != 0 )
@ -569,7 +698,7 @@ int decode( const std::string & archive_name, const Arg_parser & parser,
}
// multi-threaded --list is faster even with 1 thread and 1 file in archive
if( listing && num_workers > 0 )
if( program_mode == m_list && num_workers > 0 )
{
const Lzip_index lzip_index( infd, true, false ); // only regular files
const long members = lzip_index.members();
@ -580,7 +709,9 @@ int decode( const std::string & archive_name, const Arg_parser & parser,
infd, std::min( (long)num_workers, members ),
missing_crc, permissive );
}
lseek( infd, 0, SEEK_SET );
if( lseek( infd, 0, SEEK_SET ) == 0 && lzip_index.retval() != 0 &&
lzip_index.file_size() > 3 * header_size )
archive_is_uncompressed_seekable = true; // unless compressed corrupt
}
has_lz_ext = // global var for archive_read
@ -599,9 +730,9 @@ int decode( const std::string & archive_name, const Arg_parser & parser,
if( ret != 0 || !verify_ustar_chksum( header ) )
{
if( ret == 0 && block_is_zero( header, header_size ) ) break; // EOF
if( verbosity >= 2 )
if( skip_warn() && verbosity >= 2 )
std::fprintf( stderr, "ustar chksum = %07o\n", ustar_chksum( header ) );
skip_warn(); set_error_status( 2 ); continue;
set_error_status( 2 ); continue;
}
skip_warn( true ); // reset warning
@ -676,8 +807,12 @@ int decode( const std::string & archive_name, const Arg_parser & parser,
( typeflag == tf_regular || typeflag == tf_hiperf ) )
extended.file_size( parse_octal( header + size_o, size_l ) );
if( listing || skip )
retval = list_member( infd, extended, header, skip );
if( skip )
retval = skip_member( infd, extended );
else if( program_mode == m_list )
retval = list_member( infd, extended, header );
else if( program_mode == m_diff )
retval = compare_member( infd, extended, header, ignore_ids );
else
retval = extract_member( infd, extended, header, keep_damaged );
extended.reset();
@ -690,7 +825,7 @@ int decode( const std::string & archive_name, const Arg_parser & parser,
if( !parser.code( i ) && parser.argument( i ).size() && name_pending[i] )
{
show_file_error( parser.argument( i ).c_str(), "Not found in archive." );
set_error_status( 1 );
retval = 1;
}
return final_exit_status( retval );
return final_exit_status( retval, program_mode != m_diff );
}


@ -605,7 +605,7 @@ done:
/* Get from courier the processed and sorted packets, and print
the member lines on stdout or the diagnostics on stderr. */
bool muxer( Packet_courier & courier )
void muxer( Packet_courier & courier )
{
while( true )
{
@ -613,15 +613,14 @@ bool muxer( Packet_courier & courier )
if( !opacket ) break; // queue is empty. all workers exited
if( opacket->status == Packet::error )
{ show_error( opacket->line.c_str() ); return false; }
{ show_error( opacket->line.c_str() ); cleanup_and_fail( 2 ); }
if( opacket->line.size() )
{ std::fputs( opacket->line.c_str(), stdout );
std::fflush( stdout ); }
delete opacket;
}
if( !courier.mastership_granted() ) // no worker found EOF blocks
{ show_error( "Archive ends unexpectedly." ); return false; }
return true;
{ show_error( "Archive ends unexpectedly." ); cleanup_and_fail( 2 ); }
}
} // end namespace
@ -634,6 +633,9 @@ int list_lz( const Arg_parser & parser, std::vector< char > & name_pending,
const bool missing_crc, const bool permissive )
{
const int out_slots = 65536; // max small files (<=512B) in 64 MiB
/* If an error happens after any threads have been started, exit must be
called before courier goes out of scope. */
Packet_courier courier( num_workers, out_slots );
Worker_arg * worker_args = new( std::nothrow ) Worker_arg[num_workers];
@ -654,16 +656,16 @@ int list_lz( const Arg_parser & parser, std::vector< char > & name_pending,
const int errcode =
pthread_create( &worker_threads[i], 0, tworker, &worker_args[i] );
if( errcode )
{ show_error( "Can't create worker threads", errcode ); return 1; }
{ show_error( "Can't create worker threads", errcode ); cleanup_and_fail(); }
}
if( !muxer( courier ) ) return 2;
muxer( courier );
for( int i = num_workers - 1; i >= 0; --i )
{
const int errcode = pthread_join( worker_threads[i], 0 );
if( errcode )
{ show_error( "Can't join worker threads", errcode ); return 1; }
{ show_error( "Can't join worker threads", errcode ); cleanup_and_fail(); }
}
delete[] worker_threads;
delete[] worker_args;


@ -32,8 +32,6 @@
#include "tarlz.h"
namespace {
int seek_read( const int fd, uint8_t * const buf, const int size,
const long long pos )
{
@ -42,6 +40,8 @@ int seek_read( const int fd, uint8_t * const buf, const int size,
return 0;
}
namespace {
const char * bad_version( const unsigned version )
{
static char buf[80];

main.cc

@ -62,8 +62,6 @@ const char * const program_name = "tarlz";
const char * const program_year = "2019";
const char * invocation_name = 0;
enum Mode { m_none, m_append, m_concatenate, m_create, m_extract, m_list };
void show_help( const long num_online )
{
@ -90,6 +88,8 @@ void show_help( const long num_online )
" -B, --data-size=<bytes> set target size of input data blocks [2x8=16 MiB]\n"
" -c, --create create a new archive\n"
" -C, --directory=<dir> change to directory <dir>\n"
" -d, --diff find differences between archive and file system\n"
" --ignore-ids ignore differences in owner and group IDs\n"
" -f, --file=<archive> use archive file <archive>\n"
" -n, --threads=<n> set number of (de)compression threads [%ld]\n"
" -q, --quiet suppress all messages\n"
@ -98,6 +98,7 @@ void show_help( const long num_online )
" -v, --verbose verbosely list files processed\n"
" -x, --extract extract files from an archive\n"
" -0 .. -9 set compression level [default 6]\n"
" --uncompressed don't compress the archive created\n"
" --asolid create solidly compressed appendable archive\n"
" --bsolid create per block compressed archive (default)\n"
" --dsolid create per directory compressed archive\n"
@ -108,8 +109,7 @@ void show_help( const long num_online )
" --group=<group> use <group> name/ID for files added\n"
" --keep-damaged don't delete partially extracted files\n"
" --missing-crc exit with error status if missing extended CRC\n"
// " --permissive allow repeated extended headers and records\n"
" --uncompressed don't compress the archive created\n",
/* " --permissive allow repeated extended headers and records\n"*/,
num_online );
if( verbosity >= 1 )
{
@ -185,7 +185,7 @@ unsigned long long getnum( const char * const ptr,
}
void set_mode( Mode & program_mode, const Mode new_mode )
void set_mode( Program_mode & program_mode, const Program_mode new_mode )
{
if( program_mode != m_none && program_mode != new_mode )
{
@ -245,7 +245,7 @@ int open_outstream( const std::string & name, const bool create )
//
void cleanup_and_fail( const int retval )
{
// only one thread can delete and exit
// calling 'exit' more than once results in undefined behavior
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_lock( &mutex ); // ignore errors to avoid loop
@ -290,7 +290,8 @@ int main( const int argc, const char * const argv[] )
int debug_level = 0;
int num_workers = -1; // start this many worker threads
int level = 6; // compression level, < 0 means uncompressed
Mode program_mode = m_none;
Program_mode program_mode = m_none;
bool ignore_ids = false;
bool keep_damaged = false;
bool missing_crc = false;
bool permissive = false;
@ -301,7 +302,7 @@ int main( const int argc, const char * const argv[] )
return 1; }
enum { opt_ano = 256, opt_aso, opt_bso, opt_crc, opt_dbg, opt_dso, opt_grp,
opt_kd, opt_nso, opt_own, opt_per, opt_sol, opt_un };
opt_id, opt_kd, opt_nso, opt_own, opt_per, opt_sol, opt_un };
const Arg_parser::Option options[] =
{
{ '0', 0, Arg_parser::no },
@ -318,6 +319,7 @@ int main( const int argc, const char * const argv[] )
{ 'B', "data-size", Arg_parser::yes },
{ 'c', "create", Arg_parser::no },
{ 'C', "directory", Arg_parser::yes },
{ 'd', "diff", Arg_parser::no },
{ 'f', "file", Arg_parser::yes },
{ 'h', "help", Arg_parser::no },
{ 'H', "format", Arg_parser::yes },
@ -334,6 +336,7 @@ int main( const int argc, const char * const argv[] )
{ opt_dbg, "debug", Arg_parser::yes },
{ opt_dso, "dsolid", Arg_parser::no },
{ opt_grp, "group", Arg_parser::yes },
{ opt_id, "ignore-ids", Arg_parser::no },
{ opt_kd, "keep-damaged", Arg_parser::no },
{ opt_crc, "missing-crc", Arg_parser::no },
{ opt_nso, "no-solid", Arg_parser::no },
@ -370,6 +373,7 @@ int main( const int argc, const char * const argv[] )
break;
case 'c': set_mode( program_mode, m_create ); break;
case 'C': break; // skip chdir
case 'd': set_mode( program_mode, m_diff ); break;
case 'f': if( sarg != "-" ) archive_name = sarg; break;
case 'h': show_help( num_online ); return 0;
case 'H': break; // ignore format
@ -387,6 +391,7 @@ int main( const int argc, const char * const argv[] )
case opt_dbg: debug_level = getnum( arg, 0, 3 ); break;
case opt_dso: solidity = dsolid; break;
case opt_grp: set_group( arg ); break;
case opt_id: ignore_ids = true; break;
case opt_kd: keep_damaged = true; break;
case opt_nso: solidity = no_solid; break;
case opt_own: set_owner( arg ); break;
@ -411,9 +416,10 @@ int main( const int argc, const char * const argv[] )
case m_create: return encode( archive_name, parser, filenames, level,
num_workers, debug_level, program_mode == m_append );
case m_concatenate: return concatenate( archive_name, parser, filenames );
case m_diff:
case m_extract:
case m_list: return decode( archive_name, parser, filenames, num_workers,
debug_level, keep_damaged, program_mode == m_list,
missing_crc, permissive );
case m_list: return decode( archive_name, parser, filenames,
num_workers, debug_level, program_mode,
ignore_ids, keep_damaged, missing_crc, permissive );
}
}

tarlz.h

@ -62,7 +62,7 @@ inline unsigned long long round_up( const unsigned long long size )
}
enum { initial_line_length = 1000 }; // must be >= 77 for 'mode user/group'
enum { initial_line_length = 1000 }; // must be >= 87 for format_member_name
class Resizable_buffer
{
@ -309,7 +309,7 @@ bool block_is_full( const Extended & extended,
const unsigned long long file_size,
unsigned long long & partial_data_size );
void set_error_status( const int retval );
int final_exit_status( int retval );
int final_exit_status( int retval, const bool show_msg = true );
unsigned ustar_chksum( const uint8_t * const header );
bool verify_ustar_chksum( const uint8_t * const header );
class Arg_parser;
@ -325,6 +325,8 @@ int encode_lz( const char * const archive_name, const Arg_parser & parser,
const int num_workers, const int outfd, const int debug_level );
// defined in extract.cc
enum Program_mode { m_none, m_append, m_concatenate, m_create, m_diff,
m_extract, m_list };
bool block_is_zero( const uint8_t * const buf, const int size );
void format_member_name( const Extended & extended, const Tar_header header,
Resizable_buffer & rbuf, const bool long_format );
@ -335,7 +337,8 @@ int writeblock( const int fd, const uint8_t * const buf, const int size );
unsigned long long parse_octal( const uint8_t * const ptr, const int size );
int decode( const std::string & archive_name, const Arg_parser & parser,
const int filenames, const int num_workers, const int debug_level,
const bool keep_damaged, const bool listing, const bool missing_crc,
const Program_mode program_mode, const bool ignore_ids,
const bool keep_damaged, const bool missing_crc,
const bool permissive );
// defined in list_lz.cc
@ -354,6 +357,10 @@ int list_lz( const Arg_parser & parser, std::vector< char > & name_pending,
const int debug_level, const int infd, const int num_workers,
const bool missing_crc, const bool permissive );
// defined in lzip_index.cc
int seek_read( const int fd, uint8_t * const buf, const int size,
const long long pos );
// defined in main.cc
extern int verbosity;
int open_instream( const std::string & name );
@ -364,39 +371,3 @@ void show_error( const char * const msg, const int errcode = 0,
void show_file_error( const char * const filename, const char * const msg,
const int errcode = 0 );
void internal_error( const char * const msg );
class Slot_tally
{
const int num_slots; // total slots
int num_free; // remaining free slots
pthread_mutex_t mutex;
pthread_cond_t slot_av; // slot available
Slot_tally( const Slot_tally & ); // declared as private
void operator=( const Slot_tally & ); // declared as private
public:
explicit Slot_tally( const int slots )
: num_slots( slots ), num_free( slots )
{ xinit_mutex( &mutex ); xinit_cond( &slot_av ); }
~Slot_tally() { xdestroy_cond( &slot_av ); xdestroy_mutex( &mutex ); }
bool all_free() { return ( num_free == num_slots ); }
void get_slot() // wait for a free slot
{
xlock( &mutex );
while( num_free <= 0 ) xwait( &slot_av, &mutex );
--num_free;
xunlock( &mutex );
}
void leave_slot() // return a slot to the tally
{
xlock( &mutex );
if( ++num_free == 1 ) xsignal( &slot_av ); // num_free was 0
xunlock( &mutex );
}
};


@ -92,6 +92,7 @@ lzlib_1_11() { [ ${lwarn} = 0 ] &&
# tlz_in_tar1.tar: 1 member (test3.tar.lz) first magic damaged
# tlz_in_tar2.tar: 2 members (foo test3.tar.lz) first magic damaged
# ug32chars.tar.lz: 1 member (foo) with 32-character owner and group names
# ug32767.tar.lz: 1 member (foo) with numerical-only owner and group
printf "testing tarlz-%s..." "$2"
@ -270,16 +271,16 @@ for i in "${tarint1_lz}" "${tarint2_lz}" ; do
"${TARLZ}" -tvf "$i" --threads=$j > outv$j ||
test_failed $LINENO "$i $j"
done
cmp out0 out2 || test_failed $LINENO
cmp out0 out6 || test_failed $LINENO
cmp out2 out6 || test_failed $LINENO
cmp outv0 outv2 || test_failed $LINENO
cmp outv0 outv6 || test_failed $LINENO
cmp outv2 outv6 || test_failed $LINENO
diff -u out0 out2 || test_failed $LINENO $i
diff -u out0 out6 || test_failed $LINENO $i
diff -u out2 out6 || test_failed $LINENO $i
diff -u outv0 outv2 || test_failed $LINENO $i
diff -u outv0 outv6 || test_failed $LINENO $i
diff -u outv2 outv6 || test_failed $LINENO $i
rm -f out0 out2 out6 outv0 outv2 outv6 || framework_failure
"${TARLZ}" -xf "$i" || test_failed $LINENO
cmp "${in_tar}" test.txt.tar || test_failed $LINENO
cmp "${test3}" test3.tar || test_failed $LINENO
"${TARLZ}" -xf "$i" || test_failed $LINENO $i
cmp "${in_tar}" test.txt.tar || test_failed $LINENO $i
cmp "${test3}" test3.tar || test_failed $LINENO $i
rm -f test.txt.tar test3.tar || framework_failure
done
@ -421,10 +422,11 @@ cmp out.tar.lz aout.tar.lz || test_failed $LINENO
rm -f out.tar.lz aout.tar.lz || framework_failure
# append to solid archive
"${TARLZ}" --solid -0 -cf out.tar.lz foo || test_failed $LINENO
"${TARLZ}" --solid -q -0 -cf out.tar.lz "${in}" foo bar || test_failed $LINENO
"${TARLZ}" -q -tf out.tar.lz || test_failed $LINENO # compressed seekable
cat out.tar.lz > aout.tar.lz || framework_failure
for i in --asolid --bsolid --dsolid --solid -0 ; do
"${TARLZ}" $i -q -rf out.tar.lz bar baz
"${TARLZ}" $i -q -rf out.tar.lz baz
[ $? = 2 ] || test_failed $LINENO $i
cmp out.tar.lz aout.tar.lz || test_failed $LINENO $i
done
@ -445,6 +447,19 @@ for i in --asolid --bsolid --dsolid -0 ; do
done
rm -f foo bar baz || framework_failure
# test --diff
"${TARLZ}" -xf "${test3_lz}" || test_failed $LINENO
"${TARLZ}" --uncompressed -cf out.tar foo || test_failed $LINENO
"${TARLZ}" --uncompressed -cf aout.tar foo --anonymous || test_failed $LINENO
if cmp out.tar aout.tar > /dev/null ; then
printf "\nwarning: --diff test can't be run as root."
else
"${TARLZ}" -q -df "${test3_lz}"
[ $? = 1 ] || test_failed $LINENO
"${TARLZ}" -df "${test3_lz}" --ignore-ids || test_failed $LINENO
fi
rm -f out.tar aout.tar foo bar baz || framework_failure
# test directories and links
mkdir dir1 || framework_failure
"${TARLZ}" -0 -cf out.tar.lz dir1 || test_failed $LINENO
@ -494,7 +509,7 @@ if ln dummy_file dummy_link 2> /dev/null &&
cmp "${in}" dir1/dir2/dir3/in || test_failed $LINENO
cmp "${in}" dir1/dir2/dir3/link || test_failed $LINENO
"${TARLZ}" -0 -q -c ../tmp/dir1 | "${TARLZ}" -x || test_failed $LINENO
diff -r tmp/dir1 dir1 || test_failed $LINENO
diff -ru tmp/dir1 dir1 || test_failed $LINENO
rm -rf tmp/dir1 dir1 || framework_failure
else
printf "\nwarning: skipping link test: 'ln' does not work on your system."
@ -510,11 +525,11 @@ if [ "${ln_works}" = yes ] ; then
"${TARLZ}" -C dir1 -xf "${t155}" || test_failed $LINENO
mkdir dir2 || framework_failure
"${TARLZ}" -C dir2 -xf "${t155_lz}" || test_failed $LINENO
diff -r dir1 dir2 || test_failed $LINENO
diff -ru dir1 dir2 || test_failed $LINENO
"${TARLZ}" -cf out.tar.lz dir2 || test_failed $LINENO
rm -rf dir2 || framework_failure
"${TARLZ}" -xf out.tar.lz || test_failed $LINENO
diff -r dir1 dir2 || test_failed $LINENO
diff -ru dir1 dir2 || test_failed $LINENO
rmdir dir2 2> /dev/null && test_failed $LINENO
rmdir dir1 2> /dev/null && test_failed $LINENO
rm -rf out.tar.lz dir2 dir1 || framework_failure
@ -532,6 +547,11 @@ fi
"${TARLZ}" -xf "${testdir}"/ug32chars.tar.lz || test_failed $LINENO
cmp cfoo foo || test_failed $LINENO
rm -f foo || framework_failure
"${TARLZ}" -tvf "${testdir}"/ug32767.tar.lz | grep -q -e 32767/32767 ||
test_failed $LINENO
"${TARLZ}" -xf "${testdir}"/ug32767.tar.lz || test_failed $LINENO
cmp cfoo foo || test_failed $LINENO
rm -f foo || framework_failure
printf "\ntesting bad input..."

testsuite/ug32767.tar.lz (new binary file, contents not shown)

Binary file not shown