author    B. Watson <urchlay@slackware.uk>  2025-12-12 04:58:15 -0500
committer B. Watson <urchlay@slackware.uk>  2025-12-12 04:58:15 -0500
commit    1fa2b732b7b6c1539995fa40605d9bcf807ac357 (patch)
tree      e579ccac1139dec4ea71c93ff44a5c006d594be4
parent    717d581f9751f73e9a7cf69a02dfe73050add281 (diff)
download  alftools-1fa2b732b7b6c1539995fa40605d9bcf807ac357.tar.gz
alf: Cleanup and commentary.
-rw-r--r--  src/alf.1       |  6
-rw-r--r--  src/alf.rst     |  4
-rw-r--r--  src/bytorder.h  |  8
-rw-r--r--  src/crunch.c    | 21
4 files changed, 26 insertions, 13 deletions
diff --git a/src/alf.1 b/src/alf.1
index 5a3df8f..46a1ef2 100644
--- a/src/alf.1
+++ b/src/alf.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "ALF" 1 "2025-12-11" "0.4.0" "Urchlay's Atari 8-bit Tools"
+.TH "ALF" 1 "2025-12-12" "0.4.0" "Urchlay's Atari 8-bit Tools"
.SH NAME
alf \- create Atari 8-bit ALF archives
.\" RST source for alf(1) man page. Convert with:
@@ -271,8 +271,8 @@ a DOS capable of handling multi\-megabyte files...
Performance is pretty good, as of alftools\-0.4.0. For small files
like you\(aqd use on an Atari (up to 50KB), it\(aqs basically instantaneous
(0.008 seconds) on the author\(aqs modest i7 workstation. For a 1MB text
-file, it takes 0.026 sec (faster than \fBarc\fP!). For 1MB of random
-garbage, it\(aqs 0.043 sec (and the resulting ALF file is 36% larger than
+file, it takes 0.022 sec (faster than \fBarc\fP!). For 1MB of random
+garbage, it\(aqs 0.03 sec (and the resulting ALF file is 36% larger than
the garbage).
.sp
By comparison, \fBzip\fP takes 0.06 seconds to compress the 1MB text file,
diff --git a/src/alf.rst b/src/alf.rst
index dbee22b..0c72aa2 100644
--- a/src/alf.rst
+++ b/src/alf.rst
@@ -238,8 +238,8 @@ Performance
Performance is pretty good, as of alftools-0.4.0. For small files
like you'd use on an Atari (up to 50KB), it's basically instantaneous
(0.008 seconds) on the author's modest i7 workstation. For a 1MB text
-file, it takes 0.026 sec (faster than **arc**\!). For 1MB of random
-garbage, it's 0.043 sec (and the resulting ALF file is 36% larger than
+file, it takes 0.022 sec (faster than **arc**\!). For 1MB of random
+garbage, it's 0.03 sec (and the resulting ALF file is 36% larger than
the garbage).
By comparison, **zip** takes 0.06 seconds to compress the 1MB text file,
diff --git a/src/bytorder.h b/src/bytorder.h
index cb8231e..2551950 100644
--- a/src/bytorder.h
+++ b/src/bytorder.h
@@ -2,13 +2,14 @@
# error Cannot define both ALF_LSB_FIRST and ALF_MSB_FIRST
#endif
+/* the user defined one or the other in COPT; use it. */
#if defined(ALF_LSB_FIRST) || defined(ALF_MSB_FIRST)
# define ALF_ENDIAN_OK
#endif
/* try to get the byte order on this platform.
if we succeed, either ALF_LSB_FIRST or ALF_MSB_FIRST will
- defined, and so will ALF_ENDIAN_OK. */
+ be defined, and so will ALF_ENDIAN_OK. */
#ifndef ALF_ENDIAN_OK
# if defined(__BYTE_ORDER__)
@@ -24,12 +25,13 @@
# endif
#endif
+/* if the __BYTE_ORDER__ macro wasn't defined... */
#ifndef ALF_ENDIAN_OK
-# if defined(__LITTLE_ENDIAN__)
+# if defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(LITTLE_ENDIAN)
# define ALF_LSB_FIRST
# define ALF_ENDIAN_OK
# else
-# if defined(__BIG_ENDIAN__)
+# if defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(BIG_ENDIAN)
# define ALF_MSB_FIRST
# define ALF_ENDIAN_OK
# endif
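
For readers who want to check what the cascade above decides, a minimal
runtime probe answers the same question at execution time. This is a sketch,
not part of bytorder.h; the union trick and the assumption that unsigned int
is exactly 4 bytes wide are illustrative only.

#include <stdio.h>

/* runtime equivalent of the compile-time detection above (sketch only);
   assumes unsigned int is exactly 4 bytes */
static int lsb_first(void)
{
    union { unsigned int ui; unsigned char bytes[4]; } probe;
    probe.ui = 0x01020304u;
    return probe.bytes[0] == 0x04;   /* low-order byte stored first? */
}

int main(void)
{
    printf("host is %s\n", lsb_first() ? "LSB first (ALF_LSB_FIRST)"
                                       : "MSB first (ALF_MSB_FIRST)");
    return 0;
}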
diff --git a/src/crunch.c b/src/crunch.c
index f572192..2755248 100644
--- a/src/crunch.c
+++ b/src/crunch.c
@@ -30,6 +30,7 @@ unsigned int input_len, output_len, out_bitpos;
short tokens[MAX_TOKENS][256];
int token_bits;
+int shiftamt; /* precalculated MAX_BITS - token_bits */
int max_token;
int curr_token = INIT_TOKEN;
int in_pos;
@@ -121,6 +122,9 @@ void init_table(void) {
token_bits = INITIAL_BITS;
max_token = 1 << INITIAL_BITS;
curr_token = INIT_TOKEN;
+
+ /* precalculated for store_token() */
+ shiftamt = MAX_BITS - token_bits;
}
void check_output_len(void) {
@@ -136,9 +140,9 @@ void inc_output_len(void) {
output_buf[output_len] = 0;
}
-#ifdef ALF_ENDIAN_OK
+#if !defined(APPEND_BIT) && defined ALF_ENDIAN_OK
-/* This is 20% faster, but it requires knowing the endianness of
+/* This is 25% faster, but it requires knowing the endianness of
the platform at compile time. See bytorder.h for gory details. */
union { unsigned int ui; u8 bytes[4]; } ui2bytes;
@@ -154,11 +158,17 @@ void store_token(int tok) {
putchar('\n');
}
- tok <<= (MAX_BITS - token_bits);
- ui2bytes.ui = tok << (12 - out_bitpos);
+ /* align the token so that, no matter what its size (9 thru 12),
+ its top bit is at bit 23 of a 32-bit int. */
+ ui2bytes.ui = tok << ((12 - out_bitpos) + shiftamt);
+
+ /* always append 12 bits (it's quicker to do that than have
+ conditionals to decide whether the last byte is needed) */
output_buf[output_len] |= ui2bytes.bytes[HIBYTE];
output_buf[output_len + 1] = ui2bytes.bytes[MIDBYTE];
output_buf[output_len + 2] = ui2bytes.bytes[LOBYTE];
+
+ /* update based on actual token size */
out_bitpos += token_bits;
output_len += out_bitpos / 8;
out_bitpos %= 8;
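
The shift ((12 - out_bitpos) + shiftamt) places the token so it continues
exactly where the previous token's bits ended, and the three byte stores then
flush a full 12-bit window regardless of the token's real size. Below is a
portable sketch of the same step, using plain shifts instead of the
endian-dependent union; the names mirror crunch.c, but this function and the
literal MAX_BITS value are assumptions, not repo code.

#include <stdint.h>

#define MAX_BITS 12                 /* assumed widest token size */

extern uint8_t output_buf[];
extern unsigned int output_len, out_bitpos;
extern int token_bits;

static void store_token_portable(int tok)
{
    /* same alignment as the union version: the token's top bit lands
       at bit 23 - out_bitpos of a 32-bit value */
    uint32_t ui = (uint32_t)tok << ((12 - out_bitpos) + (MAX_BITS - token_bits));

    /* always emit 12 bits' worth of bytes; the first byte may already
       hold bits from the previous token, so OR into it */
    output_buf[output_len]     |= (ui >> 16) & 0xff;   /* bits 23..16 */
    output_buf[output_len + 1]  = (ui >>  8) & 0xff;   /* bits 15..8  */
    output_buf[output_len + 2]  =  ui        & 0xff;   /* bits  7..0  */

    /* advance by the real token size */
    out_bitpos += token_bits;
    output_len += out_bitpos / 8;
    out_bitpos %= 8;
}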
@@ -227,11 +237,12 @@ void make_token(short tok, u8 chr) {
dump_tokens();
}
store_token(TOK_RESET); /* stored at the *old* token size! */
- token_bits = INITIAL_BITS;
init_table();
return; /* since we're starting over, *don't* make a token */
} else {
token_bits++;
+ /* precalculated for store_token() */
+ shiftamt--;
if(opt_verbose > 1) {
printf("token_bits increased to %d\n", token_bits);
}
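
Together with the init_table() change, this keeps the invariant
shiftamt == MAX_BITS - token_bits everywhere store_token() runs. A tiny
standalone sketch of that invariant follows; the INITIAL_BITS and MAX_BITS
values here are assumptions, not taken from the repo headers.

#include <assert.h>

#define INITIAL_BITS 9              /* assumed starting token size */
#define MAX_BITS     12             /* assumed widest token size */

static int token_bits, shiftamt;

static void grow_token(void)
{
    token_bits++;
    shiftamt--;                     /* stays equal to MAX_BITS - token_bits */
    assert(shiftamt == MAX_BITS - token_bits);
}

int main(void)
{
    token_bits = INITIAL_BITS;
    shiftamt   = MAX_BITS - token_bits;
    while (token_bits < MAX_BITS)
        grow_token();               /* 9 -> 10 -> 11 -> 12 bits */
    return 0;
}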