author    B. Watson <yalhcru@gmail.com>    2015-10-15 19:52:00 -0400
committer B. Watson <yalhcru@gmail.com>    2015-10-15 19:52:00 -0400
commit    8fd35a241e2daa34ef67b3b34c4c9858bb9a44cb (patch)
tree      333cf3ab557cd6873fc5aefe32deda7c407b3a63 /sbosrcarch
parent    275cb7d96b7bcad1c4e8bc5411477866f0c6a9c0 (diff)
download  sbostuff-8fd35a241e2daa34ef67b3b34c4c9858bb9a44cb.tar.gz
sbosrcarch: cosmetics
Diffstat (limited to 'sbosrcarch')
-rwxr-xr-x  sbosrcarch  43
1 files changed, 24 insertions, 19 deletions
diff --git a/sbosrcarch b/sbosrcarch
index dc9b8e7..9f188e8 100755
--- a/sbosrcarch
+++ b/sbosrcarch
@@ -61,11 +61,11 @@ Rough guideline for choosing filesize:
100.0M | 16.6G | 98%
unlimited | 43.0G | 100%
-Note: these numbers will tend to increase over time, as the SBo
-repository grows.
+Note: these numbers will tend to increase over time, as the SBo repository
+grows. To be safe, add 25% or so to the total sizes above.
"Coverage" is the percentage of all the URLs in all the .info files
-that will be kept in this archive. Notice that about 75% of the storage
+that will be kept in this archive. Notice that about 60% of the storage
space is eaten up by 2% of the files, in the unlimited case. These
large files are mostly games, if that influences your decision any.
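As a rough check of that claim, worked from the table above: capping at
100.0M keeps 16.6G of the 43.0G total, so the files above that cap hold
(43.0 - 16.6) / 43.0, i.e. about 61% of the space, while adding only the
last 2% of URL coverage (98% to 100%).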
@@ -179,7 +179,7 @@ the section B<CONFIG FILE> for details.
The archive created by sbosrcarch consists of two top-level directories
called B<by-name> and B<by-md5>. All files are present in both hierarchies
-(as hard or symbolic links, to save space).
+(but the by-md5 tree is hard or symbolic links, to save space).
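As an aside, a minimal sketch of that hard-link-or-symlink arrangement,
assuming the script falls back to a symlink when a hard link isn't possible
(e.g. across filesystems); the paths here are hypothetical, not the
script's real layout:

    use Cwd qw/abs_path/;
    # hypothetical example paths, for illustration only
    my $byname = abs_path("by-name/games/foo/foo-1.0.tar.gz");
    my $bymd5  = "by-md5/0123456789abcdef0123456789abcdef/foo-1.0.tar.gz";
    link($byname, $bymd5)                 # hard link if possible...
        or symlink($byname, $bymd5)       # ...else a symlink to the same file
        or warn "can't link $bymd5: $!";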
B<by-name> is organized by the familiar category and PRGNAM, like SBo
itself. Example:
@@ -283,6 +283,8 @@ B. Watson <yalhcru@gmail.com>
# use only modules that ship with Slackware, which pretty much
# means only modules that ship with core perl.
+# use the 'legacy' 2.0 API for File::Path, since we want to support
+# the older perl in Slackware 13.0.
use warnings;
use strict; # I hate strict, but I'll use it anyway...
use File::Temp qw/tempfile tempdir/;
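For reference, a hedged sketch of the "legacy" File::Path interface that
comment refers to: mkpath()/rmtree() rather than the newer
make_path()/remove_tree(), since the older calls exist on the old perl
shipped with Slackware 13.0. The directory name is made up:

    use File::Path qw/mkpath rmtree/;   # legacy-style imports
    mkpath("by-md5/ab/cd");             # create nested directories, old-school
    rmtree("by-md5/ab");                # recursively remove a tree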
@@ -386,7 +388,12 @@ sub url_to_filename {
sub parse_info {
local $/ = "";
my $file = shift;
- open my $fh, "<", $file or die "$file: $!";
+
+ open my $fh, "<", $file or do {
+ warn "$file: $!";
+ return undef;
+ };
+
my $got = <$fh>;
$got =~ s/\\\s*\n//gs; # join \ continuation lines
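A hedged sketch of how a caller benefits from the warn-and-return-undef
change above; the loop and the list of .info paths are assumptions for
illustration, not code from this script:

    my @info_files = ("development/foo/foo.info");   # hypothetical list
    for my $info_file (@info_files) {
        my $info = parse_info($info_file);   # no longer dies on an unreadable file
        next unless defined $info;           # just skip it and keep going
        # process $info here
    }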
@@ -433,9 +440,8 @@ sub download_http {
sub download_file {
my $url = shift;
- my $filename = url_to_filename($url);
-
my $dlresult;
+
if($url =~ /^ftp:/) {
$dlresult = download_ftp($url);
} else {
@@ -500,7 +506,7 @@ sub toobig {
# code it up & send a patch to the wget maintainers?
# I've just discovered a better way to do this:
-# curl --head -sS -X GET $url
+# curl --head -L -sS -X GET $url
# Stops downloading and exits after the headers are received.
# Not as familiar with curl as I am with wget, have to see about
# options... and if this works as well as I expect, there's never going
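A minimal sketch of that curl trick wrapped in perl, under the assumption
that only the Content-Length header is wanted; this is an illustration,
not code from sbosrcarch:

    my $url = "http://example.com/foo-1.0.tar.gz";    # hypothetical URL
    my $size;
    for my $line (split /\r?\n/, `curl --head -L -sS -X GET '$url' 2>/dev/null`) {
        $size = $1 if $line =~ /^Content-Length:\s*(\d+)/i;
    }
    # $size stays undef if no Content-Length header was seen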
@@ -529,11 +535,11 @@ sub wget_fake_head {
close $fh;
if($size && toobig($size)) {
- printf "file too large: %0.2fMB\n", $size / (1024 * 1024);
+ printf " file too large: %0.2fMB\n", $size / (1024 * 1024);
$skipcount++;
$size = 0;
} elsif(not defined $size) {
- print "can't determine file size, skipping\n";
+ print " can't determine file size, skipping\n";
}
return $size;
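For context, a hedged guess at the shape of the toobig() test used here;
its real body isn't part of this diff and the name of the size-limit
variable is an assumption:

    our $maxfilemegs;                 # assumed config setting, in megabytes
    sub toobig_sketch {               # illustration; the real toobig() may differ
        my $size = shift;             # size in bytes
        return $maxfilemegs && ($size > $maxfilemegs * 1024 * 1024);
    }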
@@ -547,8 +553,8 @@ sub wget {
our $wget_config_arg;
if($url =~ /'/) {
- print "Refusing to deal with URL \"$url\" due to embedded single-quote.\n" .
- "Please contact the maintainer of the SlackBuild to have this fixed.\n";
+ print "! refusing to deal with URL \"$url\" due to embedded single-quote.\n" .
+ "! please contact the maintainer of the SlackBuild to have this fixed.\n";
return undef;
}
@@ -573,9 +579,8 @@ sub wget {
}
close $fh;
if(not $wget_config_arg) {
- print "\n| wget version is too old to support --config option.\n";
+ print "| wget version is too old to support --config option.\n";
print "| continuing without it...\n";
- sleep 1;
}
}
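A hedged sketch of one way such a capability check can be done; the diff
only shows the fallback message, so this is not necessarily how sbosrcarch
itself probes for it:

    # assumption: ask wget to read an empty config file; old versions will
    # reject the unknown --config option and exit nonzero.
    my $wget_config_arg = '';
    if(system("wget --config=/dev/null --version >/dev/null 2>&1") == 0) {
        $wget_config_arg = "--config=/dev/null";
    }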
@@ -605,7 +610,7 @@ sub wget {
/^Length:\s*(\d+).*\[(.*?)\]/ && do {
$size = $1; # TODO: $content_type = $2, check for text/html or such
if(toobig($size)) {
- printf "file too large: %0.2fMB\n", $size / (1024 * 1024);
+ printf " file too large: %0.2fMB\n", $size / (1024 * 1024);
$skipcount++;
$size = 0;
}
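For reference, the sort of wget status line that regexp is aimed at looks
roughly like this (a made-up example, with the size landing in $1 and the
content type in $2):

    Length: 4009008 (3.8M) [application/x-gzip]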
@@ -662,7 +667,7 @@ sub download_ftp {
};
if($@) {
- print "$_[0]: $@";
+ print "! $@";
undef $size;
}
@@ -1048,7 +1053,7 @@ sub add_by_md5 {
for my $filename (@_) {
my $infile = $filename;
- $infile = "$oldcwd/$infile" unless -f $infile;
+ $infile = "$oldcwd/$infile" unless $infile =~ m,^/,;
my $md5 = md5sum_file($infile);
next unless defined $md5;
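As context for md5sum_file() (whose body isn't in this diff), a hedged
sketch of how such a helper is typically built on Digest::MD5, which ships
with core perl and so fits the module policy stated near the top of the
script:

    use Digest::MD5;
    sub md5sum_file_sketch {          # illustration; the real sub may differ
        my $file = shift;
        open my $fh, "<", $file or do { warn "$file: $!"; return undef; };
        binmode $fh;
        return Digest::MD5->new->addfile($fh)->hexdigest;
    }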
@@ -1073,7 +1078,7 @@ sub add_or_rm_mode {
init_git();
my $mode = shift @ARGV;
- if($mode eq 'add' && @ARGV && -f $ARGV[0] || -f "$oldcwd/$ARGV[0]") {
+ if($mode eq 'add' && @ARGV && (-f $ARGV[0] || -f "$oldcwd/$ARGV[0]")) {
add_by_md5($oldcwd, @ARGV);
exit 0;
}
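The added parentheses change behaviour: perl's && binds tighter than ||,
so the old condition parsed as ($mode eq 'add' && @ARGV && -f $ARGV[0])
|| -f "$oldcwd/$ARGV[0]", which could take the add branch whenever a file
happened to exist relative to $oldcwd, even if $mode wasn't 'add'.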
@@ -1121,7 +1126,7 @@ sub add_or_rm_mode {
# Find all files in by-name, make sure the md5sums match, make sure the
# by-md5 file exists and is either a hardlink or symlink to the by-name
# file. If the size is over the limit, make a note of it. If the file
-# isn't found in the hash of filenames, it's extraneous (and so its its
+# isn't found in the hash of filenames, it's extraneous (and so is its
# by-md5 counterpart).
# Do the same thing for the by-md5 tree, more or less. If both hard and