#!/usr/bin/perl # 20151015 bkw: finally tested a full run on slack 13.0, results: # - create_mode stats are wrong # - the old openssl on slack 13.0 can't handle cloud.github.com. chokes # with 'sslv3 alert handshake failure'... or maybe it's wget that # can't handle it, as curl seems to be able to, using the same # openssl. # - seriously considering switching to curl. # - another thought: do away with HEAD requests entirely. do something # like open a pipeline reading from wget, read the headers (like # wget_fake_head does now)... then decide whether to finish the # download or close the fh. if we finish it, read from the pipeline # and write to the target filename. # - if a download fails, turds shouldn't be left behind in the git tree. # TODO based on feedback from ttkp and pink_mist on IRC: # - IPC::Open3 instead of open my $fh, "wget ...|"? At least use # open my $fh, "-|", "wget", @args or such, to avoid quoting issues. # However, avoiding the shell means being unable to redirect # stderr & stdout to the same place. Hm. =pod =head1 NAME sbosrcarch - Create and maintain an archive of source code for SBo builds =head1 SYNOPSIS sbosrcarch sbosrcarch add [-f] [ ...] sbosrcarch rm =head1 DESCRIPTION sbosrcarch creates and maintains an archive of source code files linked to by DOWNLOAD= and DOWNLOAD_x86_64= URLs in SlackBuilds.org .info files. The archive contains only source code from upstream sites. No content from slackbuilds.org itself is included. Since a full archive would be pretty large (45GB or so), sbosrcarch allows limiting the size of the archive (but only indirectly, by limiting the max file size it will download). This means we won't have a full archive of every source tarball, but even a partial mirror is still useful. Rough guideline for choosing filesize: Max filesize | Approx. 
total archive size | Coverage 1.0M | 803.1M | 68% 2.0M | 1.4G | 77% 5.0M | 2.7G | 85% 10.0M | 4.3G | 90% 20.0M | 6.6G | 93% 35.0M | 8.9G | 95% 50.0M | 11.6G | 96% 100.0M | 16.6G | 98% unlimited | 43.0G | 100% Note: these numbers will tend to increase over time, as the SBo repository grows. To be safe, add 25% or so to the total sizes above. "Coverage" is the percentage of all the URLs in all the .info files that will be kept in this archive. Notice that about 60% of the storage space is eaten up by 2% of the files, in the unlimited case. These large files are mostly games, if that influences your decision any. =head1 OPTIONS =over =item create Create archive. Used for initial archive creation, and for downloading new files to an existing archive when the size limit ($maxfilemegs, see B) is increased. Should be run interactively, from a login shell. Takes a long time to run and uses a lot of bandwidth. Log output goes to stdout, and is pretty verbose (redirecting to a file is recommended). If the archive already exists, existing files will be kept instead of being re-downloaded (provided of course their md5sums are correct). =item update Update archive, by checking the SBo git log and parsing any .info files that have changed since the last create or update. Should be run daily or weekly as a cron job. If there are are few or no changed download URLs, update should run quickly and not eat many resources. For each new URL, the file is downloaded and added to the archive, but the old file is *not* deleted (use 'sbosrcarch purge' to do that). =item purge [-r|--rebuild] Purge files from the archive that are no longer referenced by any .info file. Should be run monthly or quarterly as a cron job. This is more resource-intensive than an update, as it must read and parse every .info file in the SBo repository. If -r or --rebuild is given, the entire by-md5 tree is deleted and recreated. This shouldn't be needed unless $symlinks is changed. 
=item trim Gets rid of files that are in the archive, but are larger than the size limit. Should be run manually after lowering $maxfilemegs; there's no reason to run it any other time. =item check [-v] Checks the integrity and coverage of the archive. Reports at least these conditions: - dangling symlinks - invalid md5sums - files present in only one of by-name or by-md5 but not the other - extraneous files in the tree - generates a status report, giving the total size and coverage. Will not modify the archive in any way, but might recommend fixes. With -v, lists all SlackBuilds not covered by the archive. =item add [-f] [ ...] Manually add (possibly already downloaded) files to the archive. Use -f to skip the size limit checking, so your archive can include a few large files (perhaps because they're for builds you maintain). Files added this way will still be deleted by 'sbosrcarch trim', if they're larger than the limit. This is intended to let the mirror operator keep a few large files (over the maxfilemegs limit), or save bandwidth by using already-downloaded copies (e.g. of stuff that was built recently). If files are given after the category/prgnam argument, they will be used instead of downloading the URLs in the .info file (provided their md5sums match the .info file). Size limits are not checked for files added this way. =item add [...] Manually add local file(s) to the archive. As above, but the category/prgnam is discovered by parsing all the .info files and matching md5sums. This is a good bit slower, but it can handle files for many different category/prgnam at once. It's especially useful if you already have an archive of SBo sources that you want to convert to sbosrcarch format. The -f option is not supported (or needed) with this form of the add command. =item rm Manually remove files from the archive. All the files referenced by the .info file for / will be removed. 
...but the next update will re-add anything you remove, if it's less than the size limit. Mostly this is useful for manually-added files that are over the limit. =back =head1 FILES B (or B<.sbosrcarch.conf>) is the config file for sbosrcarch. It's searched for under both names in the current directory, the user's home directory, /etc/sbosrcarch, and /etc (in order). See the section B for details. The archive created by sbosrcarch consists of two top-level directories called B and B. All files are present in both hierarchies (but the by-md5 tree is hard or symbolic links, to save space). B is organized by the familiar category and PRGNAM, like SBo itself. Example: by-name/network/ifstatus/ifstatus-v1.1.0.tar.gz This makes it easy for humans to browse the archive and find the source file they're looking for. B contains the same files, but organized in a hierarchy based on the md5sum of the file, for automated systems to easily find the exact file needed. The same file as the example above would be found at: by-md5/f/4/f4d413f880754fd6677290160f8bc5d7/ifstatus-v1.1.0.tar.gz Notice there are two layers of subdirectory, named after the first two hex digits in the md5sum. Also, notice that the actual SlackBuilds and .info files are not present in the archive. There is one other directory of files used/maintained by sbosrcarch: a git clone of SBo's master git branch. This is cloned and updated automatically as needed, and shouldn't need to be messed with. If you need a git clone of SBo for some other purpose, create a separate one to avoid confusing sbosrcarch with your changes and pulls. =head1 CONFIG FILE TODO: document the config options here. For now, see the sample config file sbosrcarch.conf =head1 SERVER CONFIGURATION If you're planning to host a public archive, you'll need to make the $archivedir available via whatever protocols you support (HTTP, FTP, rsync, etc). This is the directory containing B and B. 
The git clone directory doesn't need to be served to the public. TODO: example Apache, proftpd, etc configs for serving up the archive. =head1 CLIENT-SIDE EXAMPLE TODO: shell script that parses an .info file and tries to d/l the source from the archive. =head1 NOTES sbosrcarch is written in perl, and is intended to work on at least Slackware 13.0 through 14.1, using only perl modules that ship with the OS (so no CPAN dependencies), plus an external wget executable for downloading files. If you want to run it on some other OS, it might need some extra packages installed and/or some slight porting work. If you want to keep a SBo source archive on your non-Slackware server, it might be easier to just rsync someone else's (that they build using this script). Note that there's no need to run sbosrcarch as root. In fact, it's recommended not to. Good choices for a user to run it as: - your everyday user you log in as - apache - nobody =head1 BUGS/LIMITATIONS Plenty of these, see FIXME TODO XXX comments in the code. Here are some that I'm not planning to address any time soon: No threading. Not likely to change. It would be possible to spawn wget processes in the background, but I'm not going to complicate it that way. It would mainly be useful for create mode, and hopefully each archive site only needs to do that once. There maybe should be a whitelist and a blacklist. The whitelist would be a list of builds (or entire categories) that you want to mirror all of, regardless of file size limits. The blacklist would be a list of builds or categories you don't want to mirror, ever. Probably I won't add this unless multiple people ask for it. Anything that checks referer header or otherwise tries to stop automated downloads, will stop us. This isn't really a bug (sbopkg can't handle them either). Usually the README will say "you must download the file with a browser" or such. 
You can still download the file manually and use "sbosrcarch add
category/prgnam filename.tar.gz" to add it to the archive... but
please pay attention to licensing! Some files (e.g. Oracle's Java)
don't allow redistribution, so please don't include them in your
archive.

Length: unspecified isn't handled (we just don't download these).
Specifically, dropbox URLs do this. Might add an option that controls
what to do about these, e.g. download & keep them all instead of
ignoring them all. Can still add them manually.

$sbogitdir and $archivedir must be located on the same filesystem, as
files are moved around by linking them. Not a major problem, just
thought I'd mention it.

=head1 AUTHOR

B. Watson

=cut

# use only modules that ship with Slackware, which pretty much
# means only modules that ship with core perl.
# use the 'legacy' 2.0 API for File::Path, since we want to support
# the older perl in Slackware 13.0.
use warnings;
use strict; # I hate strict, but I'll use it anyway...
use File::Temp qw/tempfile tempdir/;
use File::Find;
use Digest::MD5;
use Net::FTP;
use POSIX 'getcwd';
use File::Path qw/mkpath rmtree/;
use File::Copy 'copy';

# Package globals. The first group is set by the user's config file
# (read_config() below does "do $conffile", which assigns them); the
# rest are run counters and caches shared between the various modes.
our($sbogiturl, $sbogitdir, $archivedir, $maxfilemegs,
	$wgetargs, $symlinks, $wgetrc_contents, $wgetrc,
	%user_agent_overrides, @trim_empty_dirs, $skipcount, $urlcount,
	$archivecount, $attemptcount, $failcount, $dlcount, $nowarchived,
	$coverage, $purgebytes, $purgefiles, $trimcount, $trimbytes,
	%keep_filenames);
our %infofilecount;     # "cat/prg" => count of URLs not yet found (check mode)
our %parsedinfo;        # "cat/prg" => parse_info() hashref (check mode)
our $symlinkcount = 0;  # by-md5 entries that are symlinks (check mode)
our $hardlinkcount = 0; # by-md5 entries that are hardlinks (check mode)
our $filecount = 0;     # files seen in by-name tree (check mode)
our $filebytes = 0;     # total bytes in by-name tree (check mode)
our $actualfilecount = 0;
our $totalfiles = 0;

# read_config(): locate and execute the first config file found in
# ".", $HOME, /etc/sbosrcarch, /etc (trying both the dotted and
# undotted name in each dir). Dies if none found, or if the one found
# fails to parse. Validates required settings and fills in defaults
# for optional ones.
sub read_config {
	my $conf_used;
	my @configdirs = (
		".",
		$ENV{HOME},
		"/etc/sbosrcarch",
		"/etc",
	);
	for my $dir (@configdirs) {
		for my $file (qw/.sbosrcarch.conf sbosrcarch.conf/) {
			$_ = "$dir/$file";
			next unless -e $_;
			do $_;           # executes the config as perl code
			next if $!;      # "do" sets $! if the file couldn't be read
			die "reading config file $_: $@" if $@;
			$conf_used = $_;
			last;
		}
	}
	if($conf_used) {
		print "read config file: $conf_used\n";
	} else {
		die "can't find .sbosrcarch.conf or sbosrcarch.conf in any of the\n" .
			"following directories, giving up:\n" .
			join ("\n", @configdirs) . "\n";
	}
	# required stuff in the conf file:
	die "config file missing \$sbogiturl\n" unless defined $sbogiturl;
	die "config file missing \$sbogitdir\n" unless defined $sbogitdir;
	die "config file missing \$archivedir\n" unless defined $archivedir;
	# not required, but warn if it's missing:
	if((not defined $maxfilemegs) || ($maxfilemegs < 0)) {
		print "config file missing/invalid \$maxfilemegs, defaulting to 10\n";
		$maxfilemegs = 10;
	}
	# quietly use defaults if missing:
	$wgetargs = "" unless defined $wgetargs;
	$symlinks = "" unless defined $symlinks;
	# NOTE(review): the heredoc that populated $wgetrc_contents, and the
	# default %user_agent_overrides assignment that followed it, appear
	# to have been corrupted/truncated in this copy of the file (the
	# text from "<<" up to "=> 'wget'" is missing). Restore from
	# upstream before running. Left byte-identical below:
	if(not defined $wgetrc_contents) { $wgetrc_contents = < 'wget', ); } }

# url_to_filename, gets the filename part of a URL (after the last slash)
# and un-escapes any %XX sequences.
# Note: we *don't* do plus-to-space conversion here, as that's only
# for CGI params, not URLs in general. There are quite a few files
# called e.g. "c++-utils.tar.gz" that would get broken by it.
sub url_to_filename {
	my $u = shift;
	$u =~ s,.*/,,;
	$u =~ s,%([0-9A-F]{2}),chr(hex($1)),ge;
	return $u;
}

# parse a single .info file, return a hashref where keys = URL(s)
# and values are their md5sums. Returns undef (with a warning) if the
# file can't be opened. Dies on a malformed md5sum.
sub parse_info {
	local $/ = "";   # paragraph mode for the slurp below
	my $file = shift;
	open my $fh, "<", $file or do { warn "$file: $!"; return undef; };
	my $got = <$fh>;
	$got =~ s/\\\s*\n//gs; # join \ continuation lines
	$got =~ s/[ \t]+/ /g; # condense whitespace
	# NOTE(review): $1 is used below without checking that the match
	# succeeded; an .info file lacking a DOWNLOAD= or MD5SUM= line
	# would reuse a stale (or undef) $1 -- verify against real repo data.
	$got =~ /DOWNLOAD(?:_x86_64)?="([^"]+)"/;
	my @urls = split " ", $1;
	$got =~ /MD5SUM(?:_x86_64)?="([^"]+)"/;
	my @md5s = split " ", $1;
	for(@md5s) {
		die "bad md5sum in $file\n" unless /^[0-9a-f]{32}$/;
	}
	my %ret;
	for(@urls) {
		next if /^un(test|support)ed$/i;
		# NOTE(review): "print LIST, next if ..." relies on next aborting
		# the print's argument list; the message may never be printed.
		print "bad URL in $file (backtick)\n", next if /`/; # backticks should never occur!
		$ret{$_} = shift @md5s;
	}
	return \%ret;
}

# the download_* subs return:
# 0 - file too big (so skip it)
# positive integer - file size
# undef - download error (404, failed DNS, etc).
# FIXME: the above isn't really true, and the calling code doesn't
# check the return values as it should.

# download_http($url): HEAD first (to learn the size cheaply), then a
# real GET only if the HEAD succeeded and the size was acceptable.
sub download_http {
	my $url = shift;
	my $size = wget($url, 1); # HEAD request first
	# $size will be 0 for 'too big' or undef if the HEAD failed.
	if($size) {
		$size = wget($url, 0);
	}
	return $size;
}

# download_file($url): dispatch to the FTP or HTTP downloader based on
# the URL scheme. The file lands in the current directory.
sub download_file {
	my $url = shift;
	my $dlresult;
	if($url =~ /^ftp:/) {
		$dlresult = download_ftp($url);
	} else {
		$dlresult = download_http($url);
	}
	return $dlresult;
}

# user_agent($url): return a --user-agent='...' wget argument if the
# URL's hostname matches a pattern in %user_agent_overrides, else "".
# see %user_agent_overrides
sub user_agent {
	my $url = shift;
	my $ua = "";
	$url =~ m,^\w+://([^/]*)/,;
	my $site = $1;
	for (keys %user_agent_overrides) {
		$site =~ /$_/ && do {
			$ua = $user_agent_overrides{$_};
		};
	}
	$ua = "--user-agent='$ua'" if $ua;
	return $ua;
}

# return true if limit set and file size > limit.
# return false if no limit set, or file size <= limit.
sub toobig {
	return 0 if $maxfilemegs <= 0; # no limit
	return $_[0] > ($maxfilemegs * 1024 * 1024);
}

# wget_fake_head: What is a fake HEAD request?
# Various cloud-ey web servers don't support HEAD requests:
# github.com and bitbucket.org download links redirect to amazonaws.com,
# which returns 403 Forbidden for any HEAD request.
# googlecode.com always returns 404 Not Found for a HEAD request.
# some other servers don't return a Content-Length header for a HEAD
# request, but they do for a GET.
# We really want to know the file size, so we can decide whether or
# not to download it. If a HEAD request fails, we'll do a GET request
# instead, but stop the transfer as soon as we get the Content-Length
# header from wget.
# Due to buffering, wget still downloads the first 16K or so of the file,
# which gets discarded when we close its filehandle.
# We could do better
# than this by implementing the HTTP protocol in terms of IO::Socket::INET
# or such, but I'm not writing & debugging the mess that would turn into.
# Plus, core perl (and Slackware's perl) lacks SSL support.
# This gets called for any URL that doesn't return a Content-Length header
# in its HEAD request (for whatever reason, including because of a 404
# not found). Of course, a GET might not return a length header either,
# in which case the file won't be downloaded.
# It might be nice if wget supported a --fake-head option itself. Maybe I'll
# code it up & send a patch to the wget maintainers?
# I've just discovered a better way to do this:
# curl --head -L -sS -X GET $url
# Stops downloading and exits after the headers are received.
# Not as familiar with curl as I am with wget, have to see about
# options... and if this works as well as I expect, there's never going
# to be a need to do a real HEAD request!

# wget_fake_head($url): start a GET but stop reading as soon as the
# response headers end, returning the Content-Length in bytes, 0 if
# over the $maxfilemegs limit, or undef if no length could be found.
sub wget_fake_head {
	my $url = shift;
	our $wget_config_arg;
	my $cmd = "wget $wget_config_arg " .
		"--tries 1 --quiet -O- --save-headers " .
		user_agent($url) . " " .
		" $wgetargs " .
		"'$url'";
	#print "real HEAD failed, trying fake HEAD request: $cmd\n";
	# TODO: open3?
	open my $fh, "$cmd|" or return undef;
	my $size;
	while(<$fh>) {
		s/\r//;
		chomp;
		last if /^$/;    # blank line terminates the header block
		$size = $1 if /^Content-Length:\s+(\d+)/;
	}
	close $fh;   # discards whatever wget had already buffered
	if($size && toobig($size)) {
		printf " file too large: %0.2fMB\n", $size / (1024 * 1024);
		$skipcount++;
		$size = 0;
	} elsif(not defined $size) {
		print " can't determine file size, skipping\n";
	}
	return $size;
}

# wget() does a HEAD (or fake head, if HEAD fails), or GET (download),
# using an external wget process. Return value is the file size in bytes,
# or 0 for "too big", or undef for any error.
sub wget {
	my $url = shift;
	our $wget_config_arg;
	# single quotes would break out of the shell quoting below
	if($url =~ /'/) {
		print "! refusing to deal with URL \"$url\" due to embedded single-quote.\n" .
			"! please contact the maintainer of the SlackBuild to have this fixed.\n";
		return undef;
	}
	my $head = shift; # boolean, 0 = download (GET), 1 = HEAD request only
	my $size;
	my $fh;
	my $tmpdir = $ENV{TMPDIR} || $ENV{TMP} || "/tmp";
	# write the wgetrc once per run, as a self-deleting tempfile
	if(not defined $wgetrc) {
		($fh, $wgetrc) = tempfile("wgetrc.XXXXXXXX", DIR => $tmpdir, UNLINK => 1);
		print $fh $wgetrc_contents;
		close $fh;
	}
	# probe wget once to see whether it supports --config (newer wgets only)
	if(not defined $wget_config_arg) {
		$wget_config_arg = "";
		open my $fh, "wget --help|" or die "can't run wget: $!\n";
		while(<$fh>) {
			$wget_config_arg = "--config=$wgetrc" if /--config/;
		}
		close $fh;
		if(not $wget_config_arg) {
			print "| wget version is too old to support --config option.\n";
			print "| continuing without it...\n";
		}
	}
	my $outfile;
	($fh, $outfile) = tempfile("wget.out.XXXXXXXX", DIR => $tmpdir, UNLINK => 1);
	close $fh;
	# TODO: open3?
	# the -O is there to force the filename, in case of a redirect. newer
	# versions of wget don't actually need this, but it doesn't hurt.
	my $cmd = "wget $wget_config_arg " .
		user_agent($url) . " " .
		($head ? "--spider --tries 1" : "-O " . url_to_filename($url)) .
		" $wgetargs " .
		"'$url' " .
		">$outfile 2>&1";
	#" --referer='$url' " . # don't use, it breaks sourceforge
	my $retval = system($cmd);
	print "$cmd\n" if $retval != 0;
	# scrape wget's output for the Length: header it echoes
	open $fh, "<", "$outfile";
	while(<$fh>) {
		print " ! $_" if $retval != 0;
		/^Length:\s*(\d+).*\[(.*?)\]/ && do {
			$size = $1;
			# TODO: $content_type = $2, check for text/html or such
			if(toobig($size)) {
				printf " file too large: %0.2fMB\n", $size / (1024 * 1024);
				$skipcount++;
				$size = 0;
			}
		};
	}
	close $fh;
	unlink $outfile;
	# Grr. Some sites refuse HEAD requests, and some allow them but
	# don't return a Content-Length header. So we must resort to more
	# drastic measures.
	# FIXME: don't bother doing this if we got a DNS error from the HEAD.
	if($head && not(defined($size))) {
		return wget_fake_head($url);
	}
	return $size; # which might be undef!
}

# we could use wget for FTP links too, but doing it this way
# lets us check the filesize and do the download with only one
# FTP session.
sub download_ftp {
	my ($server, $dir, $filename) = ($_[0] =~ m,
		^ftp://   # proto
		([^/]+)   # server (no slashes)
		(/.*?)?   # optional path (always at least the initial slash)
		([^/]+)$  # filename (everything after last slash)
		,x);
	print "using Net::FTP to get $_[0]\n";
	my $size = undef;
	eval {
		my $ftp = Net::FTP->new($server, Debug => 0)
			or die "Can't connect to $server: $@";
		$ftp->login("anonymous",'-anonymous@')
			or die "Can't log in to $server: ", $ftp->message;
		$ftp->cwd($dir)
			or die "Can't chdir($dir) on $server: ", $ftp->message;
		$ftp->binary;
		$size = $ftp->size($filename)
			or die "Can't get $filename size from $server: ", $ftp->message;
		if(toobig($size)) {
			printf "file too large: %0.2fMB\n", $size / (1024 * 1024);
			$skipcount++;
			$size = 0;
		} else {
			$ftp->get($filename)
				or die "Can't download $filename from server: ", $ftp->message;
		}
		$ftp->quit;
	};
	if($@) {
		print "! $@";
		undef $size;
	}
	return $size;
}

# clone the SBo repo into $sbogitdir
sub git_clone {
	system('git', 'clone', $sbogiturl, $sbogitdir);
}

# pull in cwd; true on success (system() returns 0 on success)
sub git_pull {
	return !system('git', 'pull');
}

# md5_dir($md5): path of the by-md5 dir for a given md5sum, including
# the two single-hex-digit subdir levels. Trailing slash included.
sub md5_dir {
	my $md5 = shift;
	return "$archivedir/by-md5/" .
		substr($md5, 0, 1) .
		"/" .
		substr($md5, 1, 1) .
		"/" .
		$md5 .
		"/";
}

# name_dir($cat, $prg): path of the by-name dir, trailing slash included.
sub name_dir {
	my ($cat, $prg) = @_;
	return "$archivedir/by-name/$cat/$prg/";
}

# md5sum_file($filename): hex md5sum of a file's contents, or undef
# (with a message) if the file can't be read.
sub md5sum_file {
	my $filename = shift;
	open my $fh, "<", $filename or do {
		print "can't get md5sum of $filename: $!\n";
		return undef;
	};
	binmode($fh);
	my $ret = Digest::MD5->new->addfile($fh)->hexdigest;
	close $fh;
	return $ret;
}

# already_exists($filename, $category, $prgnam, $md5): true only if the
# file exists in *both* the by-name and by-md5 trees and *both* copies
# have the expected md5sum.
sub already_exists {
	my ($filename, $category, $prgnam, $md5) = @_;
	my $n = name_dir($category, $prgnam) . "/" . $filename;
	my $m = md5_dir($md5) . "/" . $filename;
	return -e $n &&
		-e $m &&
		($md5 eq md5sum_file($n)) &&
		($md5 eq md5sum_file($m));
	# bugfix: the last check used to be md5sum_file($n) a second time,
	# so a corrupt by-md5 copy was never detected.
}

# store_file($filename, $category, $prgnam, $md5): link a downloaded
# file (in cwd) into the by-name tree, and hardlink or symlink it
# (depending on $symlinks) into the by-md5 tree.
sub store_file {
	my ($filename, $category, $prgnam, $md5) = @_;
	#warn "store_file($filename, $category, $prgnam, $md5);\n";
	my $md5dir = md5_dir($md5);
	my $namedir = name_dir($category, $prgnam);
	mkpath($md5dir);
	mkpath($namedir);
	unlink($namedir . "/" . $filename); # rm -f old copy, if any
	link($filename, $namedir . "/" . $filename);
	if($symlinks) {
		# relative symlink, so the archive can be moved/rsynced wholesale
		symlink("../../../../by-name/" .
			$category . "/" . $prgnam . "/" . $filename,
			$md5dir . "/" . $filename);
	} else {
		link($filename, $md5dir . "/" . $filename);
	}
}

# handle_info_file() is used as the 'wanted' sub for File::Find, but
# it's also called from add and update modes, so it doesn't use any of
# the File::Find stuff. Call while cd'ed to $sbogitdir, with $_ set to
# the relative path to the .info file.
sub handle_info_file {
	return unless /\.info$/;
	my $dls = parse_info($_);
	s,^\./,,; # strip leading ./, if present
	my ($category, $prgnam) = split /\//, $_;
	print "=== $category/$prgnam\n";
	for(keys %$dls) {
		$urlcount++;
		my $url = $_;
		my $md5 = $dls->{$_};
		my $filename = url_to_filename($url);
		print ": $url\n";
		if(already_exists($filename, $category, $prgnam, $md5)) {
			print " already in archive, OK\n";
			$archivecount++;
		} else {
			$attemptcount++;
			download_file($url); # TODO: check result!
			if(! -f $filename) {
				$failcount++;
				print " not downloaded\n";
				next;
			}
			if(md5sum_file($filename) ne $md5) {
				$failcount++;
				print " md5sum failed\n";
				unlink($filename);
				next;
			}
			print " downloaded, OK\n";
			$archivecount++;
			$dlcount++;
			store_file($filename, $category, $prgnam, $md5);
			unlink($filename);
		}
	}
}

# init_git(): cd into the SBo git checkout, or die with a hint.
sub init_git {
	chdir($sbogitdir) && -d ".git" ||
		die "SBo git dir $sbogitdir not a git checkout, " .
			"do you need to run 'sbosrcarch create?'\n";
}

# create_mode: initial archive creation. Clones/updates the SBo repo,
# then walks every .info file, downloading anything not already
# archived, and prints a summary.
sub create_mode {
	chdir($sbogitdir) or git_clone;
	chdir($sbogitdir) or die "can't find or create SBo git dir $sbogitdir\n";
	git_clone() unless -d ".git";
	git_pull() or die "git pull failed, check $sbogitdir\n";
	$skipcount = $attemptcount = $urlcount =
		$archivecount = $dlcount = $failcount = $nowarchived = 0;
	find({wanted => \&handle_info_file, no_chdir => 1}, ".");
	$nowarchived = $dlcount + $archivecount;
	# bugfix: this used "%.1d", which prints a truncated integer, not
	# the intended one-decimal percentage (the header notes even say
	# "create_mode stats are wrong").
	$coverage = sprintf("%.1f", ($nowarchived * 100 / $urlcount));
	# NOTE(review): the original summary heredoc was corrupted/lost in
	# this copy of the file; reconstructed minimally -- verify wording
	# against upstream.
	print <<EOF;
---
create done.
$urlcount URLs total, $archivecount archived, $skipcount skipped (too big).
$attemptcount downloads attempted, $dlcount succeeded, $failcount failed.
Coverage: $coverage%
EOF
	exit 0;
}

# update_mode: note the current git commit, pull, then re-process only
# the .info files that changed between the old and new commits.
sub update_mode {
	my $oldcommit;
	init_git();
	# NOTE(review): the code that read the pre-pull commit hash was
	# corrupted/lost in this copy of the file; reconstructed minimally
	# (read the newest commit line from "git log") -- verify against
	# upstream.
	open my $fh, "git log|" or die "$!";
	my $logline = <$fh>;
	(undef, $oldcommit) = split /\s+/, $logline;
	print "git repo was at commit $oldcommit\n";
	close $fh;
	git_pull();
	# numstat lines are "added<TAB>deleted<TAB>path"; keep the path
	open $fh, "git diff --numstat $oldcommit|" or die "$!";
	while(<$fh>) {
		(undef, undef, $_) = split /\s+/;
		next unless /\.info$/;
		handle_info_file();
	}
	exit 0;
}

# purge_mode() does 3 passes.
# 1. get all the filenames from all the info files, build a hash of filenames.
# 2. walk the archive tree with File::Find and rm any file that's in a
# category/name dir, but not mentioned in the filename hash (also, rm its
# md5_dir() counterpart).
# 3. do a trim_post() pass to delete any empty dirs and/or dangling symlinks
# If --rebuild is given, pass 3 instead deletes the by-md5 tree and
# recreates it.
# FIXME: files from different URLs but with the same filename will not be
# purged when they should, because the comparison is solely filename-based!
# purge_mode: remove archived files no longer referenced by any .info
# file. With -r/--rebuild, the entire by-md5 tree is deleted and
# recreated from by-name instead of the normal cleanup pass.
sub purge_mode {
	my $rebuild = 0;
	shift @ARGV;
	if($ARGV[0]) {
		if($ARGV[0] =~ /^--?r(?:ebuild)?/) {
			$rebuild = 1;
		} else {
			die "Unknown option: $ARGV[0]\n";
		}
	}
	init_git();
	$purgebytes = $purgefiles = 0;
	# pass 1
	%keep_filenames = (); # populated by the find():
	find({wanted => \&purge_pass_1_wanted, no_chdir => 1}, ".");
	# for(keys %keep_filenames) {
	# warn "keep $_\n";
	# }
	# pass 2
	chdir($archivedir) or die "$archivedir: $!\n";
	find({wanted => \&purge_pass_2_wanted, no_chdir => 1}, "by-name");
	# pass 3
	if($rebuild) {
		rmtree("by-md5");
		print "Removed by-md5 tree, rebuilding\n";
		find({wanted => \&rebuild_wanted, no_chdir => 1}, "by-name");
	} else {
		trim_post();
	}
	printf("Purged $purgefiles files, %.1fMB\n", ($purgebytes / (1024 * 1024)));
	exit 0;
}

# helper for purge_mode, populates %keep_filenames with every filename
# mentioned in every .info file (run while cd'ed to $sbogitdir).
sub purge_pass_1_wanted {
	return unless /\.info$/;
	my $dls = parse_info($_);
	for(keys %$dls) {
		$_ = url_to_filename($_);
		$keep_filenames{$_}++;
	}
}

# helper for purge_mode, removes all files in category/prgnam/
# dirs that aren't listed in %keep_filenames (plus their by-md5
# counterparts).
sub purge_pass_2_wanted {
	s,^\./,,; # remove leading ./
	my (undef, $cat, $name, $file) = split /\//, $_;
	return unless defined $file;
	return if $keep_filenames{$file};
	print "purge $_\n";
	$purgebytes += -s $_; # tally size before the unlink below
	$purgefiles++;
	unlink md5_dir(md5sum_file($_)). "/$file";
	unlink $_;
}

# helper for purge_mode --rebuild: recreate each file's by-md5 entry
# (hardlink or symlink, per $symlinks) from its by-name copy.
sub rebuild_wanted {
	return unless -f;
	s,^\./,,; # remove leading ./
	my $md5dir = md5_dir(md5sum_file($_));
	my (undef, $category, $prgnam, $filename) = split /\//, $_;
	mkpath($md5dir);
	if($symlinks) {
		symlink("../../../../by-name/" .
			$category . "/" . $prgnam . "/" . $filename,
			$md5dir . "/" . $filename);
	} else {
		link($_, $md5dir . "/" . $filename);
	}
}

# helper for trim_mode: delete any file over the size limit.
sub trim_wanted {
	return unless -f $_;
	my $size = -s _;
	if(toobig($size)) {
		unlink($_);
		$trimcount++;
		$trimbytes += $size;
	}
}

# helper for trim_post: remove dangling symlinks, and collect empty
# directories into @trim_empty_dirs (see trim_post for why they can't
# be rmdir'ed from here).
sub trim_post_wanted {
	if(-l $_) {
		# bugfix: this used to test "! -e _", but "_" reuses the lstat
		# buffer from "-l", i.e. it tested the symlink itself (always
		# true), so dangling symlinks were never removed. "-e $_" stats
		# the link *target*.
		unlink $_ if ! -e $_;
		return; # never treat a symlink as a directory to rmdir
	}
	return unless -d _;
	# bugfix: this used to test "!<*>", but our find() runs with
	# no_chdir => 1 so the cwd never changes and <*> always globbed the
	# top-level directory -- empty dirs were never detected. Read the
	# directory actually being visited instead (and count dotfiles,
	# since rmdir would fail on a dir containing only dotfiles anyway).
	opendir my $dh, $_ or return;
	my @entries = grep { $_ ne "." && $_ ne ".." } readdir $dh;
	closedir $dh;
	push @trim_empty_dirs, $_ if !@entries;
}

# pass 2 of trim_mode, also called by purge_mode. removes
# empty directories and dangling symlinks.
sub trim_post {
	chdir($archivedir) or die "$archivedir: $!\n";
	# can't rmdir from within find's wanted sub, or we get
	# lots of 'Can't opendir()' warnings. So collect all the
	# empty dirs in an array during the find, then rmdir them
	# all in one swell foop afterwards.
	@trim_empty_dirs = ();
	# remove dangling symlinks and make a list of empty dirs
	find({wanted => \&trim_post_wanted, no_chdir => 1}, ".");
	rmdir $_ for @trim_empty_dirs; # the aforementioned swell foop
}

# this mode doesn't know/care about the git stuff, it operates purely
# on the archive file tree.
sub trim_mode {
	chdir($archivedir) or die "$archivedir: $!\n";
	$trimcount = $trimbytes = 0;
	# first pass: remove files that are too big
	find({wanted => \&trim_wanted, no_chdir => 1}, ".");
	# 2nd pass
	trim_post();
	printf("Trimmed $trimcount files, %.1fMB\n", ($trimbytes / (1024 * 1024)));
	exit 0;
}

# in: "category/name"
# out: "category/name/name.info"
sub find_info_file {
	my $info = shift;
	$info =~ s,/([^/]+)$,/$1/$1.info,;
	return $info;
}

# FIXME: this will fail if @localfiles are absolute paths!
# local_add($oldcwd, $catname, $info, @localfiles): add already-
# downloaded local files to the archive for $catname ("category/prgnam"),
# matched against the .info file by md5sum. Non-matching files are
# reported and skipped. Relative filenames are resolved against
# $oldcwd, the directory the user invoked us from (we chdir around).
sub local_add {
	my ($oldcwd, $catname, $info, @localfiles) = @_;
	$catname =~ s,^\./,,;
	my ($category, $prgnam) = split /\//, $catname;
	my %localmd5s;
	# map md5sum => full path for every file the user handed us
	for(@localfiles) {
		$localmd5s{md5sum_file("$oldcwd/$_")} = "$oldcwd/$_";
	}
	my $dls = parse_info($info);
	chdir($archivedir) or die "$archivedir: $!";
	for(keys %$dls) {
		my $targetfile = url_to_filename($_);
		my $md5 = $dls->{$_};
		my $localfile = $localmd5s{$md5};
		next unless $localfile;
		delete $localmd5s{$md5};
		# copy into cwd under the URL's filename, link it into both
		# trees, then remove the working copy
		copy($localfile, $targetfile);
		store_file($targetfile, $category, $prgnam, $md5);
		unlink($targetfile);
		print "added $targetfile for $category/$prgnam\n";
	}
	# whatever's left didn't match any md5sum in the .info file
	for(keys %localmd5s) {
		print "$localmd5s{$_} ($_) ignored: doesn't match any md5sum in $info\n";
	}
}

# File::Find 'wanted' helper for add_by_md5: builds %md5_to_dl, mapping
# every md5sum in every .info file to its "category/prgnam".
# NOTE(review): /\.info/ is unanchored here (other subs use /\.info$/),
# so e.g. "foo.info.bak" would also match -- verify intended.
sub add_by_md5_wanted {
	our %md5_to_dl;
	return unless /\.info/;
	s,\./,,;
	my ($category, $prgnam, undef) = split /\//;
	my $dls = parse_info($_);
	$md5_to_dl{$_} = "$category/$prgnam" for values %$dls;
}

# add_by_md5($oldcwd, @files): second form of 'add' -- no
# category/prgnam given, so discover where each file belongs by
# matching its md5sum against every .info file in the repo.
sub add_by_md5 {
	print "no category/prgnam, adding file(s) by md5sum\n";
	my $oldcwd = shift;
	our %md5_to_dl;
	find({wanted => \&add_by_md5_wanted, no_chdir => 1}, ".");
	for my $filename (@_) {
		my $infile = $filename;
		$infile = "$oldcwd/$infile" unless $infile =~ m,^/,;
		my $md5 = md5sum_file($infile);
		next unless defined $md5;
		my $catname = $md5_to_dl{$md5} or do {
			print "$filename ($md5) doesn't match any .info file, skipping\n";
			next;
		};
		my $info = find_info_file($catname) or do {
			print "can't find info file for $catname";
			next;
		};
		local_add($oldcwd, $catname, $info, $filename);
		chdir($sbogitdir); # local_add chdir'ed away; go back for next file
	}
}

# Entry point for both 'add' and 'rm' modes (which one is in $ARGV[0]).
# Dispatches to add_by_md5 (files given with no category/prgnam),
# local_add (files given after category/prgnam), handle_info_file
# (no files: download from the .info URLs), or does the removal.
sub add_or_rm_mode {
	my $oldcwd = POSIX::getcwd();
	init_git();
	my $mode = shift @ARGV;
	# if the first arg is an existing file, it's the by-md5 form of add
	if($mode eq 'add' && @ARGV && (-f $ARGV[0] || -f "$oldcwd/$ARGV[0]")) {
		add_by_md5($oldcwd, @ARGV);
		exit 0;
	}
	my $catname = shift @ARGV or usage();
	if($catname eq '-f') {
		$maxfilemegs = 0; # -f: disable the size limit for this add
		$catname = shift(@ARGV) or usage();
	}
	my $info = find_info_file($catname);
	if(! -f $info) {
		die "Can't find $info in repo\n";
	}
	if($mode eq "add") {
		if(!@ARGV) { # no args, use URL(s) in .info file
			$_ = $info;
			handle_info_file();
		} else {
			local_add($oldcwd, $catname, $info, @ARGV);
		}
	} elsif($mode eq "rm") {
		my $dls = parse_info($info);
		for(keys %$dls) {
			my $md5 = $dls->{$_};
			my $filename = url_to_filename($_);
			my ($category, $prgname) = split /\//, $catname;
			# the rmdirs quietly fail unless the dirs are now empty
			unlink(name_dir($category, $prgname) . "/$filename");
			rmdir(name_dir($category, $prgname));
			unlink(md5_dir($md5) . "/$filename");
			rmdir(md5_dir($md5));
		}
	} else {
		die "this never happens";
	}
	exit 0;
}

# check_mode() needs to do this:
# Find/parse all info files, building hashes of filenames and md5sums
# Find all files in by-name, make sure the md5sums match, make sure the
# by-md5 file exists and is either a hardlink or symlink to the by-name
# file. If the size is over the limit, make a note of it. If the file
# isn't found in the hash of filenames, it's extraneous (and so is its
# by-md5 counterpart).
# Do the same thing for the by-md5 tree, more or less. If both hard and
# symolic links are found, that fact will get reported (but only once!)
# Print a report.

# File::Find 'wanted' helper: validate one entry of the by-name tree
# against the parsed .info data in %parsedinfo.
sub check_byname_wanted {
	if(-d) {
		my (undef, $category, $prgnam, $extra) = split /\//;
		if(defined($extra)) {
			print "misplaced dir (not a category/prgnam): $_\n";
		}
		return;
	}
	return unless -f _;
	$filecount++;
	my $size = -s _;
	$filebytes += $size;
	s,^\./,,;
	my (undef, $category, $prgnam, $filename, $extra) = split /\//;
	if(!defined($filename) || defined($extra)) {
		print "misplaced file (not in a category/prgnam dir): $_\n";
		return;
	}
	my $shortname = join("/", $category, $prgnam, $filename);
	my $info = join("/", $sbogitdir, $category, $prgnam, $prgnam . ".info");
	if(!-f $info) {
		# NOTE(review): "$prgnam/$category" looks swapped relative to the
		# "category/prgnam" order used everywhere else -- verify.
		print "$shortname extraneous: no info file for $prgnam/$category\n";
		return;
	}
	my $dls = $parsedinfo{"$category/$prgnam"};
	my $md5 = md5sum_file($_);
	my $foundfile;
	# make $info short and printable (relative path only)
	$info = join("/", $category, $prgnam, $prgnam . ".info");
	for my $dl (keys %$dls) {
		my $infofilename = url_to_filename($dl);
		if($infofilename eq $filename) {
			$foundfile++;
			if($md5 ne $dls->{$dl}) {
				print "$info: $shortname: wrong md5sum (should be $dls->{$dl})\n";
			} else {
				# check by-md5 file existence only (check_bymd5_wanted will do more)
				my $md5file = md5_dir($md5) . "/" . $filename;
				if(! -e $md5file) {
					print "$info: $shortname: missing $md5file\n";
				}
			}
		}
	}
	if($foundfile) {
		# one more of this build's expected files accounted for
		$infofilecount{"$category/$prgnam"}--;
	} else {
		print "$shortname extraneous: not mentioned in $info\n";
	}
	if(toobig($size)) {
		$size = sprintf("%.1f", $size / (1024 * 1024));
		print "$shortname (${size}MB) exceeds file size limit (${maxfilemegs}MB)\n";
	}
}

# File::Find 'wanted' helper: validate one entry of the by-md5 tree
# (placement by md5sum, link type, content md5sum).
sub check_bymd5_wanted {
	return if -d;
	s,^\./,,;
	if(-l $_ && (! -e $_)) {
		print "dangling symlink: $_\n";
		return;
	}
	my $realmd5 = md5sum_file($_) || return;
	my (undef, $a, $b, $md5dir, $filename, $extra) = split /\//;
	if(!defined($filename) || defined($extra)) {
		print "$_: misplaced file (not in a a/b/md5sum dir)\n";
		return;
	}
	if(-l $_) {
		our $symlinkcount++;
	} else {
		my (undef, undef, undef, $nlink) = stat $_;
		if($nlink >= 2) {
			our $hardlinkcount++;
		} else {
			print "$_: not a symlink or hardlink\n";
		}
	}
	my $reala = substr($realmd5, 0, 1);
	my $realb = substr($realmd5, 1, 1);
	if($reala ne $a || $realb ne $b) {
		print "$_: wrong subdir (should be $reala/$realb/$realmd5)\n";
	}
	if($realmd5 ne $md5dir) {
		print "$_: md5sum mismatch\n";
	}
}

# File::Find 'wanted' helper: parse every .info file, priming
# %infofilecount, %parsedinfo and $totalfiles for the two tree checks.
sub check_info_wanted {
	return unless /\.info/;
	s,\./,,;
	my ($category, $prgnam, undef) = split /\//;
	my $dls = parse_info($_);
	$totalfiles += keys %$dls;
	$infofilecount{"$category/$prgnam"} += keys %$dls;
	$parsedinfo{"$category/$prgnam"} = $dls;
}

# check mode: verify archive integrity/coverage and print a report.
# With -v/--verbose, also list every build that's missing files.
# Read-only: never modifies the archive.
sub check_mode {
	shift @ARGV;
	my $verbose = ($ARGV[0] && $ARGV[0] =~ /^-*v(?:erbose)?$/);
	init_git();
	$|++; # unbuffer stdout so progress lines appear promptly
	print "* Parsing .info files...\n";
	find({wanted => \&check_info_wanted, no_chdir => 1}, ".");
	chdir($archivedir) or die "$archivedir: $!";
	print "* Checking by-name tree...\n";
	find({wanted => \&check_byname_wanted, no_chdir => 1}, "by-name");
	print "* Checking by-md5 tree...\n";
	find({wanted => \&check_bymd5_wanted, no_chdir => 1}, "by-md5");
	# any build whose expected-file count didn't reach zero is missing files
	my @missingfilebuilds;
	for(keys %infofilecount) {
		my $count = $infofilecount{$_};
		push @missingfilebuilds, $_ if $count;
	}
	if($verbose) {
		if(@missingfilebuilds) {
			print "Following SlackBuilds are missing files:\n";
			print " $_\n" for sort { $a cmp $b } @missingfilebuilds;
		} else {
			print "All SlackBuild download files present\n";
		}
	}
	if($symlinkcount && $hardlinkcount) {
		print "by-md5 contains both symlinks and hardlinks (harmless but messy)\n";
	}
	my $totalbuildcount = keys %infofilecount;
	my $missingbuildcount = @missingfilebuilds;
	my $completebuildcount = $totalbuildcount - $missingbuildcount;
	my $coverage = sprintf("%.1f", ($completebuildcount * 100 / $totalbuildcount));
	my $filemegs = sprintf("%.1fMB", $filebytes / (1024 * 1024));
	my $missingfiles = $totalfiles - $filecount;
	my $filecoverage = sprintf("%.1f", $filecount * 100 / $totalfiles);
	# NOTE(review): the report heredoc that ended check_mode(), plus the
	# start of sub usage() (its opening lines and heredoc opener), appear
	# to have been corrupted/lost in this copy of the file -- restore
	# from upstream before running. Left byte-identical below:
	print < is one of: create update purge trim check add [ ...] rm For full documentation try: perldoc $self
EOF
	exit 1
}

#main()
usage() unless defined $ARGV[0];
read_config();
# dispatch on the mode word; falls through to usage() if nothing matched
# (note: these are substring matches, so e.g. "recreate" would match /create/)
for ($ARGV[0]) {
	/create/ && do { create_mode(); };
	/update/ && do { update_mode(); };
	/purge/ && do { purge_mode(); };
	/add/ && do { add_or_rm_mode(); };
	/rm/ && do { add_or_rm_mode(); };
	/trim/ && do { trim_mode(); };
	/check/ && do { check_mode(); };
	usage();
}
__END__