#!/bin/sh
# 20150827 bkw: attempt to find missing source tarballs
SELF=$( basename $0 )
# to add a repo, list its name here, and write a <name>_download()
# function, which should return success if a file was downloaded
# and failure otherwise.
repos="wayback naptime tld ponce sfdirect gentoo netbsd freebsd debian"
usage() {
cat <<EOF
$SELF - attempt to find slackbuilds.org sources
Usage: $SELF [-t] [path]
path is either a .info file or a directory containing one.
If no path is given, the current directory is assumed.
Files are searched for in the following repositories:
$repos
If a source file is found, it's downloaded to the current directory.
The md5sum is checked, and the file is discarded if it doesn't match.
Exit status is success (0) if a file was found, otherwise nonzero.
-t is test mode: it does an HTTP HEAD request instead of downloading,
and doesn't stop after finding a match.
EOF
exit "$1"
}
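# Example invocations (script name and paths are placeholders):
#   $SELF ~/slackbuilds/network/foo    # fetch foo's source into the current dir
#   $SELF -t foo.info                  # only check availability, don't download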
die() {
echo "$SELF:" "$@" 1>&2
exit 1
}
read_info_file() {
case "$1" in
"") dir=. ;;
"*.info") file="$1" ;;
*) dir="$1" ;;
esac
if [ "$dir" != "" ]; then
file="$dir"/*.info
fi
if [ ! -f $file ]; then
die "Can't find .info file matching $file"
fi
. $file  # 'source' is a bashism; plain sh spells it '.'
# snarfed straight from template.SlackBuild:
if [ -z "$ARCH" ]; then
case "$( uname -m )" in
i?86) ARCH=i486 ;;
arm*) ARCH=arm ;;
*) ARCH=$( uname -m ) ;;
esac
fi
# TODO: handle DOWNLOAD_x86_64 and MD5SUM_x86_64
# if [ "$ARCH" = "x86_64" ]; then
# fi
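# One possible (untested) sketch for the TODO above:
#   if [ "$ARCH" = "x86_64" ] && [ -n "$DOWNLOAD_x86_64" ]; then
#     DOWNLOAD="$DOWNLOAD_x86_64"
#     MD5SUM="$MD5SUM_x86_64"
#   fi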
}
do_wget() {
url="$1"
shift
echo wget $wgetopts $@ $url
wget $wgetopts $@ $url
}
# ponce's server returns 200 OK status for its 404 page, hence the ugly:
ponce_download() {
do_wget "http://ponce.cc/slackware/sources/repo/$dlfile" 2>&1 | tee tmp.$$
ret=$?
grep -q '^Length.*text/html' tmp.$$ && ret=1
rm -f tmp.$$
return $ret
}
# The user agent is set explicitly because sourceforge behaves differently
# if it thinks you're using a browser, and some of us set the user agent
# to firefox in .wgetrc because that fixes downloads from most other sites
# that check it.
sfdirect_download() {
do_wget "http://downloads.sourceforge.net/project/slackbuildsdirectlinks/$PRGNAM/$dlfile" --user-agent wget
}
gentoo_download() {
do_wget "http://ftp.osuosl.org/pub/gentoo/distfiles/$dlfile"
}
freebsd_download() {
do_wget "http://distcache.FreeBSD.org/ports-distfiles/$dlfile"
}
netbsd_download() {
do_wget "http://ftp.netbsd.org/pub/pkgsrc/distfiles/$dlfile"
}
# debian's tricky because they rename the files: all lowercase, an
# underscore between name and version, and .orig added before the
# filename extension. Also they're fanned out into subdirs, see
# http://http.debian.net/debian/pool/main/
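# For example, a (hypothetical) Foo_Bar-1.2.3.tar.gz would be fetched as
# foo-bar_1.2.3.orig.tar.gz from pool/main/f/foo-bar/, and a libfoo-1.0.tar.gz
# from pool/main/libf/libfoo/.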
debian_download() {
case "$dlfile" in
*.tar.*) ext="$( echo $dlfile | sed 's,.*\.\(tar\..*\)$,\1,' )" ;;
*) ext="$( echo $dlfile | sed 's,.*\.\([^.]*\)$,\1,' )" ;;
esac
prog="$( echo $dlfile | tr A-Z a-z | sed 's,_,-,g' | sed "s,\.$ext\$,," )"
ver="$( echo $prog | rev | cut -d- -f1 | rev )"
prog="$( echo $prog | rev | cut -d- -f2- | rev )"
case "$prog" in
lib*) subdir="$( echo $prog | head -c4 )" ;;
*) subdir="$( echo $prog | head -c1 )" ;;
esac
#echo "prog='$prog' ver='$ver' ext='$ext' subdir='$subdir'"
debfile=${prog}_$ver.orig.$ext
do_wget "http://http.debian.net/debian/pool/main/$subdir/$prog/$debfile"
ret=$?
mv "$debfile" "$dlfile" 2>/dev/null
return $ret
}
# my own archive. Not well populated yet.
naptime_download() {
do_wget "http://urchlay.naptime.net/~urchlay/src/$dlfile"
}
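# tld-linux fans its distfiles out by md5sum: by-md5/<1st char>/<2nd char>/
# <full md5>/<file>, so a sum starting "d4..." lands under by-md5/d/4/d4.../;
# the recursive wget below grabs whatever file lives in that directory.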
tld_download() {
dir="$( echo $dlmd5 | cut -b1 )/$( echo $dlmd5 | cut -b2 )/$dlmd5"
do_wget "http://df.tld-linux.org/distfiles/by-md5/$dir/" -r -l1 -nH -np -nd -Rdesc -Rindex.html\*
}
# https://archive.org/help/wayback_api.php
# json_pp included in slackware's perl package
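# The availability API replies with JSON shaped roughly like
#   {"archived_snapshots": {"closest": {"url": "...", ...}}}
# which is what the perl one-liner below pulls the url out of; if there's
# no snapshot, url comes back empty and we give up.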
wayback_download() {
url=$(
wget -O- "http://archive.org/wayback/available?url=$srcurl" | \
json_pp -f json -t dumper | \
perl -e 'undef $/; $_=<>; eval $_; print $VAR1->{archived_snapshots}->{closest}->{url};'
)
if [ "$url" = "" ]; then
return 1
fi
do_wget "$url"
}
check_file() {
if [ ! -f "$dlfile" ]; then
echo "Nothing downloaded"
return 1
fi
gotmd5="$( md5sum "$dlfile" | cut -d' ' -f1 )"
if [ "$gotmd5" != "$dlmd5" ]; then
echo "md5sum doesn't match, should be $dlmd5, got $gotmd5"
rm -f "$dlfile"
return 1
fi
return 0
}
# main()
if [ "$1" = "--help" ]; then
usage 0
fi
if [ "$1" = "-t" ]; then
wgetopts="--spider --tries 1" # might want -S here too
testmode=1
shift
else
wgetopts="--content-disposition"
fi
if [ "$#" -gt 1 ]; then
usage 1
fi
echo "-=- $1"
read_info_file "$1"
# pair each URL in $DOWNLOAD with the corresponding md5sum in $MD5SUM
set $MD5SUM
for srcurl in $DOWNLOAD; do
dlfile="$( basename "$srcurl" )"
dlmd5="$1"
shift
for repo in $repos; do
echo
echo "Trying $repo:"
eval ${repo}_download
got="$?"
if [ "$testmode" = "1" ]; then
[ "$got" = "0" ] && found=1
else
check_file && found=1 && break
fi
done
done
if [ "$found" = "1" ]; then
echo
echo "=== Found $dlfile"
exit 0
fi
echo "--- FAILED to find $dlfile"
exit 1