1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
|
# Use bash with pipefail so a failure anywhere in a recipe pipeline
# fails the whole recipe, not just the last command.
SHELL=bash -o pipefail
# Put this repo's ./bin (cdxget, urlkey2url, wayfore, gitthing) first
# on PATH, and export it so recipes see it.
PATH:=$(CURDIR)/bin:$(PATH)
export PATH
# Encode a URL into a filesystem-safe name ("murl"): '^' is the escape
# character, so it is escaped FIRST (innermost subst) to avoid
# double-escaping, then ':' -> ^3A and '%' -> ^25.
url2murl = $(subst %,^25,$(subst :,^3A,$(subst ^,^5E,$1)))
# Inverse of url2murl; the substitutions run in the opposite order,
# un-escaping '^' LAST.
murl2url = $(subst ^5E,^,$(subst ^3A,:,$(subst ^25,%,$1)))
# Default goal: fetch everything from the Wayback Machine.
all: download
# Remove cached files that are actually Wayback "503 Service
# Unavailable" error pages, so the next `make download` re-fetches them.
# Fixes vs. the old recipe: -Z/-0 make the pipeline safe for any
# filename; `xargs -r` skips running `rm` when nothing matched; and
# grep's exit status 1 ("no matches" — not an error here) is translated
# to success so pipefail does not fail the target when there is nothing
# to fix.
fix:
	{ grep -rlZ '<html><body><h1>503' dat || [ $$? -eq 1 ]; } | xargs -0 -r rm -fv --
# Scratch/output directory; everything fetched lands under dat/.
dat:
	mkdir -p $@
# Query the Wayback Machine CDX API (via the cdxget helper in ./bin)
# for every 200-OK capture under www.unicode.org/Public/ whose urlkey
# mentions cvtutf/convertutf; emit one urlkey per line.
# Order-only dep on dat/: the directory must exist, but its timestamp
# must not trigger a re-download.
dat/cdxindex.txt: | dat
	cdxget 'url=www.unicode.org/Public/*' 'fl=urlkey' 'filter=statuscode:200' 'filter=urlkey:.*(cvt|convert)utf.*' > $@
# Strip any query string from each urlkey, then sort and de-duplicate.
# (Reads the single prerequisite directly, in the same `< $<` style as
# the other sed/grep recipes below.)
dat/urlkeys.txt: dat/cdxindex.txt
	< $< cut -d '?' -f1 | sort -u > $@
# Rewrite the urlkey list as make syntax (urlkeys+=<key>) so it can be
# -included below.  Bug fix: the old recipe piped `cat $^` into a sed
# whose stdin was ALSO redirected from $< — the redirect won, the
# pipe's reader vanished, and under pipefail `cat` could die of SIGPIPE
# and fail the recipe.  Read from $< directly instead.
dat/urlkeys.mk: dat/urlkeys.txt
	< $< sed 's/^/urlkeys+=/' > $@
# Generated file; `-` keeps the first pass from erroring before it
# exists — make rebuilds it and re-executes itself automatically.
-include dat/urlkeys.mk
# For one urlkey (the pattern stem), list every distinct capture
# (collapsed by digest) as "timestamp original-url" lines.
# `$$(...)` escapes the dollar from make so the SHELL runs
# `urlkey2url` (a helper in ./bin) at recipe time on the stem.
dat/each-cdx/%.txt:
	@mkdir -p '$(@D)'
	cdxget "url=$$(urlkey2url '$*')" 'filter=statuscode:200' 'collapse=digest' 'fl=timestamp,original' > '$@'
# Concatenate the per-urlkey capture lists (one file per urlkey from
# the -included urlkeys list) into a single sorted index.
# Each filename is single-quoted via foreach because urlkeys may
# contain characters the shell would otherwise interpret.
dat/index.txt: $(addprefix dat/each-cdx/,$(addsuffix .txt,$(urlkeys)))
	cat -- $(foreach c,$^,'$c') | sort > $@
# Turn "timestamp original-url" lines into make assignments of the
# form index+=web.archive.org/web/<timestamp>/<original-url>
# (prefix the line, then replace the separating space with '/').
dat/index.mk: dat/index.txt
	< $< sed 's,^,index+=web.archive.org/web/,;s, ,/,' > $@
# Generated file; `-` silences the missing-file warning on first run.
-include dat/index.mk
# Fetch one archived directory listing from the Wayback Machine; the
# stem is the murl-encoded URL, decoded back with murl2url.
# Fix: add -f so curl exits non-zero on an HTTP error page (e.g. the
# 503s the `fix` target otherwise has to clean up), letting
# .DELETE_ON_ERROR discard the bad output instead of caching it.
# -S still reports the error even though -s suppresses the progress bar.
dat/content-dir/%/index.wahtml:
	@mkdir -p '$(@D)'
	curl -sSf 'http://$(call murl2url,$*)' > $@
# Filter the raw archived page into clean HTML.
# NOTE(review): `wayfore` is a helper in ./bin; presumably it strips
# the Wayback Machine chrome/banner — confirm against bin/wayfore.
dat/content-dir/%/index.html: dat/content-dir/%/index.wahtml
	wayfore < $< > $@
# Extract the <pre>...</pre> body of the directory-listing page:
# print the line range (the `$$` is make-escaping for a literal `$`
# end-of-line anchor), then drop the opening <pre> line (1d) and
# everything from </pre> onward on the closing line.
dat/content-dir/%/readme.txt: dat/content-dir/%/index.html
	< $< sed -n '/^<pre>$$/,/<\/pre>/p' | sed -e 1d -e 's,</pre>.*,,' > $@
# Keep the per-entry rows of the listing (they begin with an <img>
# icon), strip all HTML tags, and drop the "Parent Directory" row.
dat/content-dir/%/metadata.txt: dat/content-dir/%/index.html
	< $< grep '^<img' | sed 's/<[^>]*>//g' | grep -vi 'parent directory' > $@
# URLs ending in '/' are directories; map each to its murl-encoded
# cache path under dat/content-dir/.  (Recursive `=` is required:
# $(index) is only populated after dat/index.mk is included.)
content-dir = $(foreach u,$(filter %/,$(index)),dat/content-dir/$(call url2murl,$(u)))
# For every directory capture, produce its readme and metadata.
download: $(addsuffix readme.txt,$(content-dir)) $(addsuffix metadata.txt,$(content-dir))
# Fetch one archived file from the Wayback Machine; the stem is the
# murl-encoded URL, decoded back with murl2url.
# Fix: add -f so curl fails (non-zero exit) on HTTP error pages,
# letting .DELETE_ON_ERROR remove the bogus output instead of caching
# an error page; -S keeps error reporting despite -s.
dat/content-file/%:
	@mkdir -p '$(@D)'
	curl -sSf 'http://$(call murl2url,$*)' > $@
# URLs NOT ending in '/' are plain files; fetch each one.
# (Recursive `=` for the same reason as content-dir above.)
content-file = $(foreach u,$(filter-out %/,$(index)),dat/content-file/$(call url2murl,$(u)))
download: $(content-file)
# Build a git history from the downloaded captures.
# NOTE(review): `gitthing` is a helper in ./bin; presumably it replays
# dat/index.txt into a repository at dat/git — confirm against
# bin/gitthing.
git: download
	gitthing dat/git < dat/index.txt
# These targets are commands, not files.
.PHONY: all fix download git
# Delete a target whose recipe failed, so a half-written file never
# looks up to date.
.DELETE_ON_ERROR:
# Keep all intermediate files (e.g. index.wahtml) instead of letting
# make auto-delete them after the chain completes.
.SECONDARY:
|