# Scrape archived copies of www.unicode.org/Public/* (cvtutf/convertutf) from
# the Internet Archive Wayback Machine, using helper tools (cdxget,
# urlkey2url, wayfore, gitthing) -- presumably shipped in ./bin; confirm.

# Use bash with pipefail so a failure anywhere in a recipe pipeline fails the
# whole recipe.  (GNU make treats extra words in SHELL as arguments to the
# shell.)
SHELL=bash -o pipefail
# Put this repo's bin/ first on PATH and export it so recipes find the
# helper tools.
PATH:=$(CURDIR)/bin:$(PATH)
export PATH

# url2murl/murl2url: encode/decode a URL into a form safe to use as a file
# name and make target ("murl"): '^' -> ^5E, ':' -> ^3A, '%' -> ^25.
# Note the two directions apply the substitutions in opposite orders so the
# escape character '^' itself round-trips correctly.
url2murl = $(subst %,^25,$(subst :,^3A,$(subst ^,^5E,$1)))
murl2url = $(subst ^5E,^,$(subst ^3A,:,$(subst ^25,%,$1)))

# Default goal: fetch everything.
all: download
# fix: delete cached downloads that are actually Wayback Machine "503
# Service Unavailable" error pages, so a later `make download` re-fetches
# them (the curl recipes below save error bodies too).
# NOTE(review): the original grep pattern was corrupted -- an HTML tag was
# stripped out, leaving a literal newline inside the quoted string, which is
# not valid in a Makefile recipe.  '<h1>503' is a best-effort
# reconstruction; confirm against an actual saved error page.
# The leading '-' tolerates grep's nonzero exit when nothing matches.
fix:
	-grep -rl -- '<h1>503' dat | xargs rm -fv --
# All downloaded and derived data lives under dat/.
dat:
	mkdir -p $@
# Query the Wayback CDX API (via cdxget) for the urlkeys of every
# successfully archived (HTTP 200) page under www.unicode.org/Public/ whose
# key mentions cvtutf or convertutf.  Order-only prerequisite: we only need
# dat/ to exist; its timestamp must not trigger rebuilds.
dat/cdxindex.txt: | dat
	cdxget 'url=www.unicode.org/Public/*' 'fl=urlkey' 'filter=statuscode:200' 'filter=urlkey:.*(cvt|convert)utf.*' > $@
# Drop any query-string suffix from each urlkey and de-duplicate the list.
dat/urlkeys.txt: dat/cdxindex.txt
	< $< cut -d '?' -f1 | sort -u > $@
# Rewrite the urlkey list as make syntax ("urlkeys+=<key>" per line) so it
# can be -include'd below.
# Fix: the original recipe was `cat $^ | sed ... < $<`, which both piped the
# file into sed AND redirected sed's stdin from the same file; the redirect
# wins and the cat output was silently discarded.  Keep just the redirect.
dat/urlkeys.mk: dat/urlkeys.txt
	sed 's/^/urlkeys+=/' < $< > $@
# The leading '-' suppresses the "file not found" error on the first run,
# before dat/urlkeys.mk has been generated.
-include dat/urlkeys.mk
# For each urlkey, fetch its per-URL capture index: one "timestamp original"
# line per distinct capture (collapse=digest skips captures whose content
# hash is unchanged).  $$(...) is doubled so the shell, not make, runs the
# urlkey2url command substitution.
dat/each-cdx/%.txt:
	@mkdir -p '$(@D)'
	cdxget "url=$$(urlkey2url '$*')" 'filter=statuscode:200' 'collapse=digest' 'fl=timestamp,original' > '$@'
# Merge all per-urlkey capture indexes into one sorted master index.
# The $(foreach ...) wraps each prerequisite path in single quotes for the
# shell (paths contain '^' escape sequences from url2murl).
dat/index.txt: $(addprefix dat/each-cdx/,$(addsuffix .txt,$(urlkeys)))
	cat -- $(foreach c,$^,'$c') | sort > $@
# Rewrite each "timestamp original" line into a make assignment:
# "index+=web.archive.org/web/<timestamp>/<original>".
dat/index.mk: dat/index.txt
	< $< sed 's,^,index+=web.archive.org/web/,;s, ,/,' > $@
# Tolerate the file not existing yet (first run).
-include dat/index.mk
# Fetch an archived directory listing.  The '%' stem is the encoded ("murl")
# form of the web.archive.org URL; decode it before fetching.
# NOTE(review): no --fail, so HTTP error bodies (e.g. 503 pages) get saved
# as well -- the `fix` target exists to clean those up afterwards.
dat/content-dir/%/index.wahtml:
	@mkdir -p '$(@D)'
	curl -s -o '$@' 'http://$(call murl2url,$*)'
# Filter the raw archived page through wayfore to recover the original HTML.
# wayfore is a helper tool (presumably strips the Wayback Machine's injected
# chrome/URL rewriting -- its semantics are not visible here; confirm).
dat/content-dir/%/index.html: dat/content-dir/%/index.wahtml
	wayfore < $< > $@
# Extract the human-readable README text a directory listing shows inside
# its <pre> block: keep the <pre>...</pre> region, drop the first (heading)
# line, and cut everything from the closing </pre> onward.
# NOTE(review): the original sed expressions were corrupted by an
# HTML-tag-stripping pass (a bare /^$/ address, and a literal newline where
# a tag sat inside the second expression).  '/^<pre>$/' and 's,</pre>.*,,'
# are reconstructions; verify against a real archived listing.
dat/content-dir/%/readme.txt: dat/content-dir/%/index.html
	< $< sed -n '/^<pre>$$/,/<\/pre>/p' | sed -e 1d -e 's,</pre>.*,,' > $@
# Extract the file-listing rows: Apache-style listings start each entry line
# with an <img> icon.  Keep those lines, strip all HTML tags, and drop the
# "Parent Directory" entry.
# NOTE(review): the original recipe read `grep '^]*>//g'` -- the residue of
# a tag-stripping pass that ate everything from '<img' to the next '>'.
# Reconstructed as grep '^<img' | sed 's/<[^>]*>//g'; verify against a real
# archived listing.
dat/content-dir/%/metadata.txt: dat/content-dir/%/index.html
	< $< grep '^<img' | sed 's/<[^>]*>//g' | grep -vi 'parent directory' > $@
# Archived URLs ending in '/' are directory listings; map each to its local
# dat/content-dir/ path (in encoded murl form).
content-dir = $(foreach u,$(filter %/,$(index)),dat/content-dir/$(call url2murl,$(u)))
# For every directory, build both the readme and the file-listing metadata
# (which transitively fetches and filters the listing page).
download: $(addsuffix readme.txt,$(content-dir)) $(addsuffix metadata.txt,$(content-dir))
# Fetch an archived regular file; the '%' stem is the encoded ("murl")
# web.archive.org URL, decoded back with murl2url before fetching.
dat/content-file/%:
	@mkdir -p '$(@D)'
	curl -s -o '$@' 'http://$(call murl2url,$*)'
# Archived URLs NOT ending in '/' are regular files; fetch each one.
content-file = $(foreach u,$(filter-out %/,$(index)),dat/content-file/$(call url2murl,$(u)))
download: $(content-file)
# Build a git repository at dat/git out of the downloaded snapshots.
# gitthing is a helper tool reading the capture index on stdin -- its exact
# semantics are not visible here; confirm in ./bin.
git: download
	gitthing dat/git < dat/index.txt
# These targets are commands, not files.
.PHONY: all fix download git
# Delete a target whose recipe fails, so a half-written download never looks
# up to date.
.DELETE_ON_ERROR:
# Keep all intermediate files (e.g. index.wahtml) instead of auto-deleting
# them after the chain completes.
.SECONDARY: