author     root <root@rshg047.dnsready.net>  2011-07-03 23:10:20 +0000
committer  root <root@rshg047.dnsready.net>  2011-07-03 23:10:20 +0000
commit     067b127a853780b2b4ae7236dcdaaf72396dfa86 (patch)
tree       ec55d0d4115d7c9fc0eb9e1a82a2553f4e0cd3aa
parent     1732308adb7885b00fc388f978e65b3ad15aa067 (diff)
Sun Jul 3 23:10:20 UTC 2011
-rw-r--r--  community-staging/geos/PKGBUILD  30
-rw-r--r--  community-staging/geos/geos.changelog  5
-rw-r--r--  community-staging/qgis/PKGBUILD  59
-rw-r--r--  community-staging/qgis/qgis.desktop  10
-rw-r--r--  community-staging/qgis/ticket_3378.diff  38
-rw-r--r--  community-testing/dsniff/PKGBUILD  36
-rw-r--r--  community-testing/exim/PKGBUILD  74
-rw-r--r--  community-testing/exim/aliases  35
-rwxr-xr-x  community-testing/exim/exim  42
-rw-r--r--  community-testing/exim/exim.Makefile  1227
-rw-r--r--  community-testing/exim/exim.changelog  21
-rw-r--r--  community-testing/exim/exim.conf.d  1
-rw-r--r--  community-testing/exim/exim.install  25
-rw-r--r--  community-testing/exim/exim.logrotate  5
-rw-r--r--  community-testing/libgda3/PKGBUILD  27
-rw-r--r--  community-testing/librcc/PKGBUILD  53
-rw-r--r--  community-testing/librcc/librcc-strnlen.patch  17
-rw-r--r--  community-testing/librcc/librcc.install  6
-rw-r--r--  community-testing/perl-berkeleydb/PKGBUILD  32
-rw-r--r--  community-testing/perl-libapreq2/PKGBUILD  31
-rw-r--r--  community-testing/perl-xml-libxslt/PKGBUILD  30
-rw-r--r--  community-testing/poedit/PKGBUILD  30
-rw-r--r--  community-testing/poedit/poedit.install  11
-rw-r--r--  community-testing/python-bsddb/LICENSE  19
-rw-r--r--  community-testing/python-bsddb/PKGBUILD  62
-rw-r--r--  community-testing/wml/PKGBUILD  42
-rw-r--r--  community-testing/xemacs/PKGBUILD  65
-rw-r--r--  community-testing/xemacs/xemacs-21.5.29-optimization-bug.patch  14
-rw-r--r--  community-testing/xemacs/xemacs.desktop  34
-rw-r--r--  community-testing/xemacs/xemacs.install  35
-rw-r--r--  community/autofs/PKGBUILD  15
-rw-r--r--  community/calibre/PKGBUILD  6
-rw-r--r--  community/fotoxx/PKGBUILD  4
-rw-r--r--  community/mongodb/PKGBUILD  9
-rw-r--r--  community/mongodb/mongodb.conf  4
-rwxr-xr-x  community/mongodb/mongodb.install  19
-rwxr-xr-x  community/mongodb/mongodb.rc  2
-rw-r--r--  community/nodejs/PKGBUILD  58
-rw-r--r--  core/cronie/PKGBUILD  12
-rw-r--r--  core/sqlite3/PKGBUILD  10
-rw-r--r--  extra/irqbalance/Fix-detection-of-CPUs-in-sysfs.patch  35
-rw-r--r--  extra/irqbalance/PKGBUILD  30
-rw-r--r--  extra/irqbalance/Special-interrupt-counts-line-NMI-may-start-with-a-s.patch  67
-rw-r--r--  extra/irqbalance/current-trunk.patch  217
-rw-r--r--  extra/qjackctl/PKGBUILD  13
-rw-r--r--  multilib/lib32-v4l-utils/PKGBUILD  8
-rw-r--r--  testing/apr-util/PKGBUILD  28
-rw-r--r--  testing/bogofilter/PKGBUILD  34
-rw-r--r--  testing/claws-mail/PKGBUILD  60
-rw-r--r--  testing/claws-mail/claws-mail.install  13
-rw-r--r--  testing/claws-mail/claws-notify-crash.patch  18
-rw-r--r--  testing/db/PKGBUILD  32
-rw-r--r--  testing/db/db.install  5
-rw-r--r--  testing/evolution-data-server/PKGBUILD  29
-rw-r--r--  testing/evolution-exchange/PKGBUILD  33
-rw-r--r--  testing/evolution-exchange/evolution-exchange.install  17
-rw-r--r--  testing/iproute2/PKGBUILD  50
-rw-r--r--  testing/iproute2/iproute2-fhs.patch  84
-rw-r--r--  testing/libetpan/PKGBUILD  26
-rw-r--r--  testing/libproxy/PKGBUILD  40
-rw-r--r--  testing/libreoffice/PKGBUILD  10
-rw-r--r--  testing/libreoffice/libreoffice.install  2
-rw-r--r--  testing/libsasl/PKGBUILD  54
-rw-r--r--  testing/libsasl/cyrus-sasl-2.1.19-checkpw.c.patch  170
-rw-r--r--  testing/libsasl/cyrus-sasl-db.patch  34
-rw-r--r--  testing/moc/PKGBUILD  44
-rw-r--r--  testing/moc/gcc-undefined-symbols.diff  12
-rw-r--r--  testing/mod_perl/PKGBUILD  27
-rw-r--r--  testing/openldap/PKGBUILD  107
-rwxr-xr-x  testing/openldap/slapd  48
-rw-r--r--  testing/openldap/slapd.default  6
-rw-r--r--  testing/pam/PKGBUILD  56
-rw-r--r--  testing/pam/other  5
-rw-r--r--  testing/pam/pam.install  12
-rw-r--r--  testing/perl/0001-Append-CFLAGS-and-LDFLAGS-to-their-Config.pm-counter.patch  83
-rw-r--r--  testing/perl/ChangeLog  66
-rw-r--r--  testing/perl/PKGBUILD  103
-rw-r--r--  testing/perl/fix-h2ph-and-tests.patch  104
-rw-r--r--  testing/perl/perl.install  18
-rw-r--r--  testing/perl/perlbin.csh  15
-rwxr-xr-x  testing/perl/perlbin.sh  18
-rw-r--r--  testing/php/PKGBUILD  379
-rw-r--r--  testing/php/apache.conf  13
-rw-r--r--  testing/php/logrotate.d.php-fpm  6
-rw-r--r--  testing/php/php-fpm.conf.in.patch  80
-rw-r--r--  testing/php/php.ini.patch  126
-rw-r--r--  testing/php/rc.d.php-fpm  158
-rw-r--r--  testing/php/suhosin-patch-5.3.6-0.9.10.patch.gz  bin 0 -> 40881 bytes
-rw-r--r--  testing/postfix/PKGBUILD  64
-rw-r--r--  testing/postfix/aliases.patch  18
-rw-r--r--  testing/postfix/install  35
-rwxr-xr-x  testing/postfix/rc.d  36
-rw-r--r--  testing/python2/PKGBUILD  83
-rw-r--r--  testing/rasqal/PKGBUILD  29
-rw-r--r--  testing/redland/PKGBUILD  78
-rw-r--r--  testing/redland/rpath.diff  11
-rw-r--r--  testing/ruby/PKGBUILD  49
-rw-r--r--  testing/soprano/PKGBUILD  35
-rw-r--r--  testing/subversion/PKGBUILD  100
-rw-r--r--  testing/subversion/subversion-perl-bindings.patch  12
-rw-r--r--  testing/subversion/subversion.rpath.fix.patch  10
-rw-r--r--  testing/subversion/subversion.suppress.deprecation.warnings.patch  22
-rw-r--r--  testing/subversion/svn  11
-rw-r--r--  testing/subversion/svnmerge.py  2370
-rwxr-xr-x  testing/subversion/svnserve  42
-rw-r--r--  testing/subversion/svnserve.conf  7
106 files changed, 7790 insertions, 72 deletions
diff --git a/community-staging/geos/PKGBUILD b/community-staging/geos/PKGBUILD
new file mode 100644
index 000000000..aa97623f2
--- /dev/null
+++ b/community-staging/geos/PKGBUILD
@@ -0,0 +1,30 @@
+# $Id: PKGBUILD 50911 2011-07-02 23:00:14Z stephane $
+# Maintainer: Jaroslav Lichtblau <dragonlord@aur.archlinux.org>
+# Contributor: dibblethewrecker dibblethewrecker.at.jiwe.dot.org
+# Contributor: William Rea <sillywilly@gmail.com>
+
+pkgname=geos
+pkgver=3.3.0
+pkgrel=1
+pkgdesc="A C++ port of the Java Topology Suite"
+arch=('i686' 'x86_64')
+url="http://trac.osgeo.org/geos/"
+license=('LGPL')
+depends=('gcc-libs')
+options=('!libtool')
+changelog=$pkgname.changelog
+source=(http://download.osgeo.org/${pkgname}/${pkgname}-${pkgver}.tar.bz2)
+md5sums=('3301f3d1d747b95749384b8a356b022a')
+
+build() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+
+ ./configure --prefix=/usr
+ make
+}
+
+package() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+
+ make DESTDIR="${pkgdir}" install
+}
diff --git a/community-staging/geos/geos.changelog b/community-staging/geos/geos.changelog
new file mode 100644
index 000000000..d0a376568
--- /dev/null
+++ b/community-staging/geos/geos.changelog
@@ -0,0 +1,5 @@
+2010-11-14 Jaroslav Lichtblau <dragonlord@aur.archlinux.org>
+ * Update to major release 3.2.2
+
+2010-04-04 Jaroslav Lichtblau <dragonlord@aur.archlinux.org>
+ * Update to major release 3.2.1
diff --git a/community-staging/qgis/PKGBUILD b/community-staging/qgis/PKGBUILD
new file mode 100644
index 000000000..ef4bf030a
--- /dev/null
+++ b/community-staging/qgis/PKGBUILD
@@ -0,0 +1,59 @@
+# Maintainer: Thomas Dziedzic < gostrc at gmail >
+# Contributor: dibblethewrecker dibblethewrecker.at.jiwe.dot.org
+# Contributor: Gerardo Exequiel Pozzi <vmlinuz386@yahoo.com.ar>
+
+pkgname=qgis
+pkgver=1.7.0
+pkgrel=2
+pkgdesc='A Geographic Information System (GIS) that supports vector, raster & database formats'
+url='http://qgis.org/'
+license=('GPL')
+arch=('i686' 'x86_64')
+# update to http://www.qgis.org/wiki/Building_QGIS_from_Source#Overview
+depends=('libmysqlclient' 'postgresql-libs' 'sqlite3' 'jasper' 'curl' 'qt' 'python2' 'python2-qt' 'giflib' 'xerces-c' 'cfitsio' 'qwt' 'gdal')
+makedepends=('cmake' 'grass' 'gsl' 'postgis' 'netcdf' 'fcgi' 'python2-sip' 'txt2tags')
+optdepends=('postgis: postgis support and SPIT plugin'
+ 'fcgi: qgis mapserver'
+ 'python2-sip: python-support'
+ 'grass: grass plugin'
+ 'gsl: georeferencer ')
+options=('!makeflags')
+source=("http://qgis.org/downloads/qgis-${pkgver}.tar.bz2"
+ 'qgis.desktop'
+ 'ticket_3378.diff')
+md5sums=('d8506990f52563d39c7b916f500f282f'
+ '8ab66039f2aba519b92f52272ec3c13e'
+ '402f3d39a344367fd55bdaa324deb797')
+
+build() {
+  # Fixing the insecure RPATH this way is weird, but it just works ;)
+# echo "os.system(\"sed -i '/^LFLAGS/s|-Wl,-rpath,.\+ ||g' gui/Makefile core/Makefile\")" >> python/configure.py.in
+
+ #cd qgis-${pkgver}
+ # fix compilation error https://trac.osgeo.org/qgis/ticket/3378
+ #patch -Np3 -i ${srcdir}/ticket_3378.diff
+ #cd ${srcdir}
+
+ rm -rf build
+ mkdir build
+ cd build
+
+ cmake ../qgis-${pkgver} \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_SKIP_RPATH=ON \
+ -DCMAKE_INSTALL_PREFIX=/usr \
+ -DGRASS_PREFIX=/opt/grass \
+ -DQGIS_MANUAL_SUBDIR=share/man
+
+ make
+}
+
+package() {
+ cd build
+
+ make DESTDIR=${pkgdir} install
+
+ # install some freedesktop.org compatibility
+ install -D -m644 ${srcdir}/qgis.desktop \
+ ${pkgdir}/usr/share/applications/qgis.desktop
+}
diff --git a/community-staging/qgis/qgis.desktop b/community-staging/qgis/qgis.desktop
new file mode 100644
index 000000000..c3db5ad08
--- /dev/null
+++ b/community-staging/qgis/qgis.desktop
@@ -0,0 +1,10 @@
+[Desktop Entry]
+Name=Quantum GIS
+GenericName="GIS"
+Comment="Geographic Information System (GIS) that supports vector, raster & database formats"
+Exec=/usr/bin/qgis
+Icon=/usr/share/qgis/images/icons/qgis-icon.png
+Terminal=false
+MultipleArgs=false
+Type=Application
+Categories=Application;Science
diff --git a/community-staging/qgis/ticket_3378.diff b/community-staging/qgis/ticket_3378.diff
new file mode 100644
index 000000000..0d722e4c8
--- /dev/null
+++ b/community-staging/qgis/ticket_3378.diff
@@ -0,0 +1,38 @@
+Index: /trunk/qgis/python/core/conversions.sip
+===================================================================
+--- /trunk/qgis/python/core/conversions.sip (revision 14323)
++++ /trunk/qgis/python/core/conversions.sip (revision 14988)
+@@ -16,4 +16,5 @@
+
+ %Feature QSETINT_CONVERSION
++%Feature QSETTYPE_CONVERSION
+
+ %ModuleHeaderCode
+@@ -321,5 +322,5 @@
+ %End
+
+-
++%If (QSETTYPE_CONVERSION)
+ template <TYPE>
+ %MappedType QSet<TYPE>
+@@ -395,6 +396,5 @@
+
+ };
+-
+-
++%End
+
+ template<TYPE>
+Index: /trunk/qgis/python/CMakeLists.txt
+===================================================================
+--- /trunk/qgis/python/CMakeLists.txt (revision 14330)
++++ /trunk/qgis/python/CMakeLists.txt (revision 14988)
+@@ -44,4 +44,8 @@
+ ENDIF(NOT PYQT4_VERSION_NUM LESS 263941)
+
++IF(NOT PYQT4_VERSION_NUM LESS 264194) # 0x040802
++ SET(SIP_DISABLE_FEATURES ${SIP_DISABLE_FEATURES} QSETTYPE_CONVERSION)
++ENDIF(NOT PYQT4_VERSION_NUM LESS 264194)
++
+ # core module
+ FILE(GLOB sip_files_core core/*.sip)
diff --git a/community-testing/dsniff/PKGBUILD b/community-testing/dsniff/PKGBUILD
new file mode 100644
index 000000000..f7428aa09
--- /dev/null
+++ b/community-testing/dsniff/PKGBUILD
@@ -0,0 +1,36 @@
+# $Id: PKGBUILD 49679 2011-06-19 07:23:27Z andrea $
+# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
+# Contributor: ViNS <gladiator@fastwebnet.it>
+
+pkgname=dsniff
+pkgver=2.4b1
+pkgrel=20
+pkgdesc="Collection of tools for network auditing and penetration testing"
+url="http://www.monkey.org/~dugsong/dsniff/"
+arch=('i686' 'x86_64')
+license=('BSD')
+depends=('libpcap' 'openssl' 'libxmu' 'glib2' 'libnet' 'libnids')
+source=("http://www.monkey.org/~dugsong/${pkgname}/beta/$pkgname-$pkgver.tar.gz"
+ "http://ftp.de.debian.org/debian/pool/main/d/${pkgname}/${pkgname}_2.4b1+debian-18.diff.gz")
+md5sums=('2f761fa3475682a7512b0b43568ee7d6'
+ 'fbc9f62f9ab2f98f24f53ad497c1ce5d')
+
+build() {
+ cd $srcdir/$pkgname-2.4
+ patch -N < "${srcdir}"/dsniff_2.4b1+debian-18.diff
+ for i in *.dpatch; do
+ patch -N < "$i"
+ done
+
+ LDFLAGS="-lresolv -lglib-2.0 -lgthread-2.0 $LDFLAGS" ./configure --prefix=/usr
+ make
+}
+
+package() {
+ cd "$srcdir"/$pkgname-2.4
+ make prefix="$pkgdir"/usr install
+ install -D -m0644 LICENSE "$pkgdir"/usr/share/licenses/${pkgname}/LICENSE
+
+ install -d "$pkgdir"/usr/share
+ mv "$pkgdir"/usr/man "$pkgdir"/usr/share/
+}
diff --git a/community-testing/exim/PKGBUILD b/community-testing/exim/PKGBUILD
new file mode 100644
index 000000000..d30452d45
--- /dev/null
+++ b/community-testing/exim/PKGBUILD
@@ -0,0 +1,74 @@
+# $Id: PKGBUILD 49455 2011-06-16 18:57:50Z angvp $
+# Maintainer: Angel Velasquez <angvp@archlinux.org>
+# Maintainer: judd <jvinet@zeroflux.org>
+pkgname=exim
+pkgver=4.76
+pkgrel=2
+pkgdesc="A Message Transfer Agent"
+arch=(i686 x86_64)
+url="http://www.exim.org/"
+license=('GPL')
+backup=(etc/mail/aliases etc/mail/exim.conf \
+ etc/logrotate.d/exim etc/conf.d/exim)
+install=exim.install
+changelog=exim.changelog
+depends=('db' 'pcre' 'pam' 'tcp_wrappers' 'openssl' 'libldap')
+provides=('smtp-server')
+conflicts=('smtp-server')
+options=('!makeflags')
+source=(ftp://mirrors.24-7-solutions.net/pub/exim/ftp/exim/exim4/exim-${pkgver}.tar.bz2
+ aliases
+ exim
+ exim.logrotate
+ exim.conf.d
+ exim.Makefile
+ )
+md5sums=('58e784b33c7a2ab335ec6400346d6362'
+ '4874006f0585253ddab027d441009757'
+ '9aed772e87223213e8da9ca5e7376869'
+ 'd788c26f86a9d72a0aebb3b849fe74f2'
+ 'b75fe4c6e960a59a25b5f51e8f61ba3a'
+ '61e76543476f52f136c1d6c80ac1c5a1')
+
+build() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ sed -i 's|tail -1|tail -n -1|g' scripts/Configure-config.h
+ # Make some configuration changes
+ cp ${srcdir}/${pkgname}.Makefile Local/Makefile
+ make
+}
+
+package() {
+ cd $srcdir/$pkgname-$pkgver
+ install -D -m644 ../exim.logrotate ${pkgdir}/etc/logrotate.d/exim
+ install -D -m644 ../exim.conf.d ${pkgdir}/etc/conf.d/exim
+ install -D -m644 doc/exim.8 ${pkgdir}/usr/share/man/man8/exim.8
+ install -D -m755 ../exim ${pkgdir}/etc/rc.d/exim
+ mkdir -p ${pkgdir}/var/spool/exim/db ${pkgdir}/etc/mail \
+ ${pkgdir}/var/log/exim ${pkgdir}/usr/{lib,sbin}
+ chmod 770 ${pkgdir}/var/spool/exim ${pkgdir}/var/spool/exim/db ${pkgdir}/var/log/exim
+ cd build-Linux-*
+ for i in exicyclog exim_checkaccess exim_dumpdb exim_lock\
+ exim_tidydb exipick exiqsumm exigrep exim_dbmbuild exim\
+ exim_fixdb eximstats exinext exiqgrep exiwhat
+ do
+ install -m 0755 $i ${pkgdir}/usr/sbin
+ done
+
+ cd ${srcdir}/exim-${pkgver}/src
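+  # point the default configuration at /etc/mail/aliases instead of /etc/aliases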
+ sed "s|/etc/aliases|/etc/mail/aliases|g" configure.default | \
+ sed "s|SYSTEM_ALIASES_FILE|/etc/mail/aliases|g" \
+ >${pkgdir}/etc/mail/exim.conf
+
+ cp ${srcdir}/aliases ${pkgdir}/etc/mail
+ cd ${pkgdir}/usr/sbin
+ for i in mailq rmail rsmtp runq sendmail
+ do
+ ln -s exim $i
+ done
+ # fhs compliancy
+ ln -s ../sbin/exim ../lib/sendmail
+
+ mkdir -p ${pkgdir}/etc/rc.d
+ cp ${srcdir}/exim ${pkgdir}/etc/rc.d
+}
diff --git a/community-testing/exim/aliases b/community-testing/exim/aliases
new file mode 100644
index 000000000..5a76ff7d5
--- /dev/null
+++ b/community-testing/exim/aliases
@@ -0,0 +1,35 @@
+#
+# /etc/mail/aliases
+#
+# NOTE: Make sure you run 'newaliases' after modifying this file
+#
+
+# Basic system aliases -- these MUST be present.
+MAILER-DAEMON: postmaster
+postmaster: root
+hostmaster: root
+webmaster: hostmaster
+ftpmaster: hostmaster
+admin: hostmaster
+administrator: hostmaster
+
+# General redirections for pseudo accounts.
+bin: root
+daemon: root
+games: root
+ingres: root
+nobody: root
+system: root
+toor: root
+uucp: root
+
+# Well-known aliases.
+manager: root
+dumper: root
+operator: root
+
+# trap decode to catch security attacks
+decode: root
+
+# Person who should get root's mail
+#root:
diff --git a/community-testing/exim/exim b/community-testing/exim/exim
new file mode 100755
index 000000000..8e9640b32
--- /dev/null
+++ b/community-testing/exim/exim
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# source application-specific settings
+[ -f /etc/conf.d/exim ] && . /etc/conf.d/exim
+
+# general config
+. /etc/rc.conf
+. /etc/rc.d/functions
+
+PID=`pidof -o %PPID /usr/sbin/exim`
+
+case "$1" in
+ start)
+ stat_busy "Starting Exim"
+ [ -z "$PID" ] && /usr/sbin/exim $EXIM_ARGS
+ if [ $? -gt 0 ]; then
+ stat_fail
+ else
+ add_daemon exim
+ stat_done
+ fi
+ ;;
+ stop)
+ stat_busy "Stopping Exim"
+ [ ! -z "$PID" ] && kill $PID &> /dev/null
+ if [ $? -gt 0 ]; then
+ stat_fail
+ else
+ rm /var/run/exim.pid
+ rm_daemon exim
+ stat_done
+ fi
+ ;;
+ restart)
+ $0 stop
+ sleep 2
+ $0 start
+ ;;
+ *)
+ echo "usage: $0 {start|stop|restart}"
+esac
+exit 0
diff --git a/community-testing/exim/exim.Makefile b/community-testing/exim/exim.Makefile
new file mode 100644
index 000000000..125f87dfb
--- /dev/null
+++ b/community-testing/exim/exim.Makefile
@@ -0,0 +1,1227 @@
+# $Cambridge: exim/src/src/EDITME,v 1.27 2010/06/12 15:21:25 jetmore Exp $
+
+##################################################
+# The Exim mail transport agent #
+##################################################
+
+# This is the template for Exim's main build-time configuration file. It
+# contains settings that are independent of any operating system. These are
+# things that are mostly sysadmin choices. The items below are divided into
+# those you must specify, those you probably want to specify, those you might
+# often want to specify, and those that you almost never need to mention.
+
+# Edit this file and save the result to a file called Local/Makefile within the
+# Exim distribution directory before running the "make" command.
+
+# Things that depend on the operating system have default settings in
+# OS/Makefile-Default, but these are overridden for some OS by files called
+# OS/Makefile-<osname>. You can further override these by creating files
+# called Local/Makefile-<osname>, where "<osname>" stands for the name of your
+# operating system - look at the names in the OS directory to see which names
+# are recognized.
+
+# However, if you are building Exim for a single OS only, you don't need to
+# worry about setting up Local/Makefile-<osname>. Any build-time configuration
+# settings you require can in fact be placed in the one file called
+# Local/Makefile. It is only if you are building for several OS from the same
+# source files that you need to worry about splitting off your own OS-dependent
+# settings into separate files. (There's more explanation about how this all
+# works in the toplevel README file, under "Modifying the building process", as
+# well as in the Exim specification.)
+
+# One OS-specific thing that may need to be changed is the command for running
+# the C compiler; the overall default is gcc, but some OS Makefiles specify cc.
+# You can override anything that is set by putting CC=whatever in your
+# Local/Makefile.
+
+# NOTE: You should never need to edit any of the distributed Makefiles; all
+# overriding can be done in your Local/Makefile(s). This will make it easier
+# for you when the next release comes along.
+
+# The location of the X11 libraries is something else that is quite variable
+# even between different versions of the same operating system (and indeed
+# there are different versions of X11 as well, of course). The four settings
+# concerned here are X11, XINCLUDE, XLFLAGS (linking flags) and X11_LD_LIB
+# (dynamic run-time library). You need not worry about X11 unless you want to
+# compile the Exim monitor utility. Exim itself does not use X11.
+
+# Another area of variability between systems is the type and location of the
+# DBM library package. Exim has support for ndbm, gdbm, tdb, and Berkeley DB.
+# By default the code assumes ndbm; this often works with gdbm or DB, provided
+# they are correctly installed, via their compatibility interfaces. However,
+# Exim can also be configured to use the native calls for Berkeley DB (obsolete
+# versions 1.85, 2.x, 3.x, or the current 4.x version) and also for gdbm.
+
+# For some operating systems, a default DBM library (other than ndbm) is
+# selected by a setting in the OS-specific Makefile. Most modern OS now have
+# a DBM library installed as standard, and in many cases this will be selected
+# for you by the OS-specific configuration. If Exim compiles without any
+# problems, you probably do not have to worry about the DBM library. If you
+# do want or need to change it, you should first read the discussion in the
+# file doc/dbm.discuss.txt, which also contains instructions for testing Exim's
+# interface to the DBM library.
+
+# In Local/Makefiles blank lines and lines starting with # are ignored. It is
+# also permitted to use the # character to add a comment to a setting, for
+# example
+#
+# EXIM_GID=42 # the "mail" group
+#
+# However, with some versions of "make" this works only if there is no white
+# space between the end of the setting and the #, so perhaps it is best
+# avoided. A consequence of this facility is that it is not possible to have
+# the # character present in any setting, but I can't think of any cases where
+# this would be wanted.
+###############################################################################
+
+
+
+###############################################################################
+# THESE ARE THINGS YOU MUST SPECIFY #
+###############################################################################
+
+# Exim will not build unless you specify BIN_DIRECTORY, CONFIGURE_FILE, and
+# EXIM_USER. You also need EXIM_GROUP if EXIM_USER specifies a uid by number.
+
+# If you don't specify SPOOL_DIRECTORY, Exim won't fail to build. However, it
+# really is a very good idea to specify it here rather than at run time. This
+# is particularly true if you let the logs go to their default location in the
+# spool directory, because it means that the location of the logs is known
+# before Exim has read the run time configuration file.
+
+#------------------------------------------------------------------------------
+# BIN_DIRECTORY defines where the exim binary will be installed by "make
+# install". The path is also used internally by Exim when it needs to re-invoke
+# itself, either to send an error message, or to recover root privilege. Exim's
+# utility binaries and scripts are also installed in this directory. There is
+# no "standard" place for the binary directory. Some people like to keep all
+# the Exim files under one directory such as /usr/exim; others just let the
+# Exim binaries go into an existing directory such as /usr/sbin or
+# /usr/local/sbin. The installation script will try to create this directory,
+# and any superior directories, if they do not exist.
+
+BIN_DIRECTORY=/usr/sbin
+
+
+#------------------------------------------------------------------------------
+# CONFIGURE_FILE defines where Exim's run time configuration file is to be
+# found. It is the complete pathname for the file, not just a directory. The
+# location of all other run time files and directories can be changed in the
+# run time configuration file. There is a lot of variety in the choice of
+# location in different OS, and in the preferences of different sysadmins. Some
+# common locations are in /etc or /etc/mail or /usr/local/etc or
+# /usr/local/etc/mail. Another possibility is to keep all the Exim files under
+# a single directory such as /usr/exim. Whatever you choose, the installation
+# script will try to make the directory and any superior directories if they
+# don't exist. It will also install a default runtime configuration if this
+# file does not exist.
+
+CONFIGURE_FILE=/etc/mail/exim.conf
+
+# It is possible to specify a colon-separated list of files for CONFIGURE_FILE.
+# In this case, Exim will use the first of them that exists when it is run.
+# However, if a list is specified, the installation script no longer tries to
+# make superior directories or to install a default runtime configuration.
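+#
+# For illustration only (this package uses the single file set above; the
+# second path here is hypothetical):
+# CONFIGURE_FILE=/etc/mail/exim.conf:/etc/exim/exim.conf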
+
+
+#------------------------------------------------------------------------------
+# The Exim binary must normally be setuid root, so that it starts executing as
+# root, but (depending on the options with which it is called) it does not
+# always need to retain the root privilege. These settings define the user and
+# group that is used for Exim processes when they no longer need to be root. In
+# particular, this applies when receiving messages and when doing remote
+# deliveries. (Local deliveries run as various non-root users, typically as the
+# owner of a local mailbox.) Specifying these values as root is not supported.
+
+EXIM_USER=ref:exim
+
+# If you specify EXIM_USER as a name, this is looked up at build time, and the
+# uid number is built into the binary. However, you can specify that this
+# lookup is deferred until runtime. In this case, it is the name that is built
+# into the binary. You can do this by a setting of the form:
+
+# EXIM_USER=ref:exim
+
+# In other words, put "ref:" in front of the user name. If you set EXIM_USER
+# like this, any value specified for EXIM_GROUP is also passed "by reference".
+# Although this costs a bit of resource at runtime, it is convenient to use
+# this feature when building binaries that are to be run on multiple systems
+# where the name may refer to different uids. It also allows you to build Exim
+# on a system where there is no Exim user defined.
+
+# If the setting of EXIM_USER is numeric (e.g. EXIM_USER=42), there must
+# also be a setting of EXIM_GROUP. If, on the other hand, you use a name
+# for EXIM_USER (e.g. EXIM_USER=exim), you don't need to set EXIM_GROUP unless
+# you want to use a group other than the default group for the given user.
+
+# EXIM_GROUP=
+
+# Many sites define a user called "exim", with an appropriate default group,
+# and use
+#
+# EXIM_USER=exim
+#
+# while leaving EXIM_GROUP unspecified (commented out).
+
+
+#------------------------------------------------------------------------------
+# SPOOL_DIRECTORY defines the directory where all the data for messages in
+# transit is kept. It is strongly recommended that you define it here, though
+# it is possible to leave this till the run time configuration.
+
+# Exim creates the spool directory if it does not exist. The owner and group
+# will be those defined by EXIM_USER and EXIM_GROUP, and this also applies to
+# all the files and directories that are created in the spool directory.
+
+# Almost all installations choose this:
+
+SPOOL_DIRECTORY=/var/spool/exim
+
+
+
+###############################################################################
+# THESE ARE THINGS YOU PROBABLY WANT TO SPECIFY #
+###############################################################################
+
+# If you need extra header file search paths on all compiles, put the -I
+# options in INCLUDE. If you want the extra searches only for certain
+# parts of the build, see more specific xxx_INCLUDE variables below.
+
+# INCLUDE=-I/example/include
+
+# You need to specify some routers and transports if you want the Exim that you
+# are building to be capable of delivering mail. You almost certainly need at
+# least one type of lookup. You should consider whether you want to build
+# the Exim monitor or not.
+
+
+#------------------------------------------------------------------------------
+# These settings determine which individual router drivers are included in the
+# Exim binary. There are no defaults in the code; those routers that are wanted
+# must be defined here by setting the appropriate variables to the value "yes".
+# Including a router in the binary does not cause it to be used automatically.
+# It has also to be configured in the run time configuration file. By
+# commenting out those you know you don't want to use, you can make the binary
+# a bit smaller. If you are unsure, leave all of these included for now.
+
+ROUTER_ACCEPT=yes
+ROUTER_DNSLOOKUP=yes
+ROUTER_IPLITERAL=yes
+ROUTER_MANUALROUTE=yes
+ROUTER_QUERYPROGRAM=yes
+ROUTER_REDIRECT=yes
+
+# This one is very special-purpose, so is not included by default.
+
+# ROUTER_IPLOOKUP=yes
+
+
+#------------------------------------------------------------------------------
+# These settings determine which individual transport drivers are included in
+# the Exim binary. There are no defaults; those transports that are wanted must
+# be defined here by setting the appropriate variables to the value "yes".
+# Including a transport in the binary does not cause it to be used
+# automatically. It has also to be configured in the run time configuration
+# file. By commenting out those you know you don't want to use, you can make
+# the binary a bit smaller. If you are unsure, leave all of these included for
+# now.
+
+TRANSPORT_APPENDFILE=yes
+TRANSPORT_AUTOREPLY=yes
+TRANSPORT_PIPE=yes
+TRANSPORT_SMTP=yes
+
+# This one is special-purpose, and commonly not required, so it is not
+# included by default.
+
+TRANSPORT_LMTP=yes
+
+
+#------------------------------------------------------------------------------
+# The appendfile transport can write messages to local mailboxes in a number
+# of formats. The code for three specialist formats, maildir, mailstore, and
+# MBX, is included only when requested. If you do not know what this is about,
+# leave these settings commented out.
+
+SUPPORT_MAILDIR=yes
+# SUPPORT_MAILSTORE=yes
+# SUPPORT_MBX=yes
+
+
+#------------------------------------------------------------------------------
+# See below for dynamic lookup modules.
+# LOOKUP_MODULE_DIR=/usr/lib/exim/lookups/
+# If not using package management but using this anyway, then think about how
+# you perform upgrades and revert them. You should consider the benefit of
+# embedding the Exim version number into LOOKUP_MODULE_DIR, so that you can
+# maintain two concurrent sets of modules.
+
+# To build a module dynamically, you'll need to define CFLAGS_DYNAMIC for
+# your platform. Eg:
+# CFLAGS_DYNAMIC=-shared -rdynamic
+# CFLAGS_DYNAMIC=-shared -rdynamic -fPIC
+
+#------------------------------------------------------------------------------
+# These settings determine which file and database lookup methods are included
+# in the binary. See the manual chapter entitled "File and database lookups"
+# for discussion. DBM and lsearch (linear search) are included by default. If
+# you are unsure about the others, leave them commented out for now.
+# LOOKUP_DNSDB does *not* refer to general mail routing using the DNS. It is
+# for the specialist case of using the DNS as a general database facility (not
+# common).
+# If set to "2" instead of "yes" then the corresponding lookup will be
+# built as a module and must be installed into LOOKUP_MODULE_DIR. You need to
+# add -export-dynamic -rdynamic to EXTRALIBS. You may also need to add -ldl to
+# EXTRALIBS so that dlopen() is available to Exim. You need to define
+# LOOKUP_MODULE_DIR above so the exim binary actually loads dynamic lookup
+# modules.
+# Also, instead of adding all the libraries/includes to LOOKUP_INCLUDE and
+# LOOKUP_LIBS, add them to the respective LOOKUP_*_INCLUDE and LOOKUP_*_LIBS
+# (where * is the name as given here in this list). That ensures that only
+# the dynamic library and not the exim binary will be linked against the
+# library.
+# NOTE: LDAP cannot be built as a module!
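+#
+# For illustration only (not enabled in this build; the include and library
+# paths are just examples): building the MySQL lookup as a loadable module
+# might look like
+# LOOKUP_MYSQL=2
+# LOOKUP_MYSQL_INCLUDE=-I/usr/include/mysql
+# LOOKUP_MYSQL_LIBS=-L/usr/lib -lmysqlclient
+# EXTRALIBS=-export-dynamic -rdynamic -ldl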
+
+LOOKUP_DBM=yes
+LOOKUP_LSEARCH=yes
+LOOKUP_DNSDB=yes
+
+# LOOKUP_CDB=yes
+LOOKUP_DSEARCH=yes
+# LOOKUP_IBASE=yes
+LOOKUP_LDAP=yes
+# LOOKUP_MYSQL=yes
+# LOOKUP_NIS=yes
+# LOOKUP_NISPLUS=yes
+# LOOKUP_ORACLE=yes
+# LOOKUP_PASSWD=yes
+# LOOKUP_PGSQL=yes
+# LOOKUP_SQLITE=yes
+# LOOKUP_WHOSON=yes
+
+# These two settings are obsolete; all three lookups are compiled when
+# LOOKUP_LSEARCH is enabled. However, we retain these for backward
+# compatibility. Setting one forces LOOKUP_LSEARCH if it is not set.
+
+# LOOKUP_WILDLSEARCH=yes
+# LOOKUP_NWILDLSEARCH=yes
+
+
+#------------------------------------------------------------------------------
+# If you have set LOOKUP_LDAP=yes, you should set LDAP_LIB_TYPE to indicate
+# which LDAP library you have. Unfortunately, though most of their functions
+# are the same, there are minor differences. Currently Exim knows about four
+# LDAP libraries: the one from the University of Michigan (also known as
+# OpenLDAP 1), OpenLDAP 2, the Netscape SDK library, and the library that comes
+# with Solaris 7 onwards. Uncomment whichever of these you are using.
+
+# LDAP_LIB_TYPE=OPENLDAP1
+LDAP_LIB_TYPE=OPENLDAP2
+# LDAP_LIB_TYPE=NETSCAPE
+# LDAP_LIB_TYPE=SOLARIS
+
+# If you don't set any of these, Exim assumes the original University of
+# Michigan (OpenLDAP 1) library.
+
+
+#------------------------------------------------------------------------------
+# The PCRE library is required for exim. There is no longer an embedded
+# version of the PCRE library included with the source code, instead you
+# must use a system library or build your own copy of PCRE.
+# In either case you must specify the library link info here. If the
+# PCRE header files are not in the standard search path you must also
+# modify the INCLUDE path (above)
+# The default setting of PCRE_LIBS should work on the vast majority of
+# systems
+
+PCRE_LIBS=-lpcre
+
+
+#------------------------------------------------------------------------------
+# Additional libraries and include directories may be required for some
+# lookup styles (e.g. LDAP, MYSQL or PGSQL). LOOKUP_LIBS is included only on
+# the command for linking Exim itself, not on any auxiliary programs. You
+# don't need to set LOOKUP_INCLUDE if the relevant directories are already
+# specified in INCLUDE. The settings below are just examples; -lpq is for
+# PostgreSQL, -lgds is for Interbase, -lsqlite3 is for SQLite.
+
+# LOOKUP_INCLUDE=-I /usr/local/ldap/include -I /usr/local/mysql/include -I /usr/local/pgsql/include
+# LOOKUP_LIBS=-L/usr/local/lib -lldap -llber -lmysqlclient -lpq -lgds -lsqlite3
+
+
+#------------------------------------------------------------------------------
+# Compiling the Exim monitor: If you want to compile the Exim monitor, a
+# program that requires an X11 display, then EXIM_MONITOR should be set to the
+# value "eximon.bin". Comment out this setting to disable compilation of the
+# monitor. The locations of various X11 directories for libraries and include
+# files are defaulted in the OS/Makefile-Default file, but can be overridden in
+# local OS-specific make files.
+
+
+
+
+#------------------------------------------------------------------------------
+# Compiling Exim with content scanning support: If you want to compile Exim
+# with support for message body content scanning, set WITH_CONTENT_SCAN to
+# the value "yes". This will give you malware and spam scanning in the DATA ACL,
+# and the MIME ACL. Please read the documentation to learn more about these
+# features.
+
+WITH_CONTENT_SCAN=yes
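+
+# For illustration only (runtime ACL syntax, not a Makefile setting): with
+# content scanning built in, the DATA ACL can use conditions such as
+#   deny malware   = *
+#        message   = This message contains malware ($malware_name)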
+
+# If you want to use the deprecated "demime" condition in the DATA ACL,
+# uncomment the line below. Doing so will also explicitly turn on the
+# WITH_CONTENT_SCAN option. If possible, use the MIME ACL instead of
+# the "demime" condition.
+
+WITH_OLD_DEMIME=yes
+
+# If you're using ClamAV and are backporting fixes to an old version, instead
+# of staying current (which is the more usual approach) then you may need to
+# use an older API which uses a STREAM command, now deprecated, instead of
+# zINSTREAM. If you need to set this, please let the Exim developers know, as
+# if nobody reports a need for it, we'll remove this option and clean up the
+# code. zINSTREAM was introduced with ClamAV 0.95.
+#
+# WITH_OLD_CLAMAV_STREAM=yes
+
+#------------------------------------------------------------------------------
+# By default Exim includes code to support DKIM (DomainKeys Identified
+# Mail, RFC4871) signing and verification. Verification of signatures is
+# turned on by default. See the spec for information on conditionally
+# disabling it. To disable the inclusion of the entire feature, set
+# DISABLE_DKIM to "yes"
+
+# DISABLE_DKIM=yes
+
+
+#------------------------------------------------------------------------------
+# Compiling Exim with experimental features. These are documented in
+# experimental-spec.txt. "Experimental" means that the way these features are
+# implemented may still change. Backward compatibility is not guaranteed.
+
+# Uncomment the following lines to add SPF support. You need to have libspf2
+# installed on your system (www.libspf2.org). Depending on where it is installed
+# you may have to edit the CFLAGS and LDFLAGS lines.
+
+# EXPERIMENTAL_SPF=yes
+# CFLAGS += -I/usr/local/include
+# LDFLAGS += -lspf2
+
+# Uncomment the following lines to add SRS (Sender rewriting scheme) support.
+# You need to have libsrs_alt installed on your system (srs.mirtol.com).
+# Depending on where it is installed you may have to edit the CFLAGS and
+# LDFLAGS lines.
+
+# EXPERIMENTAL_SRS=yes
+# CFLAGS += -I/usr/local/include
+# LDFLAGS += -lsrs_alt
+
+# Uncomment the following lines to add Brightmail AntiSpam support. You need
+# to have the Brightmail client SDK installed. Please check the experimental
+# documentation for implementation details. You need to edit the CFLAGS and
+# LDFLAGS lines.
+
+# EXPERIMENTAL_BRIGHTMAIL=yes
+# CFLAGS += -I/opt/brightmail/bsdk-6.0/include
+# LDFLAGS += -lxml2_single -lbmiclient_single -L/opt/brightmail/bsdk-6.0/lib
+
+
+
+###############################################################################
+# THESE ARE THINGS YOU MIGHT WANT TO SPECIFY #
+###############################################################################
+
+# The items in this section are those that are commonly changed according to
+# the sysadmin's preferences, but whose defaults are often acceptable. The
+# first five are concerned with security issues, where differing levels of
+# paranoia are appropriate in different environments. Sysadmins also vary in
+# their views on appropriate levels of defence in these areas. If you do not
+# understand these issues, go with the defaults, which are used by many sites.
+
+
+#------------------------------------------------------------------------------
+# Although Exim is normally a setuid program, owned by root, it refuses to run
+# local deliveries as root by default. There is a runtime option called
+# "never_users" which lists the users that must never be used for local
+# deliveries. There is also the setting below, which provides a list that
+# cannot be overridden at runtime. This guards against problems caused by
+# unauthorized changes to the runtime configuration. You are advised not to
+# remove "root" from this option, but you can add other users if you want. The
+# list is colon-separated. It must NOT contain any spaces.
+
+# FIXED_NEVER_USERS=root:bin:daemon
+FIXED_NEVER_USERS=root
+
+
+#------------------------------------------------------------------------------
+# By default, Exim insists that its configuration file be owned by root. You
+# can specify one additional permitted owner here.
+
+# CONFIGURE_OWNER=
+
+# If the configuration file is group-writeable, Exim insists by default that it
+# is owned by root. You can specify one additional permitted group owner here.
+
+# CONFIGURE_GROUP=
+
+# If you specify CONFIGURE_OWNER or CONFIGURE_GROUP as a name, this is looked
+# up at build time, and the uid or gid number is built into the binary.
+# However, you can specify that the lookup is deferred until runtime. In this
+# case, it is the name that is built into the binary. You can do this by a
+# setting of the form:
+
+# CONFIGURE_OWNER=ref:mail
+# CONFIGURE_GROUP=ref:sysadmin
+
+# In other words, put "ref:" in front of the user or group name. Although this
+# costs a bit of resource at runtime, it is convenient to use this feature when
+# building binaries that are to be run on multiple systems where the names may
+# refer to different uids or gids. It also allows you to build Exim on a system
+# where the relevant user or group is not defined.
+
+
+#------------------------------------------------------------------------------
+# The -C option allows Exim to be run with an alternate runtime configuration
+# file. When this is used by root, root privilege is retained by the binary
+# (for any other caller including the Exim user, it is dropped). You can
+# restrict the location of alternate configurations by defining a prefix below.
+# Any file used with -C must then start with this prefix (except that /dev/null
+# is also permitted if the caller is root, because that is used in the install
+# script). If the prefix specifies a directory that is owned by root, a
+# compromise of the Exim account does not permit arbitrary alternate
+# configurations to be used. The prefix can be more restrictive than just a
+# directory (the second example).
+
+# ALT_CONFIG_PREFIX=/some/directory/
+# ALT_CONFIG_PREFIX=/some/directory/exim.conf-
+
+
+#------------------------------------------------------------------------------
+# When a user other than root uses the -C option to override the configuration
+# file (including the Exim user when re-executing Exim to regain root
+# privileges for local message delivery), this will normally cause Exim to
+# drop root privileges. The TRUSTED_CONFIG_LIST option specifies a file which
+# contains a list of trusted configuration filenames, one per line. If the -C
+# option is used by the Exim user or by the user specified in the
+# CONFIGURE_OWNER setting, to specify a configuration file which is listed in
+# the TRUSTED_CONFIG_LIST file, then root privileges are not dropped by Exim.
+
+# TRUSTED_CONFIG_LIST=/usr/exim/trusted_configs
+
+
+#------------------------------------------------------------------------------
+# Uncommenting this option disables the use of the -D command line option,
+# which changes the values of macros in the runtime configuration file.
+# This is another protection against somebody breaking into the Exim account.
+
+# DISABLE_D_OPTION=yes
+
+
+#------------------------------------------------------------------------------
+# By contrast, you might be maintaining a system which relies upon the ability
+# to override values with -D and assumes that these will be passed through to
+# the delivery processes. As of Exim 4.73, this is no longer the case by
+# default. Going forward, we strongly recommend that you use a shim Exim
+# configuration file owned by root stored under TRUSTED_CONFIG_LIST.
+# That shim can set macros before .include'ing your main configuration file.
+#
+# As a strictly transient measure to ease migration to 4.73, the
+# WHITELIST_D_MACROS value defines a colon-separated list of macro-names
+# which are permitted to be overridden from the command-line which will be
+# honoured by the Exim user. So these are macros that can persist to delivery
+# time.
+# Examples might be -DTLS or -DSPOOL=/some/dir. The values on the
+# command-line are filtered to only permit: [A-Za-z0-9_/.-]*
+#
+# This option is highly likely to be removed in a future release. It exists
+# only to make 4.73 as easy as possible to migrate to. If you use it, we
+# encourage you to schedule time to rework your configuration to not depend
+# upon it. Most people should not need to use this.
+#
+# By default, no macros are whitelisted for -D usage.
+
+# WHITELIST_D_MACROS=TLS:SPOOL
+
+#------------------------------------------------------------------------------
+# Exim has support for the AUTH (authentication) extension of the SMTP
+# protocol, as defined by RFC 2554. If you don't know what SMTP authentication
+# is, you probably won't want to include this code, so you should leave these
+# settings commented out. If you do want to make use of SMTP authentication,
+# you must uncomment at least one of the following, so that appropriate code is
+# included in the Exim binary. You will then need to set up the run time
+# configuration to make use of the mechanism(s) selected.
+
+AUTH_CRAM_MD5=yes
+# AUTH_CYRUS_SASL=yes
+AUTH_DOVECOT=yes
+AUTH_PLAINTEXT=yes
+AUTH_SPA=yes
+
+
+#------------------------------------------------------------------------------
+# If you specified AUTH_CYRUS_SASL above, you should ensure that you have the
+# Cyrus SASL library installed before trying to build Exim, and you probably
+# want to uncomment the following line:
+
+# AUTH_LIBS=-lsasl2
+
+
+#------------------------------------------------------------------------------
+# When Exim is decoding MIME "words" in header lines, most commonly for use
+# in the $header_xxx expansion, it converts any foreign character sets to the
+# one that is set in the headers_charset option. The default setting is
+# defined by this setting:
+
+HEADERS_CHARSET="ISO-8859-1"
+
+# If you are going to make use of $header_xxx expansions in your configuration
+# file, or if your users are going to use them in filter files, and the normal
+# character set on your host is something other than ISO-8859-1, you might
+# like to specify a different default here. This value can be overridden in
+# the runtime configuration, and it can also be overridden in individual filter
+# files.
+#
+# IMPORTANT NOTE: The iconv() function is needed for character code
+# conversions. Please see the next item...
+
+
+#------------------------------------------------------------------------------
+# Character code conversions are possible only if the iconv() function is
+# installed on your operating system. There are two places in Exim where this
+# is relevant: (a) The $header_xxx expansion (see the previous item), and (b)
+# the Sieve filter support. For those OS where iconv() is known to be installed
+# as standard, the file in OS/Makefile-xxxx contains
+#
+# HAVE_ICONV=yes
+#
+# If you are not using one of those systems, but have installed iconv(), you
+# need to uncomment that line above. In some cases, you may find that iconv()
+# and its header file are not in the default places. You might need to use
+# something like this:
+#
+# HAVE_ICONV=yes
+# CFLAGS=-O -I/usr/local/include
+# EXTRALIBS_EXIM=-L/usr/local/lib -liconv
+#
+# but of course there may need to be other things in CFLAGS and EXTRALIBS_EXIM
+# as well.
+
+
+#------------------------------------------------------------------------------
+# The passwords for user accounts are normally encrypted with the crypt()
+# function. Comparisons with encrypted passwords can be done using Exim's
+# "crypteq" expansion operator. (This is commonly used as part of the
+# configuration of an authenticator for use with SMTP AUTH.) At least one
+# operating system has an extended function called crypt16(), which uses up to
+# 16 characters of a password (the normal crypt() uses only the first 8). Exim
+# supports the use of crypt16() as well as crypt() but note the warning below.
+
+# You can always indicate a crypt16-encrypted password by preceding it with
+# "{crypt16}". If you want the default handling (without any preceding
+# indicator) to use crypt16(), uncomment the following line:
+
+# DEFAULT_CRYPT=crypt16
+
+# If you do that, you can still access the basic crypt() function by preceding
+# an encrypted password with "{crypt}". For more details, see the description
+# of the "crypteq" condition in the manual chapter on string expansions.
+
+# Some operating systems do not include a crypt16() function, so Exim has one
+# of its own, which it uses unless HAVE_CRYPT16 is defined. Normally, that will
+# be set in an OS-specific Makefile for the OS that have such a function, so
+# you should not need to bother with it.
+
+# *** WARNING *** WARNING *** WARNING *** WARNING *** WARNING ***
+# It turns out that the above is not entirely accurate. As well as crypt16()
+# there is a function called bigcrypt() that some operating systems have. This
+# may or may not use the same algorithm, and both of them may be different to
+# Exim's built-in crypt16() that is used unless HAVE_CRYPT16 is defined.
+#
+# However, since there is now a move away from the traditional crypt()
+# functions towards using SHA1 and other algorithms, tidying up this area of
+# Exim is seen as very low priority. In practice, if you need to, you can
+# define DEFAULT_CRYPT to the name of any function that has the same interface
+# as the traditional crypt() function.
+# *** WARNING *** WARNING *** WARNING *** WARNING *** WARNING ***
+
+
+#------------------------------------------------------------------------------
+# Exim can be built to support the SMTP STARTTLS command, which implements
+# Transport Layer Security using SSL (Secure Sockets Layer). To do this, you
+# must install the OpenSSL library package or the GnuTLS library. Exim contains
+# no cryptographic code of its own. Uncomment the following lines if you want
+# to build Exim with TLS support. If you don't know what this is all about,
+# leave these settings commented out.
+
+# This setting is required for any TLS support (either OpenSSL or GnuTLS)
+SUPPORT_TLS=yes
+
+# Uncomment this setting if you are using OpenSSL
+TLS_LIBS=-L/usr/lib -lssl -lcrypto
+
+# Uncomment these settings if you are using GnuTLS
+# USE_GNUTLS=yes
+# TLS_LIBS=-lgnutls -ltasn1 -lgcrypt
+
+# If you are running Exim as a server, note that just building it with TLS
+# support is not all you need to do. You also need to set up a suitable
+# certificate, and tell Exim about it by means of the tls_certificate
+# and tls_privatekey run time options. You also need to set tls_advertise_hosts
+# to specify the hosts to which Exim advertises TLS support. On the other hand,
+# if you are running Exim only as a client, building it with TLS support
+# is all you need to do.
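+#
+# Illustrative runtime settings (these are run time options, not Makefile
+# settings, and the certificate file names here are examples only):
+#
+#   tls_advertise_hosts = *
+#   tls_certificate     = /etc/ssl/certs/exim.pem
+#   tls_privatekey      = /etc/ssl/private/exim.pem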
+
+# Additional libraries and include files are required for both OpenSSL and
+# GnuTLS. The TLS_LIBS settings above assume that the libraries are installed
+# with all your other libraries. If they are in a special directory, you may
+# need something like
+
+# TLS_LIBS=-L/usr/lib -lssl -lcrypto
+# or
+# TLS_LIBS=-L/opt/gnu/lib -lgnutls -ltasn1 -lgcrypt
+
+# TLS_LIBS is included only on the command for linking Exim itself, not on any
+# auxiliary programs. If the include files are not in a standard place, you can
+# set TLS_INCLUDE to specify where they are, for example:
+
+TLS_INCLUDE=-I/usr/include/openssl
+# or
+# TLS_INCLUDE=-I/opt/gnu/include
+
+# You don't need to set TLS_INCLUDE if the relevant directories are already
+# specified in INCLUDE.
+
+
+#------------------------------------------------------------------------------
+# The default distribution of Exim contains only the plain text form of the
+# documentation. Other forms are available separately. If you want to install
+# the documentation in "info" format, first fetch the Texinfo documentation
+# sources from the ftp directory and unpack them, which should create files
+# with the extension "texinfo" in the doc directory. You may find that the
+# version number of the texinfo files is different to your Exim version number,
+# because the main documentation isn't updated as often as the code. For
+# example, if you have Exim version 4.43, the source tarball unpacks into a
+# directory called exim-4.43, but the texinfo tarball unpacks into exim-4.40.
+# In this case, move the contents of exim-4.40/doc into exim-4.43/doc after you
+# have unpacked them. Then set INFO_DIRECTORY to the location of your info
+# directory. This varies from system to system, but is often /usr/share/info.
+# Once you have done this, "make install" will build the info files and
+# install them in the directory you have defined.
+
+# INFO_DIRECTORY=/usr/share/info
+
+
+#------------------------------------------------------------------------------
+# Exim log directory and files: Exim creates several log files inside a
+# single log directory. You can define the directory and the form of the
+# log file name here. If you do not set anything, Exim creates a directory
+# called "log" inside its spool directory (see SPOOL_DIRECTORY above) and uses
+# the filenames "mainlog", "paniclog", and "rejectlog". If you want to change
+# this, you can set LOG_FILE_PATH to a path name containing one occurrence of
+# %s. This will be replaced by one of the strings "main", "panic", or "reject"
+# to form the final file names. Some installations may want something like this:
+
+LOG_FILE_PATH=/var/log/exim/%slog
+
+# which results in files with names /var/log/exim/mainlog, etc. The directory
+# in which the log files are placed must exist; Exim does not try to create
+# it for itself. It is also your responsibility to ensure that Exim is capable
+# of writing files using this path name. The Exim user (see EXIM_USER above)
+# must be able to create and update files in the directory you have specified.
+
+# You can also configure Exim to use syslog, instead of or as well as log
+# files, by settings such as these
+
+# LOG_FILE_PATH=syslog
+# LOG_FILE_PATH=syslog:/var/log/exim_%slog
+
+# The first of these uses only syslog; the second uses syslog and also writes
+# to log files. Do not include white space in such a setting as it messes up
+# the building process.
+
+
+#------------------------------------------------------------------------------
+# When logging to syslog, the following option caters for syslog replacements
+# that are able to accept log entries longer than the 1024 characters allowed
+# by RFC 3164. It is up to you to make sure your syslog daemon can handle this.
+# Non-printable characters are usually unacceptable regardless, so log entries
+# are still split on newline characters.
+
+# SYSLOG_LONG_LINES=yes
+
+# If you are not interested in the process identifier (pid) of the Exim that is
+# making the call to syslog, then comment out the following line.
+
+SYSLOG_LOG_PID=yes
+
+
+#------------------------------------------------------------------------------
+# Cycling log files: this variable specifies the maximum number of old
+# log files that are kept by the exicyclog log-cycling script. You don't have
+# to use exicyclog. If your operating system has other ways of cycling log
+# files, you can use them instead. The exicyclog script isn't run by default;
+# you have to set up a cron job for it if you want it.
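+#
+# For example, a root crontab entry such as this (illustrative only) would
+# cycle the logs nightly:
+#
+#   30 3 * * *   /usr/sbin/exicyclog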
+
+EXICYCLOG_MAX=10
+
+
+#------------------------------------------------------------------------------
+# The compress command is used by the exicyclog script to compress old log
+# files. Both the name of the command and the suffix that it adds to files
+# need to be defined here. See also the EXICYCLOG_MAX configuration.
+
+COMPRESS_COMMAND=/bin/gzip
+COMPRESS_SUFFIX=gz
+
+
+#------------------------------------------------------------------------------
+# If the exigrep utility is fed compressed log files, it tries to uncompress
+# them using this command.
+
+ZCAT_COMMAND=/bin/zcat
+
+
+#------------------------------------------------------------------------------
+# Compiling in support for embedded Perl: If you want to be able to
+# use Perl code in Exim's string manipulation language and you have Perl
+# (version 5.004 or later) installed, set EXIM_PERL to perl.o. Using embedded
+# Perl costs quite a lot of resources. Only do this if you really need it.
+
+# EXIM_PERL=perl.o
+
+
+#------------------------------------------------------------------------------
+# Support for dynamically-loaded string expansion functions via ${dlfunc. If
+# you are using gcc the dynamically-loaded object must be compiled with the
+# -shared option, and you will need to add -export-dynamic to EXTRALIBS so
+# that the local_scan API is made available by the linker. You may also need
+# to add -ldl to EXTRALIBS so that dlopen() is available to Exim.
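+#
+# For illustration only (object path, function name and argument are
+# hypothetical): a module built with
+#   gcc -shared -o /usr/local/lib/exim/myfunc.so myfunc.c
+# could be called from the runtime configuration as
+#   ${dlfunc{/usr/local/lib/exim/myfunc.so}{myfunction}{$local_part}}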
+
+# EXPAND_DLFUNC=yes
+
+
+#------------------------------------------------------------------------------
+# Exim has support for PAM (Pluggable Authentication Modules), a facility
+# which is available in the latest releases of Solaris and in some GNU/Linux
+# distributions (see http://ftp.kernel.org/pub/linux/libs/pam/). The Exim
+# support, which is intended for use in conjunction with the SMTP AUTH
+# facilities, is included only when requested by the following setting:
+
+SUPPORT_PAM=yes
+
+# You probably need to add -lpam to EXTRALIBS, and in some releases of
+# GNU/Linux -ldl is also needed.
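+# For example (illustrative only, not set by this Makefile):
+#
+# EXTRALIBS=-lpam -ldl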
+
+
+#------------------------------------------------------------------------------
+# Support for authentication via Radius is also available. The Exim support,
+# which is intended for use in conjunction with the SMTP AUTH facilities,
+# is included only when requested by setting the following parameter to the
+# location of your Radius configuration file:
+
+# RADIUS_CONFIG_FILE=/etc/radiusclient/radiusclient.conf
+# RADIUS_CONFIG_FILE=/etc/radius.conf
+
+# If you have set RADIUS_CONFIG_FILE, you should also set one of these to
+# indicate which RADIUS library is used:
+
+# RADIUS_LIB_TYPE=RADIUSCLIENT
+# RADIUS_LIB_TYPE=RADIUSCLIENTNEW
+# RADIUS_LIB_TYPE=RADLIB
+
+# RADIUSCLIENT is the radiusclient library; you probably need to add
+# -lradiusclient to EXTRALIBS.
+#
+# The API for the radiusclient library was changed at release 0.4.0.
+# Unfortunately, the header file does not define a version number that clients
+# can use to support both the old and new APIs. If you are using version 0.4.0
+# or later of the radiusclient library, you should use RADIUSCLIENTNEW.
+#
+# RADLIB is the Radius library that comes with FreeBSD (the header file is
+# called radlib.h); you probably need to add -lradius to EXTRALIBS.
+#
+# If you do not set RADIUS_LIB_TYPE, Exim assumes the radiusclient library,
+# using the original API.
+
+
+#------------------------------------------------------------------------------
+# Support for authentication via the Cyrus SASL pwcheck daemon is available.
+# Note, however, that pwcheck is now deprecated in favour of saslauthd (see
+# next item). The Exim support for pwcheck, which is intended for use in
+# conjunction with the SMTP AUTH facilities, is included only when requested by
+# setting the following parameter to the location of the pwcheck daemon's
+# socket.
+#
+# There is no need to install all of SASL on your system. You just need to run
+# ./configure --with-pwcheck, cd to the pwcheck directory within the sources,
+# make and make install. You must create the socket directory (default
+# /var/pwcheck) and chown it to exim's user and group. Once you have installed
+# pwcheck, you should arrange for it to be started by root at boot time.
+
+# CYRUS_PWCHECK_SOCKET=/var/pwcheck/pwcheck
+
+
+#------------------------------------------------------------------------------
+# Support for authentication via the Cyrus SASL saslauthd daemon is available.
+# The Exim support, which is intended for use in conjunction with the SMTP AUTH
+# facilities, is included only when requested by setting the following
+# parameter to the location of the saslauthd daemon's socket.
+#
+# There is no need to install all of SASL on your system. You just need to run
+# ./configure --with-saslauthd (and any other options you need, for example, to
+# select or deselect authentication mechanisms), cd to the saslauthd directory
+# within the sources, make and make install. You must create the socket
+# directory (default /var/state/saslauthd) and chown it to exim's user and
+# group. Once you have installed saslauthd, you should arrange for it to be
+# started by root at boot time.
+
+# CYRUS_SASLAUTHD_SOCKET=/var/state/saslauthd/mux
+
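A condensed sketch of the build-and-install steps described above, assuming an unpacked Cyrus SASL source tree and that Exim runs as user and group exim:

    ./configure --with-saslauthd         # plus any mechanism selection options you need
    cd saslauthd && make && make install
    mkdir -p /var/state/saslauthd        # default socket directory
    chown exim:exim /var/state/saslauthd
    saslauthd -a pam                     # arrange for root to start this at boot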
+
+#------------------------------------------------------------------------------
+# TCP wrappers: If you want to use tcpwrappers from within Exim, uncomment
+# this setting. See the manual section entitled "Use of tcpwrappers" in the
+# chapter on building and installing Exim.
+#
+USE_TCP_WRAPPERS=yes
+#
+# You may well also have to specify a local "include" file and an additional
+# library for TCP wrappers, so you probably need something like this:
+#
+USE_TCP_WRAPPERS=yes
+# CFLAGS=-O -I/usr/local/include
+# EXTRALIBS_EXIM=-L/usr/local/lib -lwrap
+#
+# but of course there may need to be other things in CFLAGS and EXTRALIBS_EXIM
+# as well.
+#
+# To use a name other than exim in the tcpwrappers config file,
+# e.g. if you're running multiple daemons with different access lists,
+# or multiple MTAs with the same access list, define
+# TCP_WRAPPERS_DAEMON_NAME accordingly
+#
+# TCP_WRAPPERS_DAEMON_NAME="exim"
+
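Because USE_TCP_WRAPPERS is enabled here, the daemon consults /etc/hosts.allow and /etc/hosts.deny under the name "exim" (or whatever TCP_WRAPPERS_DAEMON_NAME is set to). An illustrative policy, not shipped with this package, might look like:

    # /etc/hosts.allow
    exim: 127.0.0.1 192.168.0.0/255.255.0.0

    # /etc/hosts.deny
    exim: ALL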
+
+#------------------------------------------------------------------------------
+# The default action of the exim_install script (which is run by "make
+# install") is to install the Exim binary with a unique name such as
+# exim-4.43-1, and then set up a symbolic link called "exim" to reference it,
+# moving the symbolic link from any previous version. If you define NO_SYMLINK
+# (the value doesn't matter), the symbolic link is not created or moved. You
+# will then have to "turn Exim on" by setting up the link manually.
+
+# NO_SYMLINK=yes
+
+
+#------------------------------------------------------------------------------
+# Another default action of the install script is to install a default runtime
+# configuration file if one does not exist. This configuration has a router for
+# expanding system aliases. The default assumes that these aliases are kept
+# in the traditional file called /etc/aliases. If such a file does not exist,
+# the installation script creates one that contains just comments (no actual
+# aliases). The following setting can be changed to specify a different
+# location for the system alias file.
+
+SYSTEM_ALIASES_FILE=/etc/mail/aliases
+
+
+#------------------------------------------------------------------------------
+# There are some testing options (-be, -bt, -bv) that read data from the
+# standard input when no arguments are supplied. By default, the input lines
+# are read using the standard fgets() function. This does not support line
+# editing during interactive input (though the terminal's "erase" character
+# works as normal). If your operating system has the readline() function, and
+# in addition supports dynamic loading of library functions, you can cause
+# Exim to use readline() for the -be testing option (only) by uncommenting the
+# following setting. Dynamic loading is used so that the library is loaded only
+# when the -be testing option is given; by the time the loading occurs,
+# Exim has given up its root privilege and is running as the calling user. This
+# is the reason why readline() is NOT supported for -bt and -bv, because Exim
+# runs as root or as exim, respectively, for those options. When USE_READLINE
+# is "yes", as well as supporting line editing, a history of input lines in the
+# current run is maintained.
+
+# USE_READLINE=yes
+
+# You may need to add -ldl to EXTRALIBS when you set USE_READLINE=yes.
+# Note that this option adds to the size of the Exim binary, because the
+# dynamic loading library is not otherwise included.
+
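The option only affects interactive use of the expansion tester; an illustrative session (run as an ordinary user) looks like this, with line editing available only when USE_READLINE=yes:

    $ exim -be
    > $primary_hostname
    > ${substr{0}{3}{$primary_hostname}}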
+
+
+###############################################################################
+# THINGS YOU ALMOST NEVER NEED TO MENTION #
+###############################################################################
+
+# The settings in this section are available for use in special circumstances.
+# In the vast majority of installations you need not change anything below.
+
+
+#------------------------------------------------------------------------------
+# The following commands live in different places in some OS. Either the
+# ultimate default settings, or the OS-specific files should already point to
+# the right place, but they can be overridden here if necessary. These settings
+# are used when building various scripts to ensure that the correct paths are
+# used when the scripts are run. They are not used in the Makefile itself. Perl
+# is not necessary for running Exim unless you set EXIM_PERL (see above) to get
+# it embedded, but there are some utilities that are Perl scripts. If you
+# haven't got Perl, Exim will still build and run; you just won't be able to
+# use those utilities.
+
+# CHOWN_COMMAND=/usr/bin/chown
+# CHGRP_COMMAND=/usr/bin/chgrp
+# CHMOD_COMMAND=/usr/bin/chmod
+# MV_COMMAND=/bin/mv
+# RM_COMMAND=/bin/rm
+# TOUCH_COMMAND=/usr/bin/touch
+# PERL_COMMAND=/usr/bin/perl
+
+
+#------------------------------------------------------------------------------
+# The following macro can be used to change the command for building a library
+# of functions. By default the "ar" command is used, with options "cq".
+# Only in rare circumstances should you need to change this.
+
+# AR=ar cq
+
+
+#------------------------------------------------------------------------------
+# In some operating systems, the value of the TMPDIR environment variable
+# controls where temporary files are created. Exim does not make use of
+# temporary files, except when delivering to MBX mailboxes. However, if Exim
+# calls any external libraries (e.g. DBM libraries), they may use temporary
+# files, and thus be influenced by the value of TMPDIR. For this reason, when
+# Exim starts, it checks the environment for TMPDIR, and if it finds it is set,
+# it replaces the value with what is defined here. Commenting this setting
+# suppresses the check altogether.
+
+TMPDIR="/tmp"
+
+
+#------------------------------------------------------------------------------
+# The following macros can be used to change the default modes that are used
+# by the appendfile transport. In most installations the defaults are just
+# fine, and in any case, you can change particular instances of the transport
+# at run time if you want.
+
+# APPENDFILE_MODE=0600
+# APPENDFILE_DIRECTORY_MODE=0700
+# APPENDFILE_LOCKFILE_MODE=0600
+
+
+#------------------------------------------------------------------------------
+# In some installations there may be multiple machines sharing file systems,
+# where a different configuration file is required for Exim on the different
+# machines. If CONFIGURE_FILE_USE_NODE is defined, then Exim will first look
+# for a configuration file whose name is that defined by CONFIGURE_FILE,
+# with the node name obtained by uname() tacked on the end, separated by a
+# period (for example, /usr/exim/configure.host.in.some.domain). If this file
+# does not exist, then the bare configuration file name is tried.
+
+# CONFIGURE_FILE_USE_NODE=yes
+
+
+#------------------------------------------------------------------------------
+# In some esoteric configurations two different versions of Exim are run,
+# with different setuid values, and different configuration files are required
+# to handle the different cases. If CONFIGURE_FILE_USE_EUID is defined, then
+# Exim will first look for a configuration file whose name is that defined
+# by CONFIGURE_FILE, with the effective uid tacked on the end, separated by
+# a period (for example, /usr/exim/configure.0). If this file does not exist,
+# then the bare configuration file name is tried. In the case when both
+# CONFIGURE_FILE_USE_EUID and CONFIGURE_FILE_USE_NODE are set, four files
+# are tried: <name>.<euid>.<node>, <name>.<node>, <name>.<euid>, and <name>.
+
+# CONFIGURE_FILE_USE_EUID=yes
+
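To make the search order concrete: with CONFIGURE_FILE=/usr/exim/configure, an effective uid of 0 and a (hypothetical) node name of mail1, enabling both options makes Exim try, in order:

    /usr/exim/configure.0.mail1
    /usr/exim/configure.mail1
    /usr/exim/configure.0
    /usr/exim/configure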
+
+#------------------------------------------------------------------------------
+# The size of the delivery buffers: These specify the sizes (in bytes) of
+# the buffers that are used when copying a message from the spool to a
+# destination. There is rarely any need to change these values.
+
+# DELIVER_IN_BUFFER_SIZE=8192
+# DELIVER_OUT_BUFFER_SIZE=8192
+
+
+#------------------------------------------------------------------------------
+# The mode of the database directory: Exim creates a directory called "db"
+# in its spool directory, to hold its databases of hints. This variable
+# determines the mode of the created directory. The default value in the
+# source is 0750.
+
+# EXIMDB_DIRECTORY_MODE=0750
+
+
+#------------------------------------------------------------------------------
+# Database file mode: The mode of files created in the "db" directory defaults
+# to 0640 in the source, and can be changed here.
+
+# EXIMDB_MODE=0640
+
+
+#------------------------------------------------------------------------------
+# Database lock file mode: The mode of zero-length files created in the "db"
+# directory to use for locking purposes defaults to 0640 in the source, and
+# can be changed here.
+
+# EXIMDB_LOCKFILE_MODE=0640
+
+
+#------------------------------------------------------------------------------
+# This parameter sets the maximum length of the header portion of a message
+# that Exim is prepared to process. The default setting is one megabyte. The
+# limit exists in order to catch rogue mailers that might connect to your SMTP
+# port, start off a header line, and then just pump junk at it for ever. The
+# message_size_limit option would also catch this, but it may not be set.
+# The value set here is the default; it can be changed at runtime.
+
+# HEADER_MAXSIZE="(1024*1024)"
+
+
+#------------------------------------------------------------------------------
+# The mode of the input directory: The input directory is where messages are
+# kept while awaiting delivery. Exim creates it if necessary, using a mode
+# which can be defined here (default 0750).
+
+# INPUT_DIRECTORY_MODE=0750
+
+
+#------------------------------------------------------------------------------
+# The mode of Exim's log directory, when it is created by Exim inside the spool
+# directory, defaults to 0750 but can be changed here.
+
+# LOG_DIRECTORY_MODE=0750
+
+
+#------------------------------------------------------------------------------
+# The log files themselves are created as required, with a mode that defaults
+# to 0640, but which can be changed here.
+
+# LOG_MODE=0640
+
+
+#------------------------------------------------------------------------------
+# The TESTDB lookup is for performing tests on the handling of lookup results,
+# and is not useful for general running. It should be included only when
+# debugging the code of Exim.
+
+# LOOKUP_TESTDB=yes
+
+
+#------------------------------------------------------------------------------
+# /bin/sh is used by default as the shell in which to run commands that are
+# defined in the makefiles. This can be changed if necessary, by uncommenting
+# this line and specifying another shell, but note that a Bourne-compatible
+# shell is expected.
+
+# MAKE_SHELL=/bin/sh
+
+
+#------------------------------------------------------------------------------
+# The maximum number of named lists of each type (address, domain, host, and
+# local part) can be increased by changing this value. It should be set to
+# a multiple of 16.
+
+MAX_NAMED_LIST=16
+
+
+#------------------------------------------------------------------------------
+# Network interfaces: Unless you set the local_interfaces option in the runtime
+# configuration file to restrict Exim to certain interfaces only, it will run
+# code to find all the interfaces there are on your host. Unfortunately,
+# the call to the OS that does this requires a buffer large enough to hold
+# data for all the interfaces - it was designed in the days when a host rarely
+# had more than three or four interfaces. Nowadays hosts can have very many
+# virtual interfaces running on the same hardware. If you have more than 250
+# virtual interfaces, you will need to uncomment this setting and increase the
+# value.
+
+# MAXINTERFACES=250
+
+
+#------------------------------------------------------------------------------
+# Per-message logs: While a message is in the process of being delivered,
+# comments on its progress are written to a message log, for the benefit of
+# human administrators. These logs are held in a directory called "msglog"
+# in the spool directory. Its mode defaults to 0750, but can be changed here.
+# The message log directory is also used for storing files that are used by
+# transports for returning data to a message's sender (see the "return_output"
+# option for transports).
+
+# MSGLOG_DIRECTORY_MODE=0750
+
+
+#------------------------------------------------------------------------------
+# There are three options which are used when compiling the Perl interface and
+# when linking with Perl. The default values for these are placed automatically
+# at the head of the Makefile by the script which builds it. However, if you
+# want to override them, you can do so here.
+
+# PERL_CC=
+# PERL_CCOPTS=
+# PERL_LIBS=
+
+
+#------------------------------------------------------------------------------
+# Identifying the daemon: When an Exim daemon starts up, it writes its pid
+# (process id) to a file so that it can easily be identified. The path of the
+# file can be specified here. Some installations may want something like this:
+
+PID_FILE_PATH=/var/run/exim.pid
+
+# If PID_FILE_PATH is not defined, Exim writes a file in its spool directory
+# using the name "exim-daemon.pid".
+
+# If you start up a daemon without the -bd option (for example, with just
+# the -q15m option), a pid file is not written. Also, if you override the
+# configuration file with the -oX option, no pid file is written. In other
+# words, the pid file is written only for a "standard" daemon.
+
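With the PID_FILE_PATH above, the distinction works out as follows (illustrative invocations; the arguments in the first line match this package's exim.conf.d fragment shown further down):

    /usr/sbin/exim -bd -q15m     # listening daemon: writes /var/run/exim.pid
    /usr/sbin/exim -q15m         # queue runner only: no pid file is written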
+
+#------------------------------------------------------------------------------
+# If Exim creates the spool directory, it is given this mode, defaulting in the
+# source to 0750.
+
+# SPOOL_DIRECTORY_MODE=0750
+
+
+#------------------------------------------------------------------------------
+# The mode of files on the input spool which hold the contents of messages can
+# be changed here. The default is 0640 so that information from the spool is
+# available to anyone who is a member of the Exim group.
+
+# SPOOL_MODE=0640
+
+
+#------------------------------------------------------------------------------
+# Moving frozen messages: If the following is uncommented, Exim is compiled
+# with support for automatically moving frozen messages out of the main spool
+# directory, a facility that is found useful by some large installations. A
+# run time option is required to cause the moving actually to occur. Such
+# messages become "invisible" to the normal management tools.
+
+# SUPPORT_MOVE_FROZEN_MESSAGES=yes
+
+
+#------------------------------------------------------------------------------
+# Disabling the use of fsync(): DO NOT UNCOMMENT THE FOLLOWING LINE unless you
+# really, really, really know what you are doing. And even then, think again.
+# You should never uncomment this when compiling a binary for distribution.
+# Use it only when compiling Exim for your own use.
+#
+# Uncommenting this line enables the use of a runtime option called
+# disable_fsync, which can be used to stop Exim using fsync() to ensure that
+# files are written to disc before proceeding. When this is disabled, crashes
+# and hardware problems such as power outages can cause data to be lost. This
+# feature should only be used in very exceptional circumstances. YOU HAVE BEEN
+# WARNED.
+
+# ENABLE_DISABLE_FSYNC=yes
+
+HAVE_IPV6=YES
+LOOKUP_LIBS=-lldap
+EXTRALIBS_EXIM=-lwrap -lpam
+# End of EDITME for Exim 4.
diff --git a/community-testing/exim/exim.changelog b/community-testing/exim/exim.changelog
new file mode 100644
index 000000000..69182229e
--- /dev/null
+++ b/community-testing/exim/exim.changelog
@@ -0,0 +1,21 @@
+2010-06-16 Angel Velasquez <angvp@archlinux.org>
+ * Rebuilt against db 5.2.28
+
+2010-05-09 Angel Velasquez <angvp@archlinux.org>
+ * Updated to 4.76
+ * Removed previous patch since is no longer needed
+ * Removed newaliases script FS#22744
+ * Removed sudo dependency
+
+2010-05-07 Angel Velasquez <angvp@archlinux.org>
+ * Adding patch for security issue see: http://goo.gl/QBict
+
+2010-05-06 Angel Velasquez <angvp@archlinux.org>
+ * Setting sticky id to deliver mail. Closes FS#24109
+
+2010-04-30 Angel Velasquez <angvp@archlinux.org>
+ * exim 4.75
+ * Config updated with the aliases path. Closes FS#22743
+ * Removed sed hacks and added an exim.Makefile. Closes FS#22744
+ * Replaced creation of the user at build time. Closes FS#22745
+
diff --git a/community-testing/exim/exim.conf.d b/community-testing/exim/exim.conf.d
new file mode 100644
index 000000000..b9bec4335
--- /dev/null
+++ b/community-testing/exim/exim.conf.d
@@ -0,0 +1 @@
+EXIM_ARGS="-bd -q15m"
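A conf.d fragment like this is normally sourced by the matching rc script; a minimal sketch of that pattern, hypothetical rather than a copy of the packaged script, assuming the fragment is installed as /etc/conf.d/exim in the usual Arch location:

    #!/bin/bash
    . /etc/conf.d/exim           # provides EXIM_ARGS="-bd -q15m"
    /usr/sbin/exim $EXIM_ARGS    # listening daemon with a 15-minute queue run interval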
diff --git a/community-testing/exim/exim.install b/community-testing/exim/exim.install
new file mode 100644
index 000000000..8ed329559
--- /dev/null
+++ b/community-testing/exim/exim.install
@@ -0,0 +1,25 @@
+# arg 1: the new package version
+post_install() {
+ getent group exim >/dev/null 2>&1 || groupadd -g 79 exim
+ if getent passwd exim > /dev/null 2>&1; then
+ usr/sbin/usermod -d /var/spool/exim -c 'Exim MTA' -s /sbin/nologin exim > /dev/null 2>&1
+ else
+ usr/sbin/useradd -c 'Exim MTA' -u 79 -g exim -d /var/spool/exim -s /sbin/nologin exim
+ fi
+ passwd -l exim > /dev/null
+ chown root.exim /var/spool/exim /var/log/exim
+ chown exim.exim /var/spool/exim/db
+ chmod u+s /usr/sbin/exim
+}
+
+# arg 1: the new package version
+# arg 2: the old package version
+post_upgrade() {
+ post_install $1
+}
+
+# arg 1: the old package version
+pre_remove() {
+ getent passwd exim >/dev/null 2>&1 && userdel exim
+}
+
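If the scriptlet above runs cleanly, the resulting on-disk state should roughly match the following (illustrative checks, not part of the package):

    stat -c '%U:%G' /var/spool/exim /var/log/exim   # root:exim
    stat -c '%U:%G' /var/spool/exim/db              # exim:exim
    ls -l /usr/sbin/exim                            # setuid root, so unprivileged users can submit mail
    getent passwd exim                              # uid 79, home /var/spool/exim, shell /sbin/nologin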
diff --git a/community-testing/exim/exim.logrotate b/community-testing/exim/exim.logrotate
new file mode 100644
index 000000000..ff38a1452
--- /dev/null
+++ b/community-testing/exim/exim.logrotate
@@ -0,0 +1,5 @@
+/var/log/exim/*log {
+ missingok
+ notifempty
+ delaycompress
+}
diff --git a/community-testing/libgda3/PKGBUILD b/community-testing/libgda3/PKGBUILD
new file mode 100644
index 000000000..07313f713
--- /dev/null
+++ b/community-testing/libgda3/PKGBUILD
@@ -0,0 +1,27 @@
+# $Id: PKGBUILD 49363 2011-06-15 09:20:21Z spupykin $
+# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
+
+pkgname=libgda3
+pkgver=3.1.5
+pkgrel=9
+pkgdesc="data abstraction layer; with mysql, pgsql, ldap, xml, sqlite providers"
+arch=('i686' 'x86_64')
+url="http://www.gnome-db.org/Download"
+license=('GPL')
+depends=('glib2' 'libxslt' 'popt' 'rarian' 'db' 'gnome-vfs'
+ 'libmysqlclient' 'postgresql-libs>=8.4.1' 'libldap' 'unixodbc' 'sqlite3')
+makedepends=('intltool' 'pkgconfig' 'gtk-doc' 'util-linux-ng')
+options=('!libtool' '!distcc')
+source=(http://ftp.acc.umu.se/pub/GNOME/sources/libgda/3.1/libgda-$pkgver.tar.bz2)
+md5sums=('eb7da5286a112e7cff3111c89fba4456')
+
+build() {
+ cd "$srcdir/libgda-$pkgver"
+ ./configure --prefix=/usr --sysconfdir=/etc
+ make
+}
+package(){
+ cd "$srcdir/libgda-$pkgver"
+ make DESTDIR=$pkgdir install
+ cd "$pkgdir" && find -name \*..so -exec rename '..so' '.a' {} \;
+}
diff --git a/community-testing/librcc/PKGBUILD b/community-testing/librcc/PKGBUILD
new file mode 100644
index 000000000..ca2eb950c
--- /dev/null
+++ b/community-testing/librcc/PKGBUILD
@@ -0,0 +1,53 @@
+# $Id: PKGBUILD 49369 2011-06-15 09:26:07Z spupykin $
+# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
+# Contributor: Sergej Pupykin <pupykin.s+arch@gmail.com>
+
+pkgname=librcc
+pkgver=0.2.6
+pkgrel=4
+pkgdesc="Charset Conversion Library"
+arch=(i686 x86_64)
+url="http://rusxmms.sourceforge.net/"
+license=('GPL')
+depends=(aspell enca libxml2 db librcd)
+makedepends=(patch gtk gtk2)
+install=librcc.install
+source=(http://downloads.sourceforge.net/rusxmms/${pkgname}-${pkgver}.tar.bz2
+ librcc-strnlen.patch)
+md5sums=('9bbf248c7312c73c0b6ca19b9c5a2af1'
+ '040313d1d8f166ccf2b128cea4c05f21')
+
+build() {
+ cd ${startdir}/src/$pkgname-${pkgver}
+ ./configure --prefix=/usr
+ patch -p1 <$srcdir/librcc-strnlen.patch
+ make
+}
+
+package() {
+ cd ${startdir}/src/$pkgname-${pkgver}
+ mkdir -p $startdir/pkg/etc/rcc
+ mkdir -p $startdir/pkg/usr/lib/rcc/engines
+ mkdir -p $startdir/pkg/usr/bin
+
+ make DESTDIR=$startdir/pkg install
+
+ rm -f $startdir/pkg/usr/lib/*.la
+ rm -f $startdir/pkg/usr/lib/rcc/engines/*.a
+ rm -f $startdir/pkg/usr/lib/rcc/engines/*.la
+
+ make -C examples
+ make -C examples install DESTDIR=$startdir/pkg
+ rm -f $startdir/pkg/usr/bin/example*
+
+ install -m 644 examples/rcc.xml $startdir/pkg/etc
+
+ if [ -f $startdir/pkg/usr/bin/rcc-gtk2-config ]; then
+ ln -s rcc-gtk2-config $startdir/pkg/usr/bin/rcc-config
+ elif [ -f $startdir/pkg/usr/bin/rcc-gtk-config ]; then
+ ln -s rcc-gtk-config $startdir/pkg/usr/bin/rcc-config
+ else
+ echo "#!/bin/bash" > $startdir/pkg/usr/bin/rcc-config
+ echo "echo \"Configuration UI is not available!\"" >> $startdir/pkg/usr/bin/rcc-config
+ fi
+}
diff --git a/community-testing/librcc/librcc-strnlen.patch b/community-testing/librcc/librcc-strnlen.patch
new file mode 100644
index 000000000..9fc0e8b0b
--- /dev/null
+++ b/community-testing/librcc/librcc-strnlen.patch
@@ -0,0 +1,17 @@
+diff -wbBur librcc-0.2.6/src/rccstring.h librcc-0.2.6.qwe/src/rccstring.h
+--- librcc-0.2.6/src/rccstring.h 2006-01-08 15:42:59.000000000 +0000
++++ librcc-0.2.6.qwe/src/rccstring.h 2009-09-28 08:45:37.000000000 +0000
+@@ -18,13 +18,6 @@
+ int rccStringFixID(rcc_string string, rcc_context ctx);
+ int rccStringChangeID(rcc_string string, rcc_language_id language_id);
+
+-#ifdef HAVE_STRNLEN
+-# ifndef strnlen
+-int strnlen(const char *str, size_t size);
+-# endif /* !strnlen */
+-#else
+-int rccStrnlen(const char *str, size_t size);
+-#endif /* HAVE_STRNLEN */
+ int rccIsASCII(const char *str);
+ size_t rccStringSizedGetChars(const char *str, size_t size);
+
diff --git a/community-testing/librcc/librcc.install b/community-testing/librcc/librcc.install
new file mode 100644
index 000000000..53b1d7ce7
--- /dev/null
+++ b/community-testing/librcc/librcc.install
@@ -0,0 +1,6 @@
+post_install() {
+ echo "-- Don't forget to install gtk or/and gtk2 packages to enable librcc"
+ echo " gui features"
+ echo "-- Also you may change /usr/bin/rcc-config symlink to switch between"
+ echo " gtk and gtk2 (rcc-gtk-config and rcc-gtk2-config)"
+}
diff --git a/community-testing/perl-berkeleydb/PKGBUILD b/community-testing/perl-berkeleydb/PKGBUILD
new file mode 100644
index 000000000..f586d53b0
--- /dev/null
+++ b/community-testing/perl-berkeleydb/PKGBUILD
@@ -0,0 +1,32 @@
+# $Id: PKGBUILD 49372 2011-06-15 09:28:37Z spupykin $
+# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
+# Maintainer: Charles Mauch <cmauch@gmail.com>
+# Contributor: Francois Charette <firmicus@gmx.net>
+
+pkgname=perl-berkeleydb
+pkgver=0.43
+pkgrel=6
+pkgdesc="Interface to Berkeley DB version 2, 3 or 4"
+arch=('i686' 'x86_64')
+url="http://search.cpan.org/dist/BerkeleyDB/"
+license=('GPL' 'PerlArtistic')
+depends=('perl' 'db')
+options=('!emptydirs')
+source=(http://search.cpan.org/CPAN/authors/id/P/PM/PMQS/BerkeleyDB-$pkgver.tar.gz)
+md5sums=('3d0cf0651ed8cd3fc36e328d5924a1e9')
+
+build() {
+ cd $srcdir/BerkeleyDB-$pkgver
+ PERL_MM_USE_DEFAULT=1 perl Makefile.PL INSTALLDIRS=vendor
+ make
+}
+
+package() {
+ _dbver=`pacman -Q db | cut -d\ -f2 | cut -d- -f1`
+ depends=('perl' "db=$_dbver")
+
+ cd $srcdir/BerkeleyDB-$pkgver
+ make pure_install doc_install DESTDIR=$pkgdir
+ find $pkgdir -name '.packlist' -delete
+ find $pkgdir -name '*.pod' -delete
+}
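The package() function pins the db dependency to whatever version is installed at build time; with, say, db 5.2.28-1 present, the pipeline evaluates like this (illustrative values):

    pacman -Q db                               # -> "db 5.2.28-1"
    pacman -Q db | cut -d\  -f2 | cut -d- -f1  # -> "5.2.28", so depends=('perl' 'db=5.2.28')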
diff --git a/community-testing/perl-libapreq2/PKGBUILD b/community-testing/perl-libapreq2/PKGBUILD
new file mode 100644
index 000000000..b9526091a
--- /dev/null
+++ b/community-testing/perl-libapreq2/PKGBUILD
@@ -0,0 +1,31 @@
+# $Id: PKGBUILD 49380 2011-06-15 09:32:23Z spupykin $
+# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
+# Maintainer: Tom K <tomk@runbox.com>
+
+pkgname=perl-libapreq2
+pkgver=2.12
+pkgrel=9
+pkgdesc="A safe, standards-compliant, high-performance library used for parsing HTTP cookies, query-strings and POST data."
+arch=('i686' 'x86_64')
+url="http://search.cpan.org/dist/libapreq2"
+depends=('mod_perl' 'apr-util')
+makedepends=('perl-extutils-xsbuilder' 'perl-version')
+license=("GPL")
+options=('!libtool' '!makeflags')
+source=(http://www.cpan.org/authors/id/J/JO/JOESUF/libapreq2-$pkgver.tar.gz)
+md5sums=('76e2acde0d82246dea6f2565f3746eec')
+
+build() {
+ cd $srcdir/libapreq2-$pkgver
+ PERL_MM_USE_DEFAULT=1 perl Makefile.PL --with-apache2-apxs=/usr/sbin/apxs
+ find . -type f -name Makefile -exec sed -i 's#-ldb-5.1#-ldb-5.2#' {} \;
+ sed -i 's#-ldb-5.1#-ldb-5.2#' apreq2-config
+ make
+}
+package(){
+ cd $srcdir/libapreq2-$pkgver
+ make install DESTDIR=$pkgdir
+ sed -i "s#$srcdir#/usr/src#" $pkgdir/usr/bin/apreq2-config
+ find $pkgdir -name '.packlist' -delete
+ find $pkgdir -name '*.pod' -delete
+}
diff --git a/community-testing/perl-xml-libxslt/PKGBUILD b/community-testing/perl-xml-libxslt/PKGBUILD
new file mode 100644
index 000000000..708898b9e
--- /dev/null
+++ b/community-testing/perl-xml-libxslt/PKGBUILD
@@ -0,0 +1,30 @@
+# Id:$
+# Maintainer: François Charette <firmicus ατ gmx δοτ net>
+
+pkgname=perl-xml-libxslt
+pkgver=1.70
+pkgrel=6
+pkgdesc="Interface to the gnome libxslt library "
+arch=('i686' 'x86_64')
+url="http://search.cpan.org/dist/XML-LibXSLT"
+license=('GPL')
+depends=('perl-xml-libxml' 'libxslt')
+source=(http://www.cpan.org/authors/id/P/PA/PAJAS/XML-LibXSLT-$pkgver.tar.gz)
+md5sums=('c63a7913999de076e5c911810f69b392')
+
+build() {
+ cd $srcdir/XML-LibXSLT-$pkgver
+ PERL_MM_USE_DEFAULT=1 perl Makefile.PL INSTALLDIRS=vendor
+ make
+}
+
+check () {
+ cd $srcdir/XML-LibXSLT-$pkgver
+ make test
+}
+
+package() {
+ cd $srcdir/XML-LibXSLT-$pkgver
+ make install DESTDIR=$pkgdir
+}
+
diff --git a/community-testing/poedit/PKGBUILD b/community-testing/poedit/PKGBUILD
new file mode 100644
index 000000000..8c606f322
--- /dev/null
+++ b/community-testing/poedit/PKGBUILD
@@ -0,0 +1,30 @@
+# $Id: PKGBUILD 49387 2011-06-15 09:36:08Z spupykin $
+# Contributor: Andrea Scarpino <andrea@archlinux.org>
+# Contributor: Giovanni Scafora <giovanni@archlinux.org>
+# Contributor: Alexander Fehr <pizzapunk@gmail.com>
+# Maintainer: Daniel J Griffiths <ghost1227@archlinux.us>
+
+pkgname=poedit
+pkgver=1.4.6.1
+pkgrel=5
+pkgdesc="Cross-platform gettext catalogs (.po files) editor"
+arch=('i686' 'x86_64')
+url="http://www.poedit.net/"
+license=('custom')
+depends=('wxgtk>=2.8.11' 'gtkspell' 'db>=5.1' 'hicolor-icon-theme' 'gettext')
+makedepends=('pkgconfig')
+install=poedit.install
+source=(http://downloads.sourceforge.net/${pkgname}/${pkgname}-${pkgver}.tar.gz)
+md5sums=('c63ffd991b1a6085ef356a6922356e0a')
+
+build() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ ./configure --prefix=/usr
+ make
+}
+
+package() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ make DESTDIR=${pkgdir} install
+ install -D -m644 COPYING ${pkgdir}/usr/share/licenses/${pkgname}/LICENSE
+}
diff --git a/community-testing/poedit/poedit.install b/community-testing/poedit/poedit.install
new file mode 100644
index 000000000..21b79d2d4
--- /dev/null
+++ b/community-testing/poedit/poedit.install
@@ -0,0 +1,11 @@
+post_install() {
+ gtk-update-icon-cache -q -t -f usr/share/icons/hicolor
+}
+
+post_upgrade() {
+ post_install $1
+}
+
+post_remove() {
+ gtk-update-icon-cache -q -t -f usr/share/icons/hicolor
+}
diff --git a/community-testing/python-bsddb/LICENSE b/community-testing/python-bsddb/LICENSE
new file mode 100644
index 000000000..7d6035775
--- /dev/null
+++ b/community-testing/python-bsddb/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2008-2009 Jesus Cea Avion
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/community-testing/python-bsddb/PKGBUILD b/community-testing/python-bsddb/PKGBUILD
new file mode 100644
index 000000000..578d0921d
--- /dev/null
+++ b/community-testing/python-bsddb/PKGBUILD
@@ -0,0 +1,62 @@
+# $Id: PKGBUILD 50695 2011-06-29 15:25:25Z stephane $
+# Maintainer: Kaiting Chen <kaitocracy@gmail.com>
+# Contributor: Stéphane Gaudreault <stephane@archlinux.org>
+# Contributor: Douglas Soares de Andrade <dsandrade@gmail.com>
+# Contributor: William Rea <sillywilly@gmail.com>
+
+pkgbase=python-bsddb
+pkgname=('python2-bsddb' 'python-bsddb')
+_hgrel=600
+pkgver=5.1.2.${_hgrel}
+pkgrel=1
+pkgdesc="Python interface for BerkeleyDB"
+license=('MIT')
+arch=('i686' 'x86_64')
+url="http://www.jcea.es/programacion/pybsddb.htm"
+makedepends=('python2-distribute' 'python-distribute' 'mercurial')
+source=("ftp://ftp.archlinux.org/other/community/${pkgbase}/${pkgbase}-${pkgver}.src.tar.xz"
+ 'LICENSE')
+sha1sums=('50e7b4c9c11b06c6d871dd93af1cc228687a0227'
+ 'ef4e4caf618781104dbf5824279ed39d127b4713')
+
+# source PKGBUILD && mksource
+mksource() {
+ _hgroot="http://hg.jcea.es/pybsddb/"
+ _hgrepo=pybsddb
+ [ -d "${_hgrepo}" ] && hg pull -u -r ${_hgrel} || hg clone ${_hgroot} -r ${_hgrel}
+
+ _dirname=${pkgbase}-${pkgver}
+ mv ${_hgrepo} ${_dirname}
+ tar -cJvf ${_dirname}.src.tar.xz ${_dirname}
+ rm -fr ${_dirname}
+}
+
+build () {
+ cd "${srcdir}"
+
+ cp -r ${pkgbase}-${pkgver}{,-python2}
+
+ # Build python 3 module
+ cd ${pkgbase}-${pkgver}
+ python setup.py --berkeley-db=/usr build
+
+ # Build python 2 module
+ cd ../${pkgbase}-${pkgver}-python2
+ python2 setup.py --berkeley-db=/usr build
+}
+
+package_python2-bsddb() {
+ depends=('db' 'python2>=2.7')
+ cd "${srcdir}/${pkgbase}-${pkgver}-python2"
+
+ python2 setup.py --berkeley-db=/usr install --root="${pkgdir}" --skip-build --optimize=1
+ install -Dm644 "${srcdir}"/LICENSE "${pkgdir}"/usr/share/licenses/${pkgname}/LICENSE
+}
+
+package_python-bsddb() {
+ depends=('db' 'python>=3.2')
+ cd "${srcdir}/${pkgbase}-${pkgver}"
+
+ python setup.py --berkeley-db=/usr install --root="${pkgdir}" --skip-build --optimize=1
+ install -Dm644 "${srcdir}"/LICENSE "${pkgdir}"/usr/share/licenses/${pkgname}/LICENSE
+}
diff --git a/community-testing/wml/PKGBUILD b/community-testing/wml/PKGBUILD
new file mode 100644
index 000000000..33c34b025
--- /dev/null
+++ b/community-testing/wml/PKGBUILD
@@ -0,0 +1,42 @@
+# $Id: PKGBUILD 50698 2011-06-29 15:35:12Z stephane $
+# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
+
+pkgname=wml
+pkgver=2.0.11
+pkgrel=7
+pkgdesc="The Website Meta Language"
+arch=('i686' 'x86_64')
+url="http://thewml.org/"
+license=('GPL')
+depends=('perl' 'libpng' 'gdbm' 'db' 'ncurses')
+makedepends=('lynx')
+source=("http://thewml.org/distrib/${pkgname}-${pkgver}.tar.gz")
+md5sums=('a26feebf4e59e9a6940f54c69dde05b5')
+build() {
+ cd ${pkgname}-${pkgver}
+
+ # missing Perl modules fix
+ sed -i 's/PREFIX=$(libdir)\/perl/DESTDIR=\.\.\/\.\.\/\.\.\/\.\.\/pkg\/ PREFIX=$(libdir)\/perl/' wml_common/Makefile.in
+ sed -i 's/$(MAKE) pure_perl_install $(MM_INSTALL_OPTS)/$(MAKE) pure_perl_install/' wml_common/Makefile.in
+
+ unset LDFLAGS
+ ./configure \
+ --prefix=/usr
+
+ # compile fixhack
+ sed -i 's#/usr/lib/perl5/core_perl/auto/DynaLoader/DynaLoader.a##' wml_backend/p3_eperl/Makefile
+ sed -i 's/extern struct option options\[\]\;//' ${srcdir}/${pkgname}-${pkgver}/wml_backend/p3_eperl/eperl_proto.h
+ sed -i 's|strip $dsttmp|#strip $dsttmp|' etc/shtool
+ mkdir -p ${pkgdir}/usr/bin ${pkgdir}/usr/lib/wml/exec ${pkgdir}/usr/man/man{1,3,7} ${pkgdir}/usr/man/cat{1,7}
+
+# make clean
+ make
+}
+
+package() {
+ cd ${pkgname}-${pkgver}
+
+ make prefix=${pkgdir}/usr install
+
+ [ -d ${pkgdir}/usr/man ] && mkdir -p ${pkgdir}/usr/share && mv ${pkgdir}/usr/man ${pkgdir}/usr/share
+}
diff --git a/community-testing/xemacs/PKGBUILD b/community-testing/xemacs/PKGBUILD
new file mode 100644
index 000000000..8e7d3fe69
--- /dev/null
+++ b/community-testing/xemacs/PKGBUILD
@@ -0,0 +1,65 @@
+# $Id: PKGBUILD 49690 2011-06-19 10:01:11Z ebelanger $
+# Maintainer: juergen <juergen@archlinux.org>
+# Contributor : Stéphane Gaudreault <stephane@archlinux.org>
+
+pkgname=xemacs
+pkgver=21.5.31
+pkgrel=2
+pkgdesc="An highly customizable open source text editor and application development system forked from GNU Emacs"
+arch=('i686' 'x86_64')
+url="http://www.xemacs.org/"
+license=('GPL')
+depends=('db' 'libpng' 'libtiff' 'gpm' 'desktop-file-utils' 'libxaw' 'gdbm' 'giflib')
+optdepends=('xorg-fonts-75dpi: X bitmap fonts needed for the interface'
+ 'xorg-fonts-100dpi: X bitmap fonts needed for the interface')
+makedepends=('xbitmaps')
+install=xemacs.install
+source=(http://ftp.xemacs.org/pub/xemacs/beta/$pkgname-$pkgver.tar.gz
+ xemacs.desktop
+ xemacs-21.5.29-optimization-bug.patch)
+md5sums=('0185fe905d0b8d8d094d9b60cf262d4a'
+ 'a4d3d5c0aa2c7ce7bec491f809ca3694'
+ 'eb061b10ea3bbe1026df5326ae1618e3')
+
+build() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+
+# Fix problem caused by improper optimization with GCC>=4.1.2 on i686
+# (and possibly other arches). See
+# http://tracker.xemacs.org/XEmacs/its/issue354
+# for the upstream bug report.
+ patch -Np0 -i ../xemacs-21.5.29-optimization-bug.patch
+
+ ./configure --build="${CHOST}" --prefix=/usr --with-dynamic \
+ --without-postgresql --with-athena=xaw \
+ --enable-database=berkdb --without-ldap \
+ --enable-menubars=lucid --enable-scrollbars=lucid \
+ --enable-widgets=athena --enable-dialogs=athena \
+ --enable-external-widget \
+ --with-jpeg --with-png --with-tiff \
+ --with-ncurses --with-pop --with-xfs --disable-sound \
+ --infodir=/usr/share/info \
+ --with-mule \
+ --mandir=/usr/share/man/man1
+
+ make
+}
+
+package() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+ make -j1 prefix="${pkgdir}/usr" mandir="${pkgdir}/usr/share/man/man1" infodir="${pkgdir}/usr/share/info" install gzip-el
+
+ rm "${pkgdir}"/usr/bin/{b2m,ctags,etags}
+ rm "${pkgdir}"/usr/share/man/man1/{ctags.1,etags.1}
+
+ # fix FS#7927
+ install -d -m755 "${pkgdir}/usr/share/pixmaps"
+ install -D -m644 "${srcdir}/${pkgname}.desktop" "${pkgdir}/usr/share/applications/${pkgname}.desktop"
+ ln -sf /usr/share/xemacs-${pkgver%.*}-b${pkgver##*.}/etc/xemacs-icon.xpm "${pkgdir}/usr/share/pixmaps/xemacs-icon.xpm"
+
+ # correct permissions
+ chown -R root:root "${pkgdir}"
+
+ rm "${pkgdir}"/usr/share/info/{info.info,texinfo.info-2,cl.info,texinfo.info-1,texinfo.info,standards.info,widget.info}
+}
+# vim: ts=2 sw=2 et ft=sh
diff --git a/community-testing/xemacs/xemacs-21.5.29-optimization-bug.patch b/community-testing/xemacs/xemacs-21.5.29-optimization-bug.patch
new file mode 100644
index 000000000..32a1a02c2
--- /dev/null
+++ b/community-testing/xemacs/xemacs-21.5.29-optimization-bug.patch
@@ -0,0 +1,14 @@
+Work around a GCC optimization bug as described in
+http://tracker.xemacs.org/XEmacs/its/issue354
+
+--- src/dumper.c.~1~ 2008-01-26 09:54:11.000000000 +0100
++++ src/dumper.c 2008-05-03 10:17:03.000000000 +0200
+@@ -2584,7 +2584,7 @@
+ #endif /* !WIN32_NATIVE */
+
+
+-static int
++int
+ pdump_file_try (Wexttext *exe_path)
+ {
+ Wexttext *w = exe_path + wext_strlen (exe_path);
diff --git a/community-testing/xemacs/xemacs.desktop b/community-testing/xemacs/xemacs.desktop
new file mode 100644
index 000000000..257c56947
--- /dev/null
+++ b/community-testing/xemacs/xemacs.desktop
@@ -0,0 +1,34 @@
+[Desktop Entry]
+Name=XEmacs Text Editor
+Name[bg]=редактор XEmacs
+Name[ca]=Editor XEmacs
+Name[cs]=Editor XEmacs
+Name[da]=XEmacs tekstredigerer
+Name[de]=XEmacs Texteditor
+Name[eo]=La redaktilo XEmakso
+Name[es]=Emacs para X
+Name[et]=Emacs X'i kasutajaliidesega
+Name[fi]=XEmacs editori
+Name[fr]=Éditeur XEmacs
+Name[hu]=XEmacs szövegszerkesztő
+Name[is]=XEmacs-ritill
+Name[it]=Emacs per X
+Name[ja]=XEmacs テキスト・エディター
+Name[mk]=Софистицираниот XEmacs уредувач
+Name[no]=XEmacs-tekstredigerer
+Name[pt]=Editor XEmacs
+Name[ro]=Editorul XEmacs
+Name[ru]=Редактор XEmacs
+Name[sk]=Editor XEmacs
+Name[sl]=Urejevalnik XEmacs
+Name[uk]=Редактор XEmacs
+GenericName=Text Editor
+Comment=Edit text
+MimeType=text/english;text/plain;text/x-makefile;text/x-c++hdr;text/x-c++src;text/x-chdr;text/x-csrc;text/x-java;text/x-moc;text/x-pascal;text/x-tcl;text/x-tex;application/x-shellscript;text/x-c;text/x-c++;
+Exec=xemacs %f
+Icon=/usr/share/pixmaps/xemacs-icon.xpm
+Type=Application
+Terminal=false
+Categories=Development;TextEditor;
+Encoding=UTF-8
+StartupWMClass=Emacs
diff --git a/community-testing/xemacs/xemacs.install b/community-testing/xemacs/xemacs.install
new file mode 100644
index 000000000..a04ee187f
--- /dev/null
+++ b/community-testing/xemacs/xemacs.install
@@ -0,0 +1,35 @@
+infodir=usr/share/info
+filelist=(beta.info.gz custom.info.gz
+ emodules.info.gz external-widget.info.gz
+ internals.info-1.gz internals.info-2.gz
+ internals.info-3.gz internals.info-4.gz
+ internals.info-5.gz internals.info.gz
+ lispref.info-1.gz lispref.info-10.gz
+ lispref.info-2.gz lispref.info-3.gz
+ lispref.info-4.gz lispref.info-5.gz
+ lispref.info-6.gz lispref.info-7.gz
+ lispref.info-8.gz lispref.info-9.gz
+ lispref.info.gz new-users-guide.info.gz
+ term.info.gz termcap.info.gz
+ xemacs-faq.info-1.gz xemacs-faq.info-2.gz
+ xemacs-faq.info.gz xemacs.info-1.gz
+ xemacs.info-2.gz xemacs.info-3.gz
+ xemacs.info-4.gz xemacs.info.gz)
+
+post_install() {
+ for file in ${filelist[@]}; do
+ install-info $infodir/$file $infodir/dir 2> /dev/null
+ done
+ update-desktop-database -q
+}
+
+post_upgrade() {
+ post_install $1
+}
+
+pre_remove() {
+ for file in ${filelist[@]}; do
+ install-info --delete $infodir/$file $infodir/dir 2> /dev/null
+ done
+ update-desktop-database -q
+}
diff --git a/community/autofs/PKGBUILD b/community/autofs/PKGBUILD
index 6516f9736..6a85101ea 100644
--- a/community/autofs/PKGBUILD
+++ b/community/autofs/PKGBUILD
@@ -1,12 +1,12 @@
-# $Id: PKGBUILD 47057 2011-05-15 17:16:45Z lfleischer $
+# $Id: PKGBUILD 50871 2011-07-02 10:03:49Z lfleischer $
# Maintainer: Lukas Fleischer <archlinux at cryptocrack dot de>
# Contributor: Andrea Scarpino <andrea@archlinux.org>
# Contributor: Dale Blount <dale@archlinux.org>
# Contributor: Manolis Tzanidakis
pkgname=autofs
-pkgver=5.0.5
-pkgrel=7
+pkgver=5.0.6
+pkgrel=1
pkgdesc='A kernel-based automounter for Linux.'
arch=('i686' 'x86_64')
url='http://freshmeat.net/projects/autofs'
@@ -18,15 +18,11 @@ backup=('etc/autofs/auto.master'
options=(!makeflags)
install='autofs.install'
source=("http://www.kernel.org/pub/linux/daemons/${pkgname}/v5/${pkgname}-${pkgver}.tar.bz2"
- "0001-${pkgname}-5.0.5-include-krb5-library.patch"
- "0002-${pkgname}-5.0.5-remove-ERR_remove_state-openssl-call.patch"
'autofs'
'autofs.conf.d'
'auto.master'
'auto.misc')
-md5sums=('a1d262cb6ebef0c2dd0fe22232fb3d5a'
- '478737b8e3e79365a0e183aa95aab307'
- 'e347999e5dffe142e7e57b067d3d9e9f'
+md5sums=('44cea7f711e2290a0b50fddeda079705'
'e307bf6d2638e46eeb916cf42fe029b2'
'47f597c870410055e0fdb66103daf928'
'a6cefb591e77b31b79dbb7243646c96b'
@@ -38,9 +34,6 @@ build() {
sed -i "s:SUBDIRS = lib daemon modules man samples:SUBDIRS = lib daemon modules man:" \
Makefile.rules
- patch -p1 -i "../0001-${pkgname}-5.0.5-include-krb5-library.patch"
- patch -p1 -i "../0002-${pkgname}-5.0.5-remove-ERR_remove_state-openssl-call.patch"
-
./configure --prefix=/usr --sysconfdir=/etc/autofs --with-mapdir=/etc/autofs --without-hesiod \
--enable-ignore-busy
make
diff --git a/community/calibre/PKGBUILD b/community/calibre/PKGBUILD
index 49bc53644..501d17bba 100644
--- a/community/calibre/PKGBUILD
+++ b/community/calibre/PKGBUILD
@@ -1,10 +1,10 @@
-# $Id: PKGBUILD 50108 2011-06-24 19:28:04Z giovanni $
+# $Id: PKGBUILD 50866 2011-07-02 09:42:55Z giovanni $
# Maintainer: Giovanni Scafora <giovanni@archlinux.org>
# Contributor: Petrov Roman <nwhisper@gmail.com>
# Contributor: Andrea Fagiani <andfagiani _at_ gmail dot com>
pkgname=calibre
-pkgver=0.8.7
+pkgver=0.8.8
pkgrel=1
pkgdesc="Ebook management application"
arch=('i686' 'x86_64')
@@ -20,7 +20,7 @@ optdepends=('ipython: to use calibre-debug')
install=calibre.install
source=(http://downloads.sourceforge.net/${pkgname}/${pkgname}-${pkgver}.tar.gz
desktop_integration.patch)
-md5sums=('d7132ec75f5e742e04e196fc8e2c0884'
+md5sums=('dab191175d370920b366bcd8c0dad917'
'253ce4fe5d01f8ff76b63cd3825755ea')
build() {
diff --git a/community/fotoxx/PKGBUILD b/community/fotoxx/PKGBUILD
index fb8242bad..2204d5639 100644
--- a/community/fotoxx/PKGBUILD
+++ b/community/fotoxx/PKGBUILD
@@ -1,6 +1,6 @@
# Maintainer: Stefan Husmann <stefan-husmann@t-online.de>
pkgname=fotoxx
-pkgver=11.06.2
+pkgver=11.07
pkgrel=1
pkgdesc="A program for improving image files made with a digital camera"
url="http://kornelix.squarespace.com/fotoxx"
@@ -9,7 +9,7 @@ license=('GPL3')
depends=('gimp-ufraw' 'perl-exiftool' 'xdg-utils' 'desktop-file-utils')
optdepends=('mashup: for composing pages to print from within fotoxx' 'brasero: for burning')
source=("http://kornelix.squarespace.com/storage/downloads/$pkgname-$pkgver.tar.gz")
-md5sums=('03fac30ee3239f379d5a9fcdc8ba3933')
+md5sums=('cf7ce6011108da211b0169df6686ae95')
install=fotoxx.install
build() {
diff --git a/community/mongodb/PKGBUILD b/community/mongodb/PKGBUILD
index c19206216..dde0f9f69 100644
--- a/community/mongodb/PKGBUILD
+++ b/community/mongodb/PKGBUILD
@@ -4,7 +4,7 @@
pkgname=mongodb
pkgver=1.8.2
-pkgrel=1
+pkgrel=2
pkgdesc='A high-performance, open source, schema-free document-oriented database.'
arch=('i686' 'x86_64')
url='http://www.mongodb.org'
@@ -18,8 +18,8 @@ source=("http://downloads.mongodb.org/src/mongodb-src-r${pkgver}.tar.gz"
'mongodb.rc'
'mongodb.conf')
md5sums=('951fb1a75d90fc822cf4528585a970cf'
- '859f8f9bb32ef2bd21fec55ae9a87d0a'
- '9e0ea3f96732bb7811f0b64dace56440')
+ '85eaa28e349fdc6250f883624e624cca'
+ '4839fe1d638187ca3226e8267b947318')
build() {
export SCONSFLAGS="$MAKEFLAGS"
@@ -43,7 +43,8 @@ package() {
${pkgdir}/etc/rc.d/mongodb
install -D -m644 ${srcdir}/mongodb.conf \
${pkgdir}/etc/mongodb.conf
- install -d -m700 ${pkgdir}/var/state/mongodb
+ install -d -m700 ${pkgdir}/var/lib/mongodb
+ install -d -m755 ${pkgdir}/var/log/mongodb
if [ -d ${pkgdir}/usr/lib64 ]; then
mv ${pkgdir}/usr/lib64 ${pkgdir}/usr/lib
diff --git a/community/mongodb/mongodb.conf b/community/mongodb/mongodb.conf
index 60d73eae8..c5272b7f2 100644
--- a/community/mongodb/mongodb.conf
+++ b/community/mongodb/mongodb.conf
@@ -3,4 +3,6 @@
bind_ip = 127.0.0.1
quiet = true
-dbpath = /var/state/mongodb
+dbpath = /var/lib/mongodb
+logpath = /var/log/mongodb/mongod.log
+logappend = true
diff --git a/community/mongodb/mongodb.install b/community/mongodb/mongodb.install
index 152f36515..9e13a4d14 100755
--- a/community/mongodb/mongodb.install
+++ b/community/mongodb/mongodb.install
@@ -4,28 +4,25 @@ show_msg(){
if [ "$(arch)" != "x86_64" ]
then
cat <<END
-###########################################################################
-# Warning: the 32 bit version of MongoDB is limited to about 2GB of data. #
-# See http://blog.mongodb.org/post/137788967/32-bit-limitations #
-###########################################################################
+==> Warning: the 32 bit version of MongoDB is limited to about 2GB of data.
+==> See http://blog.mongodb.org/post/137788967/32-bit-limitations
END
fi
}
post_install() {
- useradd -r -g daemon -d /var/state/mongodb -s /bin/false mongodb
- chown -R mongodb:daemon /var/state/mongodb
+ useradd -r -g daemon -d /var/lib/mongodb -s /bin/bash mongodb
+ chown -R mongodb:daemon /var/lib/mongodb
+ chown -R mongodb:daemon /var/log/mongodb
show_msg
}
post_upgrade() {
- chown -R mongodb:daemon /var/state/mongodb
-
- show_msg
+ chown -R mongodb:daemon /var/lib/mongodb
+ chown -R mongodb:daemon /var/log/mongodb
}
pre_remove() {
- /etc/rc.d/mongodb stop
- userdel mongodb
+ userdel mongodb
}
diff --git a/community/mongodb/mongodb.rc b/community/mongodb/mongodb.rc
index b808a2fb0..517258f63 100755
--- a/community/mongodb/mongodb.rc
+++ b/community/mongodb/mongodb.rc
@@ -8,7 +8,7 @@ PID=`pidof /usr/bin/mongod`
case "$1" in
start)
stat_busy "Starting mongodb"
- [ -z "$PID" ] && /bin/su mongodb -s /bin/bash -c "/usr/bin/mongod --config /etc/mongodb.conf &" > /var/log/mongod 2>&1
+ [ -z "$PID" ] && /bin/su mongodb -c "/usr/bin/mongod --config /etc/mongodb.conf --fork" > /dev/null
if [ $? -gt 0 ]; then
stat_fail
else
diff --git a/community/nodejs/PKGBUILD b/community/nodejs/PKGBUILD
new file mode 100644
index 000000000..8c368df2c
--- /dev/null
+++ b/community/nodejs/PKGBUILD
@@ -0,0 +1,58 @@
+# Maintainer: Thomas Dziedzic < gostrc at gmail >
+# Contributor: James Campos <james.r.campos@gmail.com>
+# Contributor: BlackEagle < ike DOT devolder AT gmail DOT com >
+# Contributor: Dongsheng Cai <dongsheng at moodle dot com>
+# Contributor: Masutu Subric <masutu.arch at googlemail dot com>
+# Contributor: TIanyi Cui <tianyicui@gmail.com>
+
+pkgname=nodejs
+pkgver=0.4.9
+pkgrel=1
+pkgdesc='Evented I/O for V8 javascript'
+arch=('i686' 'x86_64')
+url='http://nodejs.org/'
+license=('MIT')
+depends=('python2')
+makedepends=('curl') # curl used for check()
+optdepends=('openssl: TLS support')
+conflicts=('nodejs-unstable')
+source=("http://nodejs.org/dist/node-v${pkgver}.tar.gz")
+sha256sums=('f231ea6d19ea9ea4c7f8e7ff5061e7d301f1635bec7ed0ff1eef2512576ea442')
+
+build() {
+ cd node-v${pkgver}
+
+ # python2 fix
+ for file in $(find . -name '*.py' -print) wscript tools/waf-light tools/node-waf
+ do
+ sed -i 's_^#!.*/usr/bin/python_#!/usr/bin/python2_' ${file}
+ sed -i 's_^#!.*/usr/bin/env.*python_#!/usr/bin/env python2_' ${file}
+ done
+ sed -i "s|cmd_R = 'python |cmd_R = 'python2 |" wscript
+ sed -i "s|python |python2 |" Makefile
+
+ ./configure \
+ --prefix=/usr
+
+ make
+}
+
+check() {
+ cd node-v${pkgver}
+
+ # only 1 test should fail afaik
+ # === release test-http-dns-fail ===
+ # https://github.com/joyent/node/issues/703
+ make test || true
+}
+
+package() {
+ cd node-v${pkgver}
+
+ make DESTDIR=${pkgdir} install
+
+ install -D -m644 LICENSE \
+ ${pkgdir}/usr/share/licenses/nodejs/LICENSE
+}
+
+# vim:set ts=2 sw=2 et:
diff --git a/core/cronie/PKGBUILD b/core/cronie/PKGBUILD
index a455e705b..795884823 100644
--- a/core/cronie/PKGBUILD
+++ b/core/cronie/PKGBUILD
@@ -2,8 +2,8 @@
# Maintainer: Gaetan Bisson <bisson@archlinux.org>
pkgname='cronie'
-pkgver=1.4.7
-pkgrel=9
+pkgver=1.4.8
+pkgrel=1
pkgdesc='Daemon that runs specified programs at scheduled times and related tools'
url='https://fedorahosted.org/cronie/'
license=('custom:BSD')
@@ -13,17 +13,14 @@ optdepends=('smtp-server: sending cron job output via email')
source=("https://fedorahosted.org/releases/c/r/${pkgname}/${pkgname}-${pkgver}.tar.gz"
'cron.deny'
- 'crontab'
'pam.d'
'rc.d')
-sha1sums=('c6644ba0e58bcb14e0bb3f925e3e8cc3f0d47a7f'
+sha1sums=('1d2ce3a6ca2a6f96ff31921e4060be3199dc10f3'
'0f279b8fb820340267d578dc85511c980715f91e'
- '4059bc4ccb75f08b0d4970940799e5d9722b339f'
'5eff7fb31f6bc0a924243ff046704726cf20c221'
'c08c040ed5cb12bc4fd15639a5242d31ec247ef5')
-backup=('etc/crontab'
- 'etc/anacrontab'
+backup=('etc/anacrontab'
'etc/conf.d/crond'
'etc/pam.d/crond'
'etc/cron.deny')
@@ -57,7 +54,6 @@ package() {
install -Dm755 ../rc.d "${pkgdir}"/etc/rc.d/crond
install -Dm644 ../pam.d "${pkgdir}"/etc/pam.d/crond
- install -Dm644 ../crontab "${pkgdir}"/etc/crontab
install -Dm644 ../cron.deny "${pkgdir}"/etc/cron.deny
install -Dm644 crond.sysconfig "${pkgdir}"/etc/conf.d/crond
install -Dm644 contrib/0hourly "${pkgdir}"/etc/cron.d/0hourly
diff --git a/core/sqlite3/PKGBUILD b/core/sqlite3/PKGBUILD
index 63be526f7..fd4fe1d87 100644
--- a/core/sqlite3/PKGBUILD
+++ b/core/sqlite3/PKGBUILD
@@ -1,12 +1,12 @@
-# $Id: PKGBUILD 124333 2011-05-19 19:31:27Z andyrtr $
+# $Id: PKGBUILD 130058 2011-07-01 19:36:29Z andyrtr $
# Maintainer: Andreas Radke <andyrtr@archlinux.org>
# Contributor: Tom Newsom <Jeepster@gmx.co.uk>
pkgbase="sqlite3"
pkgname=('sqlite3' 'sqlite3-tcl' 'sqlite3-doc')
-_amalgamationver=3070603
+_amalgamationver=3070701
_amalgamationver2=${_amalgamationver/00/}
-_docver=3070600
+_docver=${_amalgamationver} #3070700
pkgver=${_amalgamationver2//0/.}
pkgrel=1
pkgdesc="A C library that implements an SQL database engine"
@@ -20,8 +20,8 @@ source=( # tarball containing the amalgamation for SQLite 3.7.5 together with a
http://www.sqlite.org/sqlite-doc-${_docver}.zip
license.txt)
options=('!libtool' '!emptydirs')
-md5sums=('7eb41eea5ffa5cbe359a48629084c425'
- '1c9b9da6f98f1da2d8958254662c393c'
+md5sums=('554026fe7fac47b1cf61c18d5fe43419'
+ '2bca5613abf9352bc525b6a8fd80156e'
'c1cdbc5544034d9012e421e75a5e4890')
build() {
diff --git a/extra/irqbalance/Fix-detection-of-CPUs-in-sysfs.patch b/extra/irqbalance/Fix-detection-of-CPUs-in-sysfs.patch
new file mode 100644
index 000000000..56bd529c2
--- /dev/null
+++ b/extra/irqbalance/Fix-detection-of-CPUs-in-sysfs.patch
@@ -0,0 +1,35 @@
+From: Shawn Bohrer <sbohrer@rgmadvisors.com>
+
+Only count directories that match /sys/devices/system/cpu/cpu[0-9]+ as
+CPUs. Previously any directory that started with cpu was counted which
+caused cpufreq and cpuidle to be counted as CPUs.
+
+Signed-off-by: Shawn Bohrer <sbohrer@rgmadvisors.com>
+---
+ cputree.c | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/cputree.c b/cputree.c
+index 3b0c982..b879785 100644
+--- a/cputree.c
++++ b/cputree.c
+@@ -25,6 +25,7 @@
+ */
+
+ #include "config.h"
++#include <ctype.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <unistd.h>
+@@ -321,7 +322,7 @@ void parse_cpu_tree(void)
+ return;
+ do {
+ entry = readdir(dir);
+- if (entry && strlen(entry->d_name)>3 && strstr(entry->d_name,"cpu")) {
++ if (entry && !strncmp(entry->d_name,"cpu", 3) && isdigit(entry->d_name[3])) {
+ char new_path[PATH_MAX];
+ sprintf(new_path, "/sys/devices/system/cpu/%s", entry->d_name);
+ do_one_cpu(new_path);
+--
+1.6.5.2
+
diff --git a/extra/irqbalance/PKGBUILD b/extra/irqbalance/PKGBUILD
index 13db8d06b..e969c5729 100644
--- a/extra/irqbalance/PKGBUILD
+++ b/extra/irqbalance/PKGBUILD
@@ -3,7 +3,7 @@
pkgname=irqbalance
pkgver=0.56
-pkgrel=1
+pkgrel=3
pkgdesc="IRQ balancing daemon for SMP systems"
arch=('i686' 'x86_64')
url="http://www.irqbalance.org/"
@@ -13,22 +13,32 @@ makedepends=(pkgconfig)
backup=(etc/conf.d/irqbalance)
source=(http://irqbalance.googlecode.com/files/irqbalance-$pkgver.tbz2
irqbalance.conf.d
- irqbalance.rc.d)
+ irqbalance.rc.d
+ current-trunk.patch
+ Fix-detection-of-CPUs-in-sysfs.patch
+ Special-interrupt-counts-line-NMI-may-start-with-a-s.patch)
md5sums=('cd0c4d3b2bb84778a04fc594ad83949a'
'336c1ee99818f9ecda1687e34c69fd6b'
- 'fb82fc5d267d39110baf720d81282a7c')
+ 'fb82fc5d267d39110baf720d81282a7c'
+ '64df09f54cf80d9ce5ff2751a882032d'
+ '00dcb394dac884c116657ade4fe623ee'
+ 'e774aff57054ce3c8b5adc7c3e5a74c2')
build() {
- cd $srcdir/$pkgname-$pkgver
+ cd "$srcdir/$pkgname-$pkgver"
+ patch -Np0 < ../current-trunk.patch
+ patch -Np1 < ../Fix-detection-of-CPUs-in-sysfs.patch
+ patch -Np1 < ../Special-interrupt-counts-line-NMI-may-start-with-a-s.patch
./autogen.sh
- ./configure
+ ./configure --prefix=/usr
make
}
package() {
- cd $srcdir/$pkgname-$pkgver
- install -D -m755 irqbalance $pkgdir/usr/sbin/irqbalance
- install -D -m644 irqbalance.1 $pkgdir/usr/share/man/man1/irqbalance.1
- install -D -m644 ../irqbalance.conf.d $pkgdir/etc/conf.d/irqbalance
- install -D -m755 ../irqbalance.rc.d $pkgdir/etc/rc.d/irqbalance
+ cd "$srcdir/$pkgname-$pkgver"
+ make install DESTDIR="$pkgdir"
+ #install -D -m755 irqbalance $pkgdir/usr/sbin/irqbalance
+ #install -D -m644 irqbalance.1 $pkgdir/usr/share/man/man1/irqbalance.1
+ install -D -m644 ../irqbalance.conf.d "$pkgdir"/etc/conf.d/irqbalance
+ install -D -m755 ../irqbalance.rc.d "$pkgdir"/etc/rc.d/irqbalance
}
diff --git a/extra/irqbalance/Special-interrupt-counts-line-NMI-may-start-with-a-s.patch b/extra/irqbalance/Special-interrupt-counts-line-NMI-may-start-with-a-s.patch
new file mode 100644
index 000000000..c2e69c218
--- /dev/null
+++ b/extra/irqbalance/Special-interrupt-counts-line-NMI-may-start-with-a-s.patch
@@ -0,0 +1,67 @@
+From: Shawn Bohrer <sbohrer@rgmadvisors.com>
+
+Special interrupt counts line NMI may start with a space
+
+The kernel determines the maximum number of possible IRQs and pads the
+first field of /proc/interrupts appropriately. With four or more digits
+of precession the special interrupt counts all start with a space
+instead of a letter. This caused the special interrupt counts to be
+counted on my system and that caused the cpunr count to be off when it
+reached the ERR and MIS lines forcing a CPU rescan.
+
+Signed-off-by: Shawn Bohrer <sbohrer@rgmadvisors.com>
+---
+ procinterrupts.c | 24 +++++++++++-------------
+ 1 files changed, 11 insertions(+), 13 deletions(-)
+
+diff --git a/procinterrupts.c b/procinterrupts.c
+index e336efe..322f4de 100644
+--- a/procinterrupts.c
++++ b/procinterrupts.c
+@@ -55,20 +55,18 @@ void parse_proc_interrupts(void)
+ if (getline(&line, &size, file)==0)
+ break;
+
+-
++ number = strtoul(line, &c, 10);
+ /* lines with letters in front are special, like NMI count. Ignore */
+- if (!(line[0]==' ' || (line[0]>='0' && line[0]<='9')))
+- break;
+- c = strchr(line, ':');
+- if (!c)
++ if (line == c)
++ continue;
++
++ if (c[0] == ':')
++ ++c;
++ else
+ continue;
+- *c = 0;
+- c++;
+- number = strtoul(line, NULL, 10);
++
+ count = 0;
+ cpunr = 0;
+-
+- c2=NULL;
+ while (1) {
+ uint64_t C;
+ C = strtoull(c, &c2, 10);
+@@ -78,11 +76,11 @@ void parse_proc_interrupts(void)
+ c=c2;
+ cpunr++;
+ }
+- if (cpunr != core_count)
++ if (cpunr != core_count)
+ need_cpu_rescan = 1;
+-
++
+ set_interrupt_count(number, count);
+- }
++ }
+ fclose(file);
+ free(line);
+ }
+--
+1.6.5.2
+
diff --git a/extra/irqbalance/current-trunk.patch b/extra/irqbalance/current-trunk.patch
new file mode 100644
index 000000000..83f08575c
--- /dev/null
+++ b/extra/irqbalance/current-trunk.patch
@@ -0,0 +1,217 @@
+Index: powermode.c
+===================================================================
+--- powermode.c (.../tags/irqbalance-0.56) (revision 33)
++++ powermode.c (.../trunk) (revision 33)
+@@ -40,7 +40,8 @@
+ char *line = NULL;
+ size_t size = 0;
+ char *c;
+- uint64_t dummy, irq, softirq;
++ uint64_t dummy __attribute__((unused));
++ uint64_t irq, softirq;
+ file = fopen("/proc/stat", "r");
+ if (!file)
+ return;
+Index: cputree.c
+===================================================================
+--- cputree.c (.../tags/irqbalance-0.56) (revision 33)
++++ cputree.c (.../trunk) (revision 33)
+@@ -47,6 +47,7 @@
+ /* Users want to be able to keep interrupts away from some cpus; store these in a cpumask_t */
+ cpumask_t banned_cpus;
+
++cpumask_t cpu_possible_map;
+
+ /*
+ it's convenient to have the complement of banned_cpus available so that
+@@ -158,6 +159,8 @@
+ memset(cpu, 0, sizeof(struct cpu_core));
+
+ cpu->number = strtoul(&path[27], NULL, 10);
++
++ cpu_set(cpu->number, cpu_possible_map);
+
+ cpu_set(cpu->number, cpu->mask);
+
+@@ -219,15 +222,15 @@
+ core_count++;
+ }
+
+-static void dump_irqs(int spaces, GList *interrupts)
++static void dump_irqs(int spaces, GList *dump_interrupts)
+ {
+ struct interrupt *irq;
+- while (interrupts) {
++ while (dump_interrupts) {
+ int i;
+- for (i=0; i<spaces;i++) printf(" ");
+- irq = interrupts->data;
++ for (i=0; i<spaces; i++) printf(" ");
++ irq = dump_interrupts->data;
+ printf("Interrupt %i (%s/%u) \n", irq->number, classes[irq->class], (unsigned int)irq->workload);
+- interrupts = g_list_next(interrupts);
++ dump_interrupts = g_list_next(dump_interrupts);
+ }
+ }
+
+Index: placement.c
+===================================================================
+--- placement.c (.../tags/irqbalance-0.56) (revision 33)
++++ placement.c (.../trunk) (revision 33)
+@@ -272,7 +272,7 @@
+ }
+ if ((!cpus_empty(irq->node_mask)) &&
+ (!cpus_equal(irq->mask, irq->node_mask)) &&
+- (!cpus_full(irq->node_mask))) {
++ (!__cpus_full(&irq->node_mask, num_possible_cpus()))) {
+ irq->old_mask = irq->mask;
+ irq->mask = irq->node_mask;
+ }
+Index: cpumask.h
+===================================================================
+--- cpumask.h (.../tags/irqbalance-0.56) (revision 33)
++++ cpumask.h (.../trunk) (revision 33)
+@@ -1,7 +1,7 @@
+ #ifndef __LINUX_CPUMASK_H
+ #define __LINUX_CPUMASK_H
+
+-#define NR_CPUS 256
++#define NR_CPUS 4096
+ /*
+ * Cpumasks provide a bitmap suitable for representing the
+ * set of CPU's in a system, one bit position per CPU number.
+Index: bitmap.c
+===================================================================
+--- bitmap.c (.../tags/irqbalance-0.56) (revision 33)
++++ bitmap.c (.../trunk) (revision 33)
+@@ -74,6 +74,19 @@
+ return 1;
+ }
+
++int __bitmap_weight(const unsigned long *bitmap, int bits)
++{
++ int k, w = 0, lim = bits/BITS_PER_LONG;
++
++ for (k = 0; k < lim; k++)
++ w += hweight_long(bitmap[k]);
++
++ if (bits % BITS_PER_LONG)
++ w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
++
++ return w;
++}
++
+ int __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+ {
+Index: irqlist.c
+===================================================================
+--- irqlist.c (.../tags/irqbalance-0.56) (revision 33)
++++ irqlist.c (.../trunk) (revision 33)
+@@ -28,6 +28,7 @@
+ #include <unistd.h>
+ #include <sys/types.h>
+ #include <dirent.h>
++#include <errno.h>
+
+ #include "types.h"
+ #include "irqbalance.h"
+@@ -39,6 +40,7 @@
+ void get_affinity_hint(struct interrupt *irq, int number)
+ {
+ char buf[PATH_MAX];
++ cpumask_t tempmask;
+ char *line = NULL;
+ size_t size = 0;
+ FILE *file;
+@@ -51,7 +53,9 @@
+ fclose(file);
+ return;
+ }
+- cpumask_parse_user(line, strlen(line), irq->node_mask);
++ cpumask_parse_user(line, strlen(line), tempmask);
++ if (!__cpus_full(&tempmask, num_possible_cpus()))
++ irq->node_mask = tempmask;
+ fclose(file);
+ free(line);
+ }
+@@ -64,7 +68,7 @@
+ DIR *dir;
+ struct dirent *entry;
+ char *c, *c2;
+- int nr , count = 0;
++ int nr , count = 0, can_set = 1;
+ char buf[PATH_MAX];
+ sprintf(buf, "/proc/irq/%i", number);
+ dir = opendir(buf);
+@@ -77,7 +81,7 @@
+ size_t size = 0;
+ FILE *file;
+ sprintf(buf, "/proc/irq/%i/smp_affinity", number);
+- file = fopen(buf, "r");
++ file = fopen(buf, "r+");
+ if (!file)
+ continue;
+ if (getline(&line, &size, file)==0) {
+@@ -86,7 +90,13 @@
+ continue;
+ }
+ cpumask_parse_user(line, strlen(line), irq->mask);
+- fclose(file);
++ /*
++ * Check that we can write the affinity, if
++ * not take it out of the list.
++ */
++ fputs(line, file);
++ if (fclose(file) && errno == EIO)
++ can_set = 0;
+ free(line);
+ } else if (strcmp(entry->d_name,"allowed_affinity")==0) {
+ char *line = NULL;
+@@ -119,7 +129,7 @@
+ count++;
+
+ /* if there is no choice in the allowed mask, don't bother to balance */
+- if (count<2)
++ if ((count<2) || (can_set == 0))
+ irq->balance_level = BALANCE_NONE;
+
+
+Index: Makefile.am
+===================================================================
+--- Makefile.am (.../tags/irqbalance-0.56) (revision 33)
++++ Makefile.am (.../trunk) (revision 33)
+@@ -21,7 +21,7 @@
+ #
+
+ AUTOMAKE_OPTIONS = no-dependencies
+-EXTRA_DIST = README INSTALL COPYING autogen.sh m4/cap-ng.m4
++EXTRA_DIST = README INSTALL COPYING autogen.sh cap-ng.m4
+
+ INCLUDES = -I${top_srcdir}
+ LIBS = $(CAPNG_LDADD) $(GLIB_LIBS)
+@@ -31,6 +31,7 @@
+ sbin_PROGRAMS = irqbalance
+ irqbalance_SOURCES = activate.c bitmap.c classify.c cputree.c irqbalance.c \
+ irqlist.c network.c numa.c placement.c powermode.c procinterrupts.c
++dist_man_MANS = irqbalance.1
+
+ CONFIG_CLEAN_FILES = debug*.list config/*
+ clean-generic:
+Index: network.c
+===================================================================
+--- network.c (.../tags/irqbalance-0.56) (revision 33)
++++ network.c (.../trunk) (revision 33)
+@@ -160,10 +160,8 @@
+ }
+
+ while (!feof(file)) {
+- uint64_t rxcount;
+- uint64_t txcount;
+- uint64_t delta;
+- int dummy;
++ uint64_t dummy __attribute__((unused));
++ uint64_t rxcount, txcount, delta;
+ char *c, *c2;
+ if (getline(&line, &size, file)==0)
+ break;
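
Besides the renames, the trunk patch above raises NR_CPUS from 256 to 4096 and records a cpu_possible_map while scanning sysfs. A quick way, outside the patch, to see the range that map has to cover on a given machine:

    cat /sys/devices/system/cpu/possible   # e.g. "0-3": CPUs the kernel could ever bring online
    cat /sys/devices/system/cpu/online     # CPUs currently online
    # a 4096-bit cpumask needs 4096 / 8 = 512 bytes per mask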
diff --git a/extra/qjackctl/PKGBUILD b/extra/qjackctl/PKGBUILD
index 71cbf6664..1c28b8ae8 100644
--- a/extra/qjackctl/PKGBUILD
+++ b/extra/qjackctl/PKGBUILD
@@ -1,10 +1,10 @@
-# $Id: PKGBUILD 119703 2011-04-13 22:27:56Z schiv $
+# $Id: PKGBUILD 130089 2011-07-02 07:06:29Z schiv $
# Maintainer: Ray Rashif <schiv@archlinux.org>
# Contributor: Tobias Kieslich <tobias@archlinux.org>
pkgname=qjackctl
-pkgver=0.3.7
-pkgrel=2
+pkgver=0.3.8
+pkgrel=1
pkgdesc="A Qt front-end for the JACK low-latency audio server"
url="http://qjackctl.sourceforge.net/"
arch=('i686' 'x86_64')
@@ -12,19 +12,20 @@ license=('GPL')
depends=('jack' 'qt')
options=('!makeflags')
source=("http://downloads.sourceforge.net/$pkgname/$pkgname-$pkgver.tar.gz")
-md5sums=('3462613bd5c92fa6e6ae92950bd69c0b')
+md5sums=('8a0992c01e50d8be22064ab7b08fbc78')
build() {
cd "$srcdir/$pkgname-$pkgver"
- ./configure --prefix=/usr
+ ./configure --prefix=/usr
+
make
}
package() {
cd "$srcdir/$pkgname-$pkgver"
- make DESTDIR="$pkgdir/" install
+ make DESTDIR="$pkgdir" install
}
# vim:set ts=2 sw=2 et:
diff --git a/multilib/lib32-v4l-utils/PKGBUILD b/multilib/lib32-v4l-utils/PKGBUILD
index db5a9c594..2729e8528 100644
--- a/multilib/lib32-v4l-utils/PKGBUILD
+++ b/multilib/lib32-v4l-utils/PKGBUILD
@@ -2,7 +2,7 @@
# Maintainer: Jan "heftig" Steffens <jan.steffens@gmail.com>
_pkgbasename=v4l-utils
pkgname=lib32-$_pkgbasename
-pkgver=0.8.3
+pkgver=0.8.4
pkgrel=1
pkgdesc="Userspace tools and conversion library for Video 4 Linux (32-bit)"
arch=('x86_64')
@@ -12,9 +12,9 @@ replaces=('lib32-libv4l')
conflicts=('lib32-libv4l')
license=('LGPL')
makedepends=('gcc-multilib')
-depends=('lib32-gcc-libs' $_pkgbasename)
+depends=('lib32-gcc-libs' $_pkgbasename lib32-libjpeg-turbo)
source=(http://linuxtv.org/downloads/v4l-utils/${_pkgbasename}-${pkgver}.tar.bz2)
-sha256sums=('525ebed7ef6e0f6a4bb68fe10403a3725eb1ec31a4c557c68256079b00e283b5')
+sha256sums=('20bf73b0c2255dfc6ae82806ec8a663e00ab039df9ee6eadae633466841c0dd7')
build() {
cd "${srcdir}/${_pkgbasename}-${pkgver}"
@@ -31,5 +31,5 @@ build() {
package() {
cd "${srcdir}/${_pkgbasename}-${pkgver}"
make install PREFIX=/usr LIBDIR=/usr/lib32 DESTDIR="${pkgdir}/"
- rm -rf "${pkgdir}"/{usr/{include,share,bin,sbin},etc}
+ rm -rf "${pkgdir}"/{usr/{include,share,bin,sbin},etc,lib}
}
diff --git a/testing/apr-util/PKGBUILD b/testing/apr-util/PKGBUILD
new file mode 100644
index 000000000..5cdab4d99
--- /dev/null
+++ b/testing/apr-util/PKGBUILD
@@ -0,0 +1,28 @@
+# $Id: PKGBUILD 129453 2011-06-28 01:17:42Z stephane $
+# Maintainer: Jan de Groot <jgc@archlinux.org>
+# Maintainer: Pierre Schmitz <pierre@archlinux.de>
+
+pkgname=apr-util
+pkgver=1.3.12
+pkgrel=2
+pkgdesc="The Apache Portable Runtime"
+arch=('i686' 'x86_64')
+url="http://apr.apache.org/"
+depends=('apr' 'gdbm' 'expat' 'db' 'libldap' 'unixodbc')
+options=('!libtool')
+license=('APACHE')
+source=("http://www.apache.org/dist/apr/apr-util-${pkgver}.tar.bz2")
+md5sums=('0f671b037ca62751a8a7005578085560')
+
+build() {
+ cd "${srcdir}/apr-util-${pkgver}"
+ ./configure --prefix=/usr --with-apr=/usr \
+ --without-pgsql --without-mysql --without-sqlite2 --without-sqlite3 \
+ --with-berkeley-db=/usr --with-gdbm=/usr --with-ldap
+ make
+}
+
+package() {
+ cd "${srcdir}/apr-util-${pkgver}"
+ make DESTDIR="${pkgdir}" install
+}
diff --git a/testing/bogofilter/PKGBUILD b/testing/bogofilter/PKGBUILD
new file mode 100644
index 000000000..860895cf9
--- /dev/null
+++ b/testing/bogofilter/PKGBUILD
@@ -0,0 +1,34 @@
+# $Id: PKGBUILD 127873 2011-06-19 06:22:51Z eric $
+# Maintainer: tobias <tobias@archlinux.org>
+# Contributor: Low Kian Seong <fastmail_low@speedymail.org>
+
+pkgname=bogofilter
+pkgver=1.2.2
+pkgrel=3
+pkgdesc="A fast Bayesian spam filtering tool"
+arch=('i686' 'x86_64')
+license=('GPL3')
+url="http://bogofilter.sourceforge.net"
+depends=('db' 'perl' 'gsl')
+backup=('etc/bogofilter/bogofilter.cf')
+source=(http://sourceforge.net/projects/${pkgname}/files/${pkgname}-current/${pkgname}-${pkgver}/${pkgname}-${pkgver}.tar.bz2)
+md5sums=('4bcabdf8c5e7efefcb508eda7e80eebc')
+
+build() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+ ./configure --prefix=/usr \
+ --sysconfdir=/etc/bogofilter \
+ --localstatedir=/var \
+ --enable-transactions
+ make
+}
+
+package() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+ make DESTDIR="${pkgdir}" install
+
+ mv "${pkgdir}/etc/bogofilter/bogofilter.cf.example" "${pkgdir}/etc/bogofilter/bogofilter.cf"
+
+ install -dm755 "${pkgdir}/usr/share/${pkgname}/contrib"
+ install -m644 contrib/* "${pkgdir}/usr/share/${pkgname}/contrib/"
+}
diff --git a/testing/claws-mail/PKGBUILD b/testing/claws-mail/PKGBUILD
new file mode 100644
index 000000000..b48969b16
--- /dev/null
+++ b/testing/claws-mail/PKGBUILD
@@ -0,0 +1,60 @@
+# $Id: PKGBUILD 127579 2011-06-16 17:10:43Z andyrtr $
+# Maintainer: Andreas Radke <andyrtr@archlinux.org>
+
+pkgname=claws-mail
+pkgver=3.7.9
+pkgrel=4
+pkgdesc="A GTK+ based e-mail client."
+arch=('i686' 'x86_64')
+license=('GPL3')
+url="http://www.claws-mail.org"
+depends=('gtk2' 'gnutls' 'startup-notification' 'pilot-link' 'enchant'
+ 'gpgme' 'libetpan' 'libsm' 'db' 'dbus-glib' 'hicolor-icon-theme' 'desktop-file-utils')
+makedepends=('compface' 'spamassassin' 'bogofilter' 'valgrind')
+optdepends=('python2: needed for some tools'
+ 'perl: needed for some tools'
+ 'spamassassin: adds support for spamfiltering'
+ 'bogofilter: adds support for spamfiltering'
+ 'html2ps: AUR pkg - adds support for printing html mails together with html plugins')
+replaces=('sylpheed-claws')
+provides=('claws')
+options=(!libtool)
+install=claws-mail.install
+source=(http://downloads.sourceforge.net/sourceforge/sylpheed-claws/${pkgname}-${pkgver}.tar.bz2
+ claws-notify-crash.patch)
+md5sums=('2f9d2dcabf84e312cfeb56efa799b5b3'
+ '9a2903449f679344b5f5f51c91825b45')
+
+build() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+
+ patch -Np0 -i $srcdir/claws-notify-crash.patch
+ sed -i 's@^#!.*python.*@#!/usr/bin/python2@' tools/*.py
+
+ ./configure --prefix=/usr --disable-static \
+ --enable-enchant \
+ --enable-gnutls \
+ --enable-ldap \
+ --disable-dillo-viewer-plugin \
+ --enable-crash-dialog \
+ --enable-pgpmime-plugin \
+ --enable-spamassassin-plugin \
+ --enable-bogofilter-plugin \
+ --enable-jpilot
+
+ make
+}
+
+package() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ make DESTDIR=${pkgdir} install
+
+ # build and install extra tools
+ cd tools
+ make
+ # all executables and .conf files ; only top directory
+ find -maxdepth 1 -type f -and -perm /111 -or -name '*.conf' | while read i ; do
+ install -D -m755 ${i} \
+ ${pkgdir}/usr/lib/claws-mail/tools/${i}
+ done
+}
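
In the tools loop above, find's -and binds tighter than -or, so the expression means "(regular file with any execute bit set) OR (named *.conf)"; anything named *.conf matches even if it is not a regular file. An equivalent form with explicit grouping, shown only as a sketch assuming GNU find:

    find . -maxdepth 1 \( -type f -perm /111 \) -o -name '*.conf' | while read -r i; do
        install -D -m755 "$i" "$pkgdir/usr/lib/claws-mail/tools/$i"
    done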
diff --git a/testing/claws-mail/claws-mail.install b/testing/claws-mail/claws-mail.install
new file mode 100644
index 000000000..648480c3b
--- /dev/null
+++ b/testing/claws-mail/claws-mail.install
@@ -0,0 +1,13 @@
+post_install() {
+ gtk-update-icon-cache -q -t -f usr/share/icons/hicolor
+ update-desktop-database -q
+}
+
+post_upgrade() {
+ post_install
+}
+
+post_remove() {
+ post_install
+}
+
diff --git a/testing/claws-mail/claws-notify-crash.patch b/testing/claws-mail/claws-notify-crash.patch
new file mode 100644
index 000000000..e1d4eb686
--- /dev/null
+++ b/testing/claws-mail/claws-notify-crash.patch
@@ -0,0 +1,18 @@
+Index: src/main.c
+===================================================================
+RCS file: //claws/src/main.c,v
+retrieving revision 1.115.2.237
+retrieving revision 1.115.2.238
+diff -u -r1.115.2.237 -r1.115.2.238
+--- src/main.c 10 Apr 2011 17:19:04 -0000 1.115.2.237
++++ src/main.c 30 Apr 2011 19:27:15 -0000 1.115.2.238
+@@ -331,7 +331,7 @@
+ gtk_widget_show(hack);
+ }
+
+- xdisplay = gdk_display_get_default();
++ xdisplay = GDK_DISPLAY_XDISPLAY(gdk_display_get_default());
+ sn_display = sn_display_new(xdisplay,
+ sn_error_trap_push,
+ sn_error_trap_pop);
+
diff --git a/testing/db/PKGBUILD b/testing/db/PKGBUILD
new file mode 100644
index 000000000..40d1b020b
--- /dev/null
+++ b/testing/db/PKGBUILD
@@ -0,0 +1,32 @@
+# $Id: PKGBUILD 127440 2011-06-15 00:33:22Z stephane $
+# Maintainer: Stéphane Gaudreault <stephane@archlinux.org>
+# Contributor: Allan McRae <allan@archlinux.org>
+# Contributor: Andreas Radke <andyrtr@archlinux.org>
+
+pkgname=db
+pkgver=5.2.28
+pkgrel=1
+pkgdesc="The Berkeley DB embedded database system"
+arch=('i686' 'x86_64')
+url="http://www.oracle.com/technology/software/products/berkeley-db/index.html"
+license=('custom')
+depends=('gcc-libs' 'sh')
+options=('!libtool')
+install=db.install
+source=(http://download.oracle.com/berkeley-db/db-${pkgver}.tar.gz)
+sha1sums=('e6572e3356cf8bc998da4e889db3b12638833435')
+
+build() {
+ cd "${srcdir}/$pkgname-${pkgver}/build_unix"
+ ../dist/configure --prefix=/usr --enable-compat185 \
+ --enable-shared --enable-static --enable-cxx --enable-dbm
+ make LIBSO_LIBS=-lpthread
+}
+
+package() {
+ cd "${srcdir}/$pkgname-${pkgver}/build_unix"
+ make DESTDIR="${pkgdir}" install
+ rm -rf "${pkgdir}"/usr/docs
+ install -Dm644 "${srcdir}"/${pkgname}-${pkgver}/LICENSE \
+ "${pkgdir}"/usr/share/licenses/${pkgname}/LICENSE
+}
diff --git a/testing/db/db.install b/testing/db/db.install
new file mode 100644
index 000000000..03b64042d
--- /dev/null
+++ b/testing/db/db.install
@@ -0,0 +1,5 @@
+post_upgrade() {
+ if [ "$(vercmp $2 5.1)" -lt 0 ]; then
+ echo " >> Major version update. Consider running db_upgrade on Berkeley DB databases."
+ fi
+}
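
The scriptlet above keys the warning on pacman's vercmp helper, which prints a value below, equal to, or above zero when the first version is older than, equal to, or newer than the second. For illustration only (versions are examples):

    vercmp 5.0.21 5.1   # < 0: old install predates 5.1, the db_upgrade hint is printed
    vercmp 5.1.25 5.1   # > 0: no message
    vercmp 5.1 5.1      # 0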
diff --git a/testing/evolution-data-server/PKGBUILD b/testing/evolution-data-server/PKGBUILD
new file mode 100644
index 000000000..d7303a1eb
--- /dev/null
+++ b/testing/evolution-data-server/PKGBUILD
@@ -0,0 +1,29 @@
+# $Id: PKGBUILD 129456 2011-06-28 01:27:57Z stephane $
+# Maintainer: Jan de Groot <jgc@archlinux.org>
+
+pkgname=evolution-data-server
+pkgver=3.0.2.1
+pkgrel=2
+pkgdesc="Central location for addressbook and calendar storage in the GNOME Desktop"
+arch=('i686' 'x86_64')
+depends=('libsoup' 'nss' 'libgnome-keyring' 'krb5' 'libgweather' 'libical' 'db' 'libgdata')
+makedepends=('intltool' 'gperf' 'gobject-introspection')
+options=('!libtool')
+url="http://www.gnome.org"
+license=('GPL')
+source=(http://ftp.gnome.org/pub/gnome/sources/${pkgname}/3.0/${pkgname}-${pkgver}.tar.bz2)
+sha256sums=('1b14c57a835745ee2c0141a76a07d44688dfb2d758bf3b818fe1860b59451f9b')
+
+build() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+ ./configure --prefix=/usr --sysconfdir=/etc \
+ --localstatedir=/var --with-openldap=yes \
+ --libexecdir=/usr/lib/evolution-data-server \
+ --with-krb5=/usr --with-libdb=/usr
+ make
+}
+
+package() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+ make DESTDIR="${pkgdir}" install
+}
diff --git a/testing/evolution-exchange/PKGBUILD b/testing/evolution-exchange/PKGBUILD
new file mode 100644
index 000000000..a04bdc13b
--- /dev/null
+++ b/testing/evolution-exchange/PKGBUILD
@@ -0,0 +1,33 @@
+# $Id: PKGBUILD 129506 2011-06-28 14:23:10Z stephane $
+# Maintainer: Jan de Groot <jgc@archlinux.org>
+
+pkgname=evolution-exchange
+pkgver=3.0.2
+pkgrel=2
+pkgdesc="Ximian Connector Exchange plugin for Evolution"
+arch=('i686' 'x86_64')
+license=('GPL')
+url="http://www.ximian.com"
+depends=('evolution-data-server' 'gtkhtml4' 'gnome-desktop' 'libunique3')
+makedepends=('intltool' 'evolution')
+options=('!libtool' '!emptydirs')
+install=evolution-exchange.install
+source=(http://ftp.gnome.org/pub/gnome/sources/${pkgname}/${pkgver%.*}/${pkgname}-${pkgver}.tar.bz2)
+sha256sums=('84f5c638aee1f6ed49de0d0f331d3b29907f7b211bd6c70f37698dedcd3e5f5a')
+
+build() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+ ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var \
+ --libexecdir=/usr/lib/evolution --disable-static \
+ --with-libdb=/usr --with-krb5=/usr
+ make
+}
+
+package() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+ make DESTDIR="${pkgdir}" install
+
+ install -m755 -d "${pkgdir}/usr/share/gconf/schemas"
+ gconf-merge-schema "${pkgdir}/usr/share/gconf/schemas/${pkgname}.schemas" --domain evolution-exchange-3.0 ${pkgdir}/etc/gconf/schemas/*.schemas
+ rm -f ${pkgdir}/etc/gconf/schemas/*.schemas
+}
diff --git a/testing/evolution-exchange/evolution-exchange.install b/testing/evolution-exchange/evolution-exchange.install
new file mode 100644
index 000000000..1179887f9
--- /dev/null
+++ b/testing/evolution-exchange/evolution-exchange.install
@@ -0,0 +1,17 @@
+pkgname=evolution-exchange
+
+post_install() {
+ usr/sbin/gconfpkg --install ${pkgname}
+}
+
+pre_upgrade() {
+ pre_remove $1
+}
+
+post_upgrade() {
+ post_install $1
+}
+
+pre_remove() {
+ usr/sbin/gconfpkg --uninstall ${pkgname}
+}
diff --git a/testing/iproute2/PKGBUILD b/testing/iproute2/PKGBUILD
new file mode 100644
index 000000000..57089f2c0
--- /dev/null
+++ b/testing/iproute2/PKGBUILD
@@ -0,0 +1,50 @@
+# $Id: PKGBUILD 127646 2011-06-17 12:59:11Z stephane $
+# Maintainer: Ronald van Haren <ronald.archlinux.org>
+# Contributor: Judd Vinet <jvinet@zeroflux.org>
+
+pkgname=iproute2
+pkgver=2.6.38
+pkgrel=4
+pkgdesc="IP Routing Utilities"
+arch=('i686' 'x86_64')
+license=('GPL2')
+url="http://www.linux-foundation.org/en/Net:Iproute2"
+depends=('perl')
+makedepends=('linux-atm')
+optdepends=('linux-atm: ATM support')
+provides=('iproute')
+conflicts=('iproute')
+replaces=('iproute')
+options=('!makeflags')
+backup=('etc/iproute2/ematch_map' 'etc/iproute2/rt_dsfield' 'etc/iproute2/rt_protos' \
+ 'etc/iproute2/rt_realms' 'etc/iproute2/rt_scopes' 'etc/iproute2/rt_tables')
+source=(http://devresources.linux-foundation.org/dev/iproute2/download/iproute2-${pkgver}.tar.bz2
+ 'iproute2-fhs.patch')
+sha1sums=('e9f6d457a06866a2a20a6cba6b3a039b2ec3e14a'
+ '2416b11252364d7a6c742eabb4a6924a75637a46')
+
+build() {
+ cd $srcdir/iproute2-${pkgver}
+
+ # set correct fhs structure
+ patch -Np1 -i ${srcdir}/iproute2-fhs.patch
+
+ ./configure
+
+ make
+}
+
+package() {
+ cd $srcdir/iproute2-${pkgver}
+
+ make DESTDIR=$pkgdir install
+
+  # allow loopback to be started before /usr is mounted; this may not be supported in the future
+ mkdir -p ${pkgdir}/sbin
+ mv ${pkgdir}/usr/sbin/ip ${pkgdir}/sbin/ip
+ ln -s /sbin/ip ${pkgdir}/usr/sbin/ip
+
+ # libnetlink isn't installed, install it FS#19385
+ install -Dm644 include/libnetlink.h ${pkgdir}/usr/include/libnetlink.h
+ install -Dm644 lib/libnetlink.a ${pkgdir}/usr/lib/libnetlink.a
+}
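
package() above keeps the real binary in /sbin and a compatibility symlink in /usr/sbin, and adds the libnetlink files from FS#19385. A small check that the layout came out as intended, illustrative only and assuming makepkg's default pkg/ build root:

    ls -l pkg/sbin/ip
    readlink pkg/usr/sbin/ip   # expected: /sbin/ip
    ls pkg/usr/include/libnetlink.h pkg/usr/lib/libnetlink.a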
diff --git a/testing/iproute2/iproute2-fhs.patch b/testing/iproute2/iproute2-fhs.patch
new file mode 100644
index 000000000..2608414db
--- /dev/null
+++ b/testing/iproute2/iproute2-fhs.patch
@@ -0,0 +1,84 @@
+diff -Naur iproute2.old/Makefile iproute2-2.6.29/Makefile
+--- iproute2.old/Makefile 2009-11-11 22:05:21.251407668 +0100
++++ iproute2-2.6.29/Makefile 2009-11-11 22:07:09.891833516 +0100
+@@ -1,11 +1,12 @@
+ DESTDIR=/usr/
+ ROOTDIR=$(DESTDIR)
+ LIBDIR=/usr/lib/
+-SBINDIR=/sbin
++SBINDIR=/usr/sbin
+ CONFDIR=/etc/iproute2
+-DOCDIR=/share/doc/iproute2
+-MANDIR=/share/man
++DOCDIR=/usr/share/doc/iproute2
++MANDIR=/usr/share/man
+ ARPDDIR=/var/lib/arpd
++SHAREDIR=/usr/share
+
+ # Path to db_185.h include
+ DBM_INCLUDE:=$(ROOTDIR)/usr/include
+diff -Naur iproute2.old/tc/tc_util.c iproute2-2.6.29/tc/tc_util.c
+--- iproute2.old/tc/tc_util.c 2009-11-11 22:05:21.298076943 +0100
++++ iproute2-2.6.29/tc/tc_util.c 2009-11-11 22:09:32.865152646 +0100
+@@ -24,8 +24,8 @@
+ #include "utils.h"
+ #include "tc_util.h"
+
+-#ifndef LIBDIR
+-#define LIBDIR "/usr/lib/"
++#ifndef SHAREDIR
++#define SHAREDIR "/usr/share"
+ #endif
+
+ const char *get_tc_lib(void)
+@@ -34,7 +34,7 @@
+
+ lib_dir = getenv("TC_LIB_DIR");
+ if (!lib_dir)
+- lib_dir = LIBDIR "/tc/";
++ lib_dir = SHAREDIR "/tc/";
+
+ return lib_dir;
+ }
+diff -Naur iproute2.old/netem/Makefile iproute2-2.6.35/netem/Makefile
+--- iproute2.old/netem/Makefile 2010-08-06 11:30:48.640940183 +0200
++++ iproute2-2.6.35/netem/Makefile 2010-08-06 11:32:34.210908892 +0200
+@@ -20,9 +20,9 @@
+ $(HOSTCC) $(CCOPTS) -I../include -o $@ $@.c -lm
+
+ install: all
+- mkdir -p $(DESTDIR)$(LIBDIR)/tc
++ mkdir -p $(DESTDIR)$(SHAREDIR)/tc
+ for i in $(DISTDATA); \
+- do install -m 644 $$i $(DESTDIR)$(LIBDIR)/tc; \
++ do install -m 644 $$i $(DESTDIR)$(SHAREDIR)/tc; \
+ done
+
+ clean:
+diff -Naur iproute2.old/tc/Makefile iproute2-2.6.35/tc/Makefile
+--- iproute2.old/tc/Makefile 2010-08-06 11:48:35.607472252 +0200
++++ iproute2-2.6.35/tc/Makefile 2010-08-06 11:49:36.977473380 +0200
+@@ -99,18 +99,11 @@
+ $(AR) rcs $@ $(TCLIB)
+
+ install: all
+- mkdir -p $(MODDESTDIR)
+- install -m 0755 tc $(DESTDIR)$(SBINDIR)
+- for i in $(TCSO); \
+- do install -m 755 $$i $(MODDESTDIR); \
+- done
+- if [ ! -f $(MODDESTDIR)/m_ipt.so ]; then \
+- if [ -f $(MODDESTDIR)/m_xt.so ]; \
+- then ln -s m_xt.so $(MODDESTDIR)/m_ipt.so ; \
+- elif [ -f $(MODDESTDIR)/m_xt_old.so ]; \
+- then ln -s m_xt_old.so $(MODDESTDIR)/m_ipt.so ; \
+- fi; \
+- fi
++ mkdir -p $(DESTDIR)$(LIBDIR)/tc
++ install -m 0755 tc $(DESTDIR)$(SBINDIR)
++ for i in $(TCSO); \
++ do install -m 755 $$i $(DESTDIR)$(LIBDIR)/tc; \
++ done
+
+ clean:
+ rm -f $(TCOBJ) $(TCLIB) libtc.a tc *.so emp_ematch.yacc.h; \
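
The FHS patch above moves tc's distribution data from LIBDIR/tc to SHAREDIR/tc while keeping the TC_LIB_DIR override in get_tc_lib(). A usage sketch of a feature that actually reads those files, netem's delay distributions (the interface name and values are examples only):

    # normal.dist, pareto.dist, ... are looked up in the tc lib dir (/usr/share/tc here)
    tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
    # point tc somewhere else without rebuilding:
    TC_LIB_DIR=/tmp/tc-data tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal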
diff --git a/testing/libetpan/PKGBUILD b/testing/libetpan/PKGBUILD
new file mode 100644
index 000000000..107264bc4
--- /dev/null
+++ b/testing/libetpan/PKGBUILD
@@ -0,0 +1,26 @@
+# $Id: PKGBUILD 127576 2011-06-16 17:03:09Z andyrtr $
+# Maintainer: Andreas Radke <andyrtr@archlinux.org>
+
+pkgname=libetpan
+pkgver=1.0
+pkgrel=3
+pkgdesc="A portable middleware for email access"
+arch=('i686' 'x86_64')
+url="http://www.etpan.org/"
+license=("custom:etpan")
+depends=('db>=5.2.28' 'libsasl>=2.1.23' 'curl>=7.21.' 'expat>=2.0.1-1')
+options=('!libtool')
+source=(http://downloads.sourceforge.net/sourceforge/${pkgname}/${pkgname}-${pkgver}.tar.gz)
+md5sums=('5addc766141a0b1d29ee1ca4ba1b6808')
+
+build() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ ./configure --prefix=/usr --disable-static
+ make
+}
+
+package() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ make DESTDIR=${pkgdir} install
+ install -Dm644 COPYRIGHT ${pkgdir}/usr/share/licenses/$pkgname/license.txt
+}
diff --git a/testing/libproxy/PKGBUILD b/testing/libproxy/PKGBUILD
new file mode 100644
index 000000000..5b1367bd5
--- /dev/null
+++ b/testing/libproxy/PKGBUILD
@@ -0,0 +1,40 @@
+# $Id: PKGBUILD 24494 2009-01-17 20:42:01Z jgc $
+# Maintainer: Jan de Groot <jgc@archlinux.org>
+
+pkgname=libproxy
+pkgver=0.4.7
+pkgrel=1
+pkgdesc="A library that provides automatic proxy configuration management"
+arch=(i686 x86_64)
+license=('LGPL')
+depends=('gcc-libs')
+optdepends=('kdelibs: KDE configuration module'
+ 'networkmanager: NetworkManager configuration module'
+ 'perl: Perl bindings'
+ 'python2: Python bindings')
+makedepends=('cmake' 'networkmanager' 'automoc4' 'python2' 'kdelibs' 'perl')
+url="http://libproxy.googlecode.com"
+source=(http://libproxy.googlecode.com/files/${pkgname}-${pkgver}.tar.gz)
+options=('!libtool')
+md5sums=('509e03a488a61cd62bfbaf3ab6a2a7a5')
+
+build() {
+ cd "${srcdir}"
+ mkdir build
+ cd build
+ cmake ../${pkgname}-${pkgver} \
+ -DCMAKE_INSTALL_PREFIX=/usr \
+ -DLIBEXEC_INSTALL_DIR=/usr/lib/libproxy \
+ -DCMAKE_SKIP_RPATH=ON \
+ -DPERL_VENDORINSTALL=yes \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DWITH_WEBKIT=OFF \
+ -DCMAKE_CXX_FLAGS="${CXXFLAGS}" \
+ -DCMAKE_C_FLAGS="${CFLAGS}"
+ make
+}
+
+package() {
+ cd "${srcdir}/build"
+ make DESTDIR="${pkgdir}" install
+}
diff --git a/testing/libreoffice/PKGBUILD b/testing/libreoffice/PKGBUILD
index 2410ee992..74b940110 100644
--- a/testing/libreoffice/PKGBUILD
+++ b/testing/libreoffice/PKGBUILD
@@ -1,4 +1,4 @@
-# $Id: PKGBUILD 128633 2011-06-24 19:50:33Z andyrtr $
+# $Id: PKGBUILD 130106 2011-07-02 19:39:19Z andyrtr $
# Maintainer: AndyRTR <andyrtr@archlinux.org>
pkgbase="libreoffice"
@@ -25,8 +25,8 @@ pkgname=('libreoffice' 'libreoffice-sdk'
'libreoffice-extension-watch-window'
'libreoffice-extension-wiki-publisher')
_LOver=3.4.1.3
-pkgver=3.4.1rc3
-pkgrel=1
+pkgver=3.4.1
+pkgrel=2
arch=('i686' 'x86_64')
#_LO_tree="3.4"
_OFFICEUPD="340"
@@ -47,8 +47,8 @@ makedepends=( # makedepends
# translate-toolkit - todo move them to extra to allow --with-system-foo builds
# http://download.documentfoundation.org/mirrors/all.html
# http://wiki.documentfoundation.org/Mirrors
-#_mirror="http://download.documentfoundation.org/libreoffice/src"
-_mirror="http://dev-builds.libreoffice.org/pre-releases/src/"
+_mirror="http://download.documentfoundation.org/libreoffice/src"
+#_mirror="http://dev-builds.libreoffice.org/pre-releases/src/"
_additional_source_url="http://hg.services.openoffice.org/binaries"
source=(${_mirror}/${pkgbase}-{artwork,base,bootstrap,calc,components,extensions,extras,filters,help,impress,libs-core,libs-extern,libs-extern-sys,libs-gui,postprocess,sdk,testing,ure,writer}-${_LOver}.tar.bz2 #,translations
${_additional_source_url}/1f24ab1d39f4a51faf22244c94a6203f-xmlsec1-1.2.14.tar.gz
diff --git a/testing/libreoffice/libreoffice.install b/testing/libreoffice/libreoffice.install
index 27f3173ae..177f0d4e6 100644
--- a/testing/libreoffice/libreoffice.install
+++ b/testing/libreoffice/libreoffice.install
@@ -4,7 +4,7 @@ gtk-update-icon-cache -f -q /usr/share/icons/hicolor
update-desktop-database -q
update-mime-database usr/share/mime > /dev/null 2>&1
-echo " * see http://wiki.archlinux.org/index.php/Openoffice"
+echo " * see https://wiki.archlinux.org/index.php/LibreOffice"
echo " * you may want to pacman -Ss libreoffice-extensions"
echo " to see what extensions are prepared to install"
echo " * it's recommended to install {hunspell,mythes,hyphen}-xx pkg for spell checking"
diff --git a/testing/libsasl/PKGBUILD b/testing/libsasl/PKGBUILD
new file mode 100644
index 000000000..0a92c6a7a
--- /dev/null
+++ b/testing/libsasl/PKGBUILD
@@ -0,0 +1,54 @@
+# $Id: PKGBUILD 127649 2011-06-17 13:03:28Z stephane $
+# Maintainer: Jan de Groot <jgc@archlinux.org>
+
+pkgname=libsasl
+pkgver=2.1.23
+pkgrel=6
+pkgdesc="Cyrus Simple Authentication Service Layer (SASL) library"
+arch=('i686' 'x86_64')
+url="http://cyrusimap.web.cmu.edu/downloads.html#sasl"
+license=('custom')
+depends=('db>=4.8')
+optdepends=('cyrus-sasl: saslauthd'
+ 'cyrus-sasl-plugins: authentication plugins other than sasldb')
+source=(ftp://ftp.andrew.cmu.edu/pub/cyrus-mail/cyrus-sasl-${pkgver}.tar.gz
+ cyrus-sasl-2.1.19-checkpw.c.patch
+ cyrus-sasl-db.patch)
+options=('!makeflags')
+md5sums=('2eb0e48106f0e9cd8001e654f267ecbc'
+ 'e27ddff076342e7a3041c4759817d04b'
+ '0658201497aad359c0d66b0ab8032859')
+
+build() {
+ cd "${srcdir}/cyrus-sasl-${pkgver}"
+ patch -Np0 -i ${srcdir}/cyrus-sasl-2.1.19-checkpw.c.patch
+ patch -Np1 -i ${srcdir}/cyrus-sasl-db.patch
+ ./configure --prefix=/usr \
+ --sysconfdir=/etc \
+ --localstatedir=/var \
+ --disable-anon \
+ --disable-cram \
+ --disable-digest \
+ --disable-gssapi \
+ --enable-login \
+ --disable-otp \
+ --enable-plain \
+ --mandir=/usr/share/man
+ for dir in include lib sasldb plugins utils; do
+ pushd ${dir}
+ make
+ popd
+ done
+}
+
+package() {
+ cd "${srcdir}/cyrus-sasl-${pkgver}"
+ for dir in include lib sasldb plugins utils; do
+ pushd ${dir} || return 1
+ make DESTDIR="${pkgdir}" install
+ popd
+ done
+
+ # install license
+ install -D -m644 COPYING "${pkgdir}/usr/share/licenses/${pkgname}/COPYING"
+}
diff --git a/testing/libsasl/cyrus-sasl-2.1.19-checkpw.c.patch b/testing/libsasl/cyrus-sasl-2.1.19-checkpw.c.patch
new file mode 100644
index 000000000..f7bf44b79
--- /dev/null
+++ b/testing/libsasl/cyrus-sasl-2.1.19-checkpw.c.patch
@@ -0,0 +1,170 @@
+diff -ur ../cyrus-sasl-2.1.19.orig/lib/Makefile.in ./lib/Makefile.in
+--- ../cyrus-sasl-2.1.19.orig/lib/Makefile.in 2004-07-02 21:40:15.000000000 +0200
++++ ./lib/Makefile.in 2004-09-07 13:21:22.746680576 +0200
+@@ -120,7 +120,7 @@
+ JAVA_TRUE = @JAVA_TRUE@
+ LDFLAGS = @LDFLAGS@
+ LIBOBJS = @LIBOBJS@
+-LIBS = @LIBS@
++LIBS = -lcrypt @LIBS@
+ LIBTOOL = @LIBTOOL@
+ LIB_CRYPT = @LIB_CRYPT@
+ LIB_DES = @LIB_DES@
+diff -ur ../cyrus-sasl-2.1.19.orig/lib/checkpw.c ./lib/checkpw.c
+--- ../cyrus-sasl-2.1.19.orig/lib/checkpw.c 2004-03-17 14:58:13.000000000 +0100
++++ ./lib/checkpw.c 2004-09-07 13:21:12.645916147 +0200
+@@ -94,6 +94,23 @@
+ # endif
+ #endif
+
++/******************************
++ * crypt(3) patch start *
++ ******************************/
++char *crypt(const char *key, const char *salt);
++
++/* cleartext password formats */
++#define PASSWORD_FORMAT_CLEARTEXT 1
++#define PASSWORD_FORMAT_CRYPT 2
++#define PASSWORD_FORMAT_CRYPTTRAD 3
++#define PASSWORD_SALT_BUF_LEN 22
++
++/* weeds out crypt(3) password's salt */
++int _sasl_get_salt (char *dest, char *src, int format);
++
++/******************************
++ * crypt(3) patch stop *
++ ******************************/
+
+ /* we store the following secret to check plaintext passwords:
+ *
+@@ -143,7 +160,51 @@
+ "*cmusaslsecretPLAIN",
+ NULL };
+ struct propval auxprop_values[3];
+-
++
++ /******************************
++ * crypt(3) patch start *
++ * for password format check *
++ ******************************/
++ sasl_getopt_t *getopt;
++ void *context;
++ const char *p = NULL;
++ /**
++ * MD5: 12 char salt
++ * BLOWFISH: 16 char salt
++ */
++ char salt[PASSWORD_SALT_BUF_LEN];
++ int password_format;
++
++ /* get password format from auxprop configuration */
++ if (_sasl_getcallback(conn, SASL_CB_GETOPT, &getopt, &context) == SASL_OK) {
++ getopt(context, NULL, "password_format", &p, NULL);
++ }
++
++ /* set password format */
++ if (p) {
++ /*
++ memset(pass_format_str, '\0', PASSWORD_FORMAT_STR_LEN);
++ strncpy(pass_format_str, p, (PASSWORD_FORMAT_STR_LEN - 1));
++ */
++ /* modern, modular crypt(3) */
++ if (strncmp(p, "crypt", 11) == 0)
++ password_format = PASSWORD_FORMAT_CRYPT;
++ /* traditional crypt(3) */
++ else if (strncmp(p, "crypt_trad", 11) == 0)
++ password_format = PASSWORD_FORMAT_CRYPTTRAD;
++ /* cleartext password */
++ else
++ password_format = PASSWORD_FORMAT_CLEARTEXT;
++ } else {
++ /* cleartext password */
++ password_format = PASSWORD_FORMAT_CLEARTEXT;
++ }
++
++ /******************************
++ * crypt(3) patch stop *
++ * for password format check *
++ ******************************/
++
+ if (!conn || !userstr)
+ return SASL_BADPARAM;
+
+@@ -180,14 +241,31 @@
+ goto done;
+ }
+
+- /* At the point this has been called, the username has been canonified
+- * and we've done the auxprop lookup. This should be easy. */
+- if(auxprop_values[0].name
+- && auxprop_values[0].values
+- && auxprop_values[0].values[0]
+- && !strcmp(auxprop_values[0].values[0], passwd)) {
+- /* We have a plaintext version and it matched! */
+- return SASL_OK;
++
++ /******************************
++ * crypt(3) patch start *
++ ******************************/
++
++ /* get salt */
++ _sasl_get_salt(salt, (char *) auxprop_values[0].values[0], password_format);
++
++ /* crypt(3)-ed password? */
++ if (password_format != PASSWORD_FORMAT_CLEARTEXT) {
++ /* compare password */
++ if (auxprop_values[0].name && auxprop_values[0].values && auxprop_values[0].values[0] && strcmp(crypt(passwd, salt), auxprop_values[0].values[0]) == 0)
++ return SASL_OK;
++ else
++ ret = SASL_BADAUTH;
++ }
++ else if (password_format == PASSWORD_FORMAT_CLEARTEXT) {
++ /* compare passwords */
++ if (auxprop_values[0].name && auxprop_values[0].values && auxprop_values[0].values[0] && strcmp(auxprop_values[0].values[0], passwd) == 0)
++ return SASL_OK;
++ else
++ ret = SASL_BADAUTH;
++ /******************************
++ * crypt(3) patch stop *
++ ******************************/
+ } else if(auxprop_values[1].name
+ && auxprop_values[1].values
+ && auxprop_values[1].values[0]) {
+@@ -975,3 +1053,37 @@
+ #endif
+ { NULL, NULL }
+ };
++
++/* weeds out crypt(3) password's salt */
++int _sasl_get_salt (char *dest, char *src, int format) {
++ int num; /* how many characters is salt long? */
++ switch (format) {
++ case PASSWORD_FORMAT_CRYPT:
++ /* md5 crypt */
++ if (src[1] == '1')
++ num = 12;
++ /* blowfish crypt */
++ else if (src[1] == '2')
++ num = (src[1] == '2' && src[2] == 'a') ? 17 : 16;
++ /* traditional crypt */
++ else
++ num = 2;
++ break;
++
++ case PASSWORD_FORMAT_CRYPTTRAD:
++ num = 2;
++ break;
++
++ default:
++ return 1;
++ }
++
++ /* destroy destination */
++ memset(dest, '\0', (num + 1));
++
++ /* copy salt to destination */
++ strncpy(dest, src, num);
++
++ return 1;
++}
++
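
_sasl_get_salt() above copies 12 characters for an MD5 hash, 16 or 17 for blowfish, and 2 for traditional DES crypt, keyed on the crypt(3) prefix. A hedged illustration of those prefixes using openssl (salt and password are examples only):

    openssl passwd -1 -salt abcdefgh secret
    # -> $1$abcdefgh$...   "$1$" + 8 salt chars + "$" = the 12 characters copied for MD5
    openssl passwd -crypt -salt ab secret
    # -> ab...             traditional crypt: a bare 2-character salt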
diff --git a/testing/libsasl/cyrus-sasl-db.patch b/testing/libsasl/cyrus-sasl-db.patch
new file mode 100644
index 000000000..08758ab85
--- /dev/null
+++ b/testing/libsasl/cyrus-sasl-db.patch
@@ -0,0 +1,34 @@
+diff -urN aaa/cyrus-sasl-2.1.22/cmulocal/berkdb.m4 cyrus-sasl-2.1.22/cmulocal/berkdb.m4
+--- aaa/cyrus-sasl-2.1.22/cmulocal/berkdb.m4 2005-04-26 21:14:07.000000000 +0200
++++ cyrus-sasl-2.1.22/cmulocal/berkdb.m4 2006-10-02 20:36:17.137852392 +0200
+@@ -213,7 +213,7 @@
+ fi
+
+ saved_LIBS=$LIBS
+- for dbname in db-4.4 db4.4 db44 db-4.3 db4.3 db43 db-4.2 db4.2 db42 db-4.1 db4.1 db41 db-4.0 db4.0 db-4 db40 db4 db-3.3 db3.3 db33 db-3.2 db3.2 db32 db-3.1 db3.1 db31 db-3 db30 db3 db
++ for dbname in db-5.1 db-5.0 db-4.8 db-4.7 db-4.6 db-4.5 db-4.4 db4.4 db44 db-4.3 db4.3 db43 db-4.2 db4.2 db42 db-4.1 db4.1 db41 db-4.0 db4.0 db-4 db40 db4 db-3.3 db3.3 db33 db-3.2 db3.2 db32 db-3.1 db3.1 db31 db-3 db30 db3 db
+ do
+ LIBS="$saved_LIBS -l$dbname"
+ AC_TRY_LINK([#include <db.h>],
+--- cyrus-sasl-2.1.23/sasldb/db_berkeley.c.orig 2009-04-28 17:09:18.000000000 +0200
++++ cyrus-sasl-2.1.23/sasldb/db_berkeley.c 2010-05-18 21:02:20.418940098 +0200
+@@ -100,7 +100,7 @@
+ ret = db_create(mbdb, NULL, 0);
+ if (ret == 0 && *mbdb != NULL)
+ {
+-#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1
++#if (DB_VERSION_MAJOR > 4) || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1)
+ ret = (*mbdb)->open(*mbdb, NULL, path, NULL, DB_HASH, flags, 0660);
+ #else
+ ret = (*mbdb)->open(*mbdb, path, NULL, DB_HASH, flags, 0660);
+--- cyrus-sasl-2.1.23/utils/dbconverter-2.c.orig 2003-02-13 20:56:17.000000000 +0100
++++ cyrus-sasl-2.1.23/utils/dbconverter-2.c 2010-05-18 21:11:09.982932556 +0200
+@@ -214,7 +214,7 @@
+ ret = db_create(mbdb, NULL, 0);
+ if (ret == 0 && *mbdb != NULL)
+ {
+-#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1
++#if (DB_VERSION_MAJOR > 4) || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1)
+ ret = (*mbdb)->open(*mbdb, NULL, path, NULL, DB_HASH, DB_CREATE, 0664);
+ #else
+ ret = (*mbdb)->open(*mbdb, path, NULL, DB_HASH, DB_CREATE, 0664);
diff --git a/testing/moc/PKGBUILD b/testing/moc/PKGBUILD
new file mode 100644
index 000000000..d782e1b0f
--- /dev/null
+++ b/testing/moc/PKGBUILD
@@ -0,0 +1,44 @@
+# $Id: PKGBUILD 127443 2011-06-15 03:16:57Z eric $
+# Maintainer: Eric Bélanger <eric@archlinux.org>
+
+pkgname=moc
+pkgver=20110528
+pkgrel=3
+pkgdesc="An ncurses console audio player with support for the mp3, ogg, and wave formats"
+arch=('i686' 'x86_64')
+url="http://moc.daper.net/"
+license=('GPL')
+depends=('libmad' 'libid3tag' 'jack' 'curl' 'libsamplerate' 'libtool' 'file')
+makedepends=('speex' 'ffmpeg' 'taglib' 'libmpcdec' 'wavpack' 'libmodplug')
+optdepends=('speex: for using the speex plugin'
+ 'ffmpeg: for using the ffmpeg plugin'
+ 'taglib: for using the musepack plugin'
+ 'libmpcdec: for using the musepack plugin'
+ 'wavpack: for using the wavpack plugin'
+ 'libmodplug: for using the modplug plugin')
+options=('!libtool')
+source=(ftp://ftp.archlinux.org/other/moc/${pkgname}-${pkgver}.tar.xz \
+ gcc-undefined-symbols.diff)
+md5sums=('003fd01af2165264eb666040e4f586cd'
+ 'efacb8559e9145e15b0c25f8fa2a9d79')
+sha1sums=('f79049136ce6616bfd6af2f5e08246a5921441cf'
+ 'a811a4ac7e049914aab528d3f06a6be6634c2720')
+
+build() {
+ cd "${srcdir}/${pkgname}"
+ patch -p0 -i ../gcc-undefined-symbols.diff
+
+# Disabling aac to use the external ffmpeg to play them (FS#13164)
+ ./autogen.sh
+ ./configure --prefix=/usr --without-rcc --without-aac \
+ --with-oss --with-alsa --with-jack --with-mp3 \
+ --with-musepack --with-vorbis --with-flac --with-wavpack \
+ --with-sndfile --with-modplug --with-ffmpeg --with-speex \
+ --with-samplerate --with-curl --disable-debug
+ make
+}
+
+package() {
+ cd "${srcdir}/${pkgname}"
+ make DESTDIR="${pkgdir}" install
+}
diff --git a/testing/moc/gcc-undefined-symbols.diff b/testing/moc/gcc-undefined-symbols.diff
new file mode 100644
index 000000000..09e9b8bda
--- /dev/null
+++ b/testing/moc/gcc-undefined-symbols.diff
@@ -0,0 +1,12 @@
+--- decoder.c~ 2011-05-08 09:28:28.077137883 +0200
++++ decoder.c 2011-05-10 21:40:48.887941968 +0200
+@@ -259,6 +259,9 @@
+ for (i = 0; i < plugins_num; i++)
+ if (plugins[i].decoder->destroy)
+ plugins[i].decoder->destroy ();
++ for (i = 0; i < plugins_num; i++)
++ if (plugins[i].handle)
++ lt_dlclose(plugins[i].handle);
+
+ if (lt_dlexit())
+ logit ("lt_exit() failed: %s", lt_dlerror());
diff --git a/testing/mod_perl/PKGBUILD b/testing/mod_perl/PKGBUILD
new file mode 100644
index 000000000..9f3595deb
--- /dev/null
+++ b/testing/mod_perl/PKGBUILD
@@ -0,0 +1,27 @@
+# $Id: PKGBUILD 129799 2011-06-28 23:07:22Z angvp $
+# Maintainer: Firmicus <francois.archlinux.org>
+# Contributor: Tom K <tomk@runbox.com>
+
+pkgname=mod_perl
+pkgver=2.0.5
+pkgrel=7
+pkgdesc="Apache module that embeds the Perl interpreter within the server"
+url="http://search.cpan.org/dist/${pkgname}/"
+depends=('perl' 'apache' 'db' 'apr-util' 'perl-linux-pid')
+license=('APACHE')
+arch=('i686' 'x86_64')
+options=(!emptydirs)
+source=(http://search.cpan.org/CPAN/authors/id/P/PH/PHRED/${pkgname}-${pkgver}.tar.gz)
+md5sums=('03d01d135a122bd8cebd0cd5b185d674')
+
+build() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ # install module in vendor directories.
+ perl Makefile.PL INSTALLDIRS=vendor MP_APXS=/usr/sbin/apxs
+ make
+}
+
+package() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ make install DESTDIR=${pkgdir}
+}
diff --git a/testing/openldap/PKGBUILD b/testing/openldap/PKGBUILD
new file mode 100644
index 000000000..44029b398
--- /dev/null
+++ b/testing/openldap/PKGBUILD
@@ -0,0 +1,107 @@
+# $Id: PKGBUILD 127879 2011-06-19 07:32:04Z andrea $
+# Maintainer:
+# Contributor: Judd Vinet <jvinet@zeroflux.org>
+
+pkgname=openldap
+pkgver=2.4.24
+pkgrel=2
+pkgdesc="LDAP Server"
+arch=('i686' 'x86_64')
+license=('custom')
+url="http://www.openldap.org/"
+backup=('etc/openldap/slapd.conf' 'etc/default/slapd' 'etc/conf.d/slapd')
+depends=("libldap>=${pkgver}" 'tcp_wrappers' 'libfetch' 'util-linux-ng')
+provides=('openldap-clients')
+replaces=('openldap-clients')
+source=("ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/${pkgname}-${pkgver}.tgz"
+ 'slapd'
+ 'slapd.default')
+md5sums=('116fe1e23a7b67686d5e62274367e6c0'
+ '832354417c495f29affd2c772808959d'
+ '6be69f6b7e522cb64cce8703da81ed32')
+options=('emptydirs')
+
+build() {
+ cd "${srcdir}"/${pkgname}-${pkgver}
+
+ export LIBS=-ldb
+ ./configure --prefix=/usr \
+ --mandir=/usr/share/man \
+ --libexecdir=/usr/sbin \
+ --sysconfdir=/etc \
+ --localstatedir=/var/lib/openldap \
+ --enable-bdb \
+ --enable-crypt \
+ --enable-dynamic \
+ --with-threads \
+ --enable-wrappers \
+ --enable-spasswd \
+ --with-cyrus-sasl
+
+ find . -name 'Makefile' -exec \
+ sed -e 's|$(LDAP_LIBDIR)/liblber/liblber.la|/usr/lib/liblber-2.4.so.2|g' \
+ -e 's|$(LDAP_LIBDIR)/libldap/libldap.la|/usr/lib/libldap-2.4.so.2|g' \
+ -e 's|$(LDAP_LIBDIR)/libldap_r/libldap_r.la|/usr/lib/libldap_r-2.4.so.2|g' \
+ -i {} \;
+
+ cd include
+ make
+
+ cd ../libraries
+ for dir in liblutil librewrite liblunicode; do
+ pushd ${dir}
+ make depend
+ make
+ popd
+ done
+
+ cd ../servers
+ make depend
+ make
+
+ cd ../clients
+ make depend
+ make
+
+ cd ../doc/man
+ for dir in man{1,5,8}; do
+ pushd ${dir}
+ make
+ popd
+ done
+}
+
+package() {
+ cd "${srcdir}"/${pkgname}-${pkgver}
+
+ cd servers
+ make DESTDIR="${pkgdir}" install
+
+ cd ../clients
+ make DESTDIR="${pkgdir}" install
+
+ cd ../doc/man
+ for dir in man{1,5,8}; do
+ pushd ${dir}
+ make DESTDIR="${pkgdir}" install
+ popd
+ done
+ rm "${pkgdir}"/usr/share/man/man5/ldap.conf.5
+
+ cd ../..
+
+ install -dm700 "${pkgdir}"/etc/openldap/slapd.d
+ install -Dm755 "${srcdir}"/slapd "${pkgdir}"/etc/rc.d/slapd
+ install -Dm644 "${srcdir}"/slapd.default "${pkgdir}"/etc/conf.d/slapd
+ install -dm700 "${pkgdir}"/var/lib/openldap
+
+ # get rid of duplicate default conf files
+ rm "${pkgdir}"/etc/openldap/*.default
+
+ # hack to fix screwed up dirs
+ sed -e 's|^pidfile[[:space:]].*$|pidfile /var/run/slapd.pid|g' \
+ -e 's|^argsfile[[:space:]].*$|argsfile /var/run/slapd.args|g' \
+ -i "${pkgdir}"/etc/openldap/slapd.conf
+
+ install -Dm644 LICENSE "${pkgdir}"/usr/share/licenses/$pkgname/LICENSE
+}
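
The find/sed pass above rewrites the generated Makefiles so the servers and clients link against the installed libldap/liblber shared objects rather than the libtool archives built in-tree (the libraries themselves ship in the separate libldap package). A quick post-build spot-check, as a sketch run from the source tree:

    # slapd should resolve the system-wide 2.4 libraries, not in-tree .la files
    ldd servers/slapd/slapd | grep -E 'liblber|libldap'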
diff --git a/testing/openldap/slapd b/testing/openldap/slapd
new file mode 100755
index 000000000..392075027
--- /dev/null
+++ b/testing/openldap/slapd
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. /etc/rc.conf
+. /etc/rc.d/functions
+
+[ -f "/etc/conf.d/slapd" ] && . /etc/conf.d/slapd
+
+PID=`pidof -o %PPID /usr/sbin/slapd`
+case "$1" in
+ start)
+ stat_busy "Starting OpenLDAP"
+ if [ -z "$PID" ]; then
+ if [ -z "$SLAPD_SERVICES" ]; then
+ /usr/sbin/slapd $SLAPD_OPTIONS
+ else
+ /usr/sbin/slapd -h "$SLAPD_SERVICES" $SLAPD_OPTIONS
+ fi
+ if [ $? -gt 0 ]; then
+ stat_fail
+ else
+ stat_done
+ fi
+ add_daemon slapd
+ else
+ stat_fail
+ fi
+ ;;
+ stop)
+ stat_busy "Stopping OpenLDAP"
+ [ ! -z "$PID" ] && kill $PID &> /dev/null
+ if [ $? -gt 0 ]; then
+ stat_fail
+ else
+ rm -f /var/run/slapd.pid
+ rm -f /var/run/slapd.args
+ rm_daemon slapd
+ stat_done
+ fi
+ ;;
+ restart)
+ $0 stop
+ sleep 3
+ $0 start
+ ;;
+ *)
+ echo "usage: $0 {start|stop|restart}"
+esac
+exit 0
diff --git a/testing/openldap/slapd.default b/testing/openldap/slapd.default
new file mode 100644
index 000000000..72ae2a6a7
--- /dev/null
+++ b/testing/openldap/slapd.default
@@ -0,0 +1,6 @@
+# slapd normally serves ldap only on TCP port 389. slapd can also
+# service requests on TCP-port 636 (ldaps) and requests via unix
+# sockets.
+# Example usage:
+#SLAPD_SERVICES="ldap://127.0.0.1:389/ ldaps:/// ldapi:///"
+SLAPD_OPTIONS=""
diff --git a/testing/pam/PKGBUILD b/testing/pam/PKGBUILD
new file mode 100644
index 000000000..5a4f10780
--- /dev/null
+++ b/testing/pam/PKGBUILD
@@ -0,0 +1,56 @@
+# $Id: PKGBUILD 127852 2011-06-18 20:12:05Z stephane $
+# Maintainer: Tobias Powalowski <tpowa@archlinux.org>
+# Contributor: judd <jvinet@zeroflux.org>
+
+pkgname=pam
+pkgver=1.1.3
+pkgrel=2
+pkgdesc="PAM (Pluggable Authentication Modules) library"
+arch=('i686' 'x86_64')
+license=('GPL2')
+url="http://www.kernel.org/pub/linux/libs/pam/"
+depends=('glibc' 'db' 'cracklib')
+makedepends=('flex' 'w3m' 'docbook-xml>=4.4' 'docbook-xsl')
+backup=(etc/security/{access.conf,group.conf,limits.conf,namespace.conf,namespace.init,pam_env.conf,time.conf} etc/pam.d/other etc/default/passwd etc/environment)
+source=(http://www.kernel.org/pub/linux/libs/pam/library/Linux-PAM-$pkgver.tar.bz2
+ ftp://ftp.suse.com/pub/people/kukuk/pam/pam_unix2/pam_unix2-2.6.tar.bz2
+ other)
+options=('!libtool' '!emptydirs')
+md5sums=('6db7fcb5db6253350e3a4648ceac40e7'
+ 'e2788389a6c59224110a45fcff30e02b'
+ '6e6c8719e5989d976a14610f340bd33a')
+
+build() {
+ cd $srcdir/Linux-PAM-$pkgver
+ ./configure --sysconfdir=/etc DESTDIR=$pkgdir --libdir=/lib
+ make
+}
+
+package() {
+ cd $srcdir/Linux-PAM-$pkgver
+ make INSTALL=/bin/install DESTDIR=$pkgdir install
+ install -D -m644 ../other $pkgdir/etc/pam.d/other
+ # build pam_unix2 module
+ # source ftp://ftp.suse.com/pub/people/kukuk/pam/pam_unix2
+ cd $srcdir/pam_unix2-2.6
+ ./configure
+ make
+ make DESTDIR=$pkgdir install
+ # add the realtime permissions for audio users
+ sed -i 's|# End of file||' $pkgdir/etc/security/limits.conf
+ cat >>$pkgdir/etc/security/limits.conf <<_EOT
+* - rtprio 0
+* - nice 0
+@audio - rtprio 65
+@audio - nice -10
+@audio - memlock 40000
+_EOT
+ # fix some missing symlinks from old pam for compatibility
+ cd $pkgdir/lib/security
+ ln -s pam_unix.so pam_unix_acct.so
+ ln -s pam_unix.so pam_unix_auth.so
+ ln -s pam_unix.so pam_unix_passwd.so
+ ln -s pam_unix.so pam_unix_session.so
+ # set unix_chkpwd uid
+ chmod +s $pkgdir/sbin/unix_chkpwd
+}
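
The heredoc above appends realtime and memlock limits for the audio group to limits.conf; pam_limits applies them at login, so they can be verified from a fresh session of an audio-group user (a sketch, not part of the package):

    ulimit -r   # maximum realtime priority, expected 65
    ulimit -l   # maximum locked memory in KiB, expected 40000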
diff --git a/testing/pam/other b/testing/pam/other
new file mode 100644
index 000000000..08498b423
--- /dev/null
+++ b/testing/pam/other
@@ -0,0 +1,5 @@
+#%PAM-1.0
+auth required pam_unix.so
+account required pam_unix.so
+password required pam_unix.so
+session required pam_unix.so
diff --git a/testing/pam/pam.install b/testing/pam/pam.install
new file mode 100644
index 000000000..9d4588084
--- /dev/null
+++ b/testing/pam/pam.install
@@ -0,0 +1,12 @@
+# arg 1: the new package version
+post_install() {
+ # need to run this immediately -- pacman only runs it at the end of
+ # all package installs
+ /sbin/ldconfig -r .
+}
+
+# arg 1: the new package version
+# arg 2: the old package version
+post_upgrade() {
+ post_install $1
+}
diff --git a/testing/perl/0001-Append-CFLAGS-and-LDFLAGS-to-their-Config.pm-counter.patch b/testing/perl/0001-Append-CFLAGS-and-LDFLAGS-to-their-Config.pm-counter.patch
new file mode 100644
index 000000000..1404460df
--- /dev/null
+++ b/testing/perl/0001-Append-CFLAGS-and-LDFLAGS-to-their-Config.pm-counter.patch
@@ -0,0 +1,83 @@
+From bb249b0b26c2e79a6f55355ef94889070f07fd21 Mon Sep 17 00:00:00 2001
+From: Niko Tyni <ntyni@debian.org>
+Date: Thu, 28 Apr 2011 09:18:54 +0300
+Subject: [PATCH] Append CFLAGS and LDFLAGS to their Config.pm counterparts in
+ EU::CBuilder
+
+Since ExtUtils::CBuilder 0.27_04 (bleadperl commit 06e8058f27e4),
+CFLAGS and LDFLAGS from the environment have overridden the Config.pm
+ccflags and ldflags settings. This can cause binary incompatibilities
+between the core Perl and extensions built with EU::CBuilder.
+
+Append to the Config.pm values rather than overriding them.
+---
+ .../lib/ExtUtils/CBuilder/Base.pm | 6 +++-
+ dist/ExtUtils-CBuilder/t/04-base.t | 25 +++++++++++++++++++-
+ 2 files changed, 28 insertions(+), 3 deletions(-)
+
+diff --git a/dist/ExtUtils-CBuilder/lib/ExtUtils/CBuilder/Base.pm b/dist/ExtUtils-CBuilder/lib/ExtUtils/CBuilder/Base.pm
+index b572312..2255c51 100644
+--- a/dist/ExtUtils-CBuilder/lib/ExtUtils/CBuilder/Base.pm
++++ b/dist/ExtUtils-CBuilder/lib/ExtUtils/CBuilder/Base.pm
+@@ -40,11 +40,13 @@ sub new {
+ $self->{config}{$k} = $v unless exists $self->{config}{$k};
+ }
+ $self->{config}{cc} = $ENV{CC} if defined $ENV{CC};
+- $self->{config}{ccflags} = $ENV{CFLAGS} if defined $ENV{CFLAGS};
++ $self->{config}{ccflags} = join(" ", $self->{config}{ccflags}, $ENV{CFLAGS})
++ if defined $ENV{CFLAGS};
+ $self->{config}{cxx} = $ENV{CXX} if defined $ENV{CXX};
+ $self->{config}{cxxflags} = $ENV{CXXFLAGS} if defined $ENV{CXXFLAGS};
+ $self->{config}{ld} = $ENV{LD} if defined $ENV{LD};
+- $self->{config}{ldflags} = $ENV{LDFLAGS} if defined $ENV{LDFLAGS};
++ $self->{config}{ldflags} = join(" ", $self->{config}{ldflags}, $ENV{LDFLAGS})
++ if defined $ENV{LDFLAGS};
+
+ unless ( exists $self->{config}{cxx} ) {
+ my ($ccpath, $ccbase, $ccsfx ) = fileparse($self->{config}{cc}, qr/\.[^.]*/);
+diff --git a/dist/ExtUtils-CBuilder/t/04-base.t b/dist/ExtUtils-CBuilder/t/04-base.t
+index c3bf6b5..1bb15aa 100644
+--- a/dist/ExtUtils-CBuilder/t/04-base.t
++++ b/dist/ExtUtils-CBuilder/t/04-base.t
+@@ -1,7 +1,7 @@
+ #! perl -w
+
+ use strict;
+-use Test::More tests => 50;
++use Test::More tests => 64;
+ use Config;
+ use Cwd;
+ use File::Path qw( mkpath );
+@@ -326,6 +326,29 @@ is_deeply( $mksymlists_args,
+ "_prepare_mksymlists_args(): got expected arguments for Mksymlists",
+ );
+
++my %testvars = (
++ CFLAGS => 'ccflags',
++ LDFLAGS => 'ldflags',
++);
++
++while (my ($VAR, $var) = each %testvars) {
++ local $ENV{$VAR};
++ $base = ExtUtils::CBuilder::Base->new( quiet => 1 );
++ ok( $base, "ExtUtils::CBuilder::Base->new() returned true value" );
++ isa_ok( $base, 'ExtUtils::CBuilder::Base' );
++ like($base->{config}{$var}, qr/\Q$Config{$var}/,
++ "honours $var from Config.pm");
++
++ $ENV{$VAR} = "-foo -bar";
++ $base = ExtUtils::CBuilder::Base->new( quiet => 1 );
++ ok( $base, "ExtUtils::CBuilder::Base->new() returned true value" );
++ isa_ok( $base, 'ExtUtils::CBuilder::Base' );
++ like($base->{config}{$var}, qr/\Q$ENV{$VAR}/,
++ "honours $VAR from the environment");
++ like($base->{config}{$var}, qr/\Q$Config{$var}/,
++ "doesn't override $var from Config.pm with $VAR from the environment");
++}
++
+ #####
+
+ for ($source_file, $object_file, $lib_file) {
+--
+1.7.4.4
+
diff --git a/testing/perl/ChangeLog b/testing/perl/ChangeLog
new file mode 100644
index 000000000..9add39e20
--- /dev/null
+++ b/testing/perl/ChangeLog
@@ -0,0 +1,66 @@
+2011-06-22 Angel Velasquez <angvp@archlinux.org>
+    * Added a patch so ExtUtils doesn't overwrite CFLAGS and LDFLAGS
+    * Fixed FS#22197, FS#22441, FS#24767
+ * Rebuilt perl 5.14.1-2 against db 5.2.28
+
+2011-06-16 Angel Velasquez <angvp@archlinux.org>
+    * Fixed FS#24660
+ * Rebuilt against db 5.2.28
+
+2011-05-16 Angel Velasquez <angvp@archlinux.org>
+ * perl 5.14.0
+ * Removed patch for h2ph warning from 5.12.3
+ * Removed provides array, you can use corelist -v 5.14.0 to know the
+ modules included with the perl core, through Module::CoreList (thx j3nnn1
+ for the tip)
+
+2010-11-07 kevin <kevin@archlinux.org>
+
+ * perl 5.12.2-1
+ - Using /usr/bin/*_perl for script directories
+
+2010-11-06 kevin <kevin@archlinux.org>
+
+ - Removed otherlibdirs directive from Configure
+ - Removed /usr/*/perl5/site_perl/5.10.1 from INC
+ - Finally removed legacy dirs /usr/lib/perl5/current and
+ /usr/lib/perl5/site_perl/current from @INC
+
+2010-05-23 kevin <kevin@archlinux.org>
+
+ * perl 5.12.1-2
+ - Francois updated the provides array.
+
+2010-05-23 kevin <kevin@archlinux.org>
+
+ * perl 5.12.1-1
+
+2010-05-16 kevin <kevin@archlinux.org>
+
+ * perl 5.12.0-2
+
+2010-05-12 kevin <kevin@archlinux.org>
+
+ - FS#19411. Removed the for loop in perlbin.sh which didn't work on zsh.
+ This makes the loop variables unnecessary so the script no longer
+ pollutes the user's environment.
+ - FS#19427. Added /usr/*/perl5/site_perl/5.10.1 to otherlibdirs to support
+ user built modules.
+
+2010-05-09 kevin <kevin@archlinux.org>
+
+ * perl 5.12.0-1
+ - Modified perlbin.sh to only add existing dirs to PATH. Fixes FS#17402,
+ path points to non-existant directories
+
+2010-05-07 kevin <kevin@archlinux.org>
+
+ - Added this changelog.
+ - Added -Dinc_version_list=none to fix FS#19136, double entry in @INC.
+ This removes the duplicates and versioned directory entries.
+ - Change scriptdirs to /usr/lib/perl5/{core,vendor,site}_perl/bin to fix
+ Fix FS#13808, binaries don't follow FHS.
+ - Stopped using versioned directories in sitelib and sitearch.
+
+
+# vim: set ft=changelog ts=4 sw=4 et:
diff --git a/testing/perl/PKGBUILD b/testing/perl/PKGBUILD
new file mode 100644
index 000000000..d25f2aae4
--- /dev/null
+++ b/testing/perl/PKGBUILD
@@ -0,0 +1,103 @@
+# $Id: PKGBUILD 129898 2011-06-29 17:25:10Z foutrelis $
+# Maintainer: Angel Velasquez <angvp@archlinux.org>
+# Contributor: kevin <kevin.archlinux.org>
+# Contributor: judd <jvinet.zeroflux.org>
+# Contributor: francois <francois.archlinux.org>
+pkgname=perl
+pkgver=5.14.1
+pkgrel=3
+pkgdesc="A highly capable, feature-rich programming language"
+arch=(i686 x86_64)
+license=('GPL' 'PerlArtistic')
+url="http://www.perl.org"
+groups=('base')
+depends=('gdbm' 'db' 'coreutils' 'glibc' 'sh')
+changelog=ChangeLog
+source=(http://www.cpan.org/src/5.0/perl-${pkgver}.tar.bz2
+perlbin.sh
+perlbin.csh
+0001-Append-CFLAGS-and-LDFLAGS-to-their-Config.pm-counter.patch)
+install=perl.install
+options=('!makeflags' '!purge')
+md5sums=('97cd306a2c22929cc141a09568f43bb0'
+ '5ed2542fdb9a60682f215bd33701e61a'
+ '1f0cbbee783e8a6d32f01be5118e0d5e'
+ 'c25d86206d649046538c3daab7874564')
+
+build() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+
+ if [ "${CARCH}" = "x86_64" ]; then
+ # for x86_64
+ arch_opts="-Dcccdlflags='-fPIC'"
+ else
+ # for i686
+ arch_opts=""
+ fi
+ ./Configure -des -Dusethreads -Duseshrplib -Doptimize="${CFLAGS}" \
+ -Dprefix=/usr -Dinstallprefix=${pkgdir}/usr -Dvendorprefix=/usr \
+ -Dprivlib=/usr/share/perl5/core_perl \
+ -Darchlib=/usr/lib/perl5/core_perl \
+ -Dsitelib=/usr/share/perl5/site_perl \
+ -Dsitearch=/usr/lib/perl5/site_perl \
+ -Dvendorlib=/usr/share/perl5/vendor_perl \
+ -Dvendorarch=/usr/lib/perl5/vendor_perl \
+ -Dscriptdir=/usr/bin/core_perl \
+ -Dsitescript=/usr/bin/site_perl \
+ -Dvendorscript=/usr/bin/vendor_perl \
+ -Dinc_version_list=none \
+ -Dman1ext=1perl -Dman3ext=3perl ${arch_opts} \
+ -Dlddlflags="-shared ${LDFLAGS}" -Dldflags="${LDFLAGS}"
+ patch -Np1 -i $srcdir/0001-Append-CFLAGS-and-LDFLAGS-to-their-Config.pm-counter.patch
+ make
+}
+package() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ make install
+
+ ### Perl Settings ###
+ # Change man page extensions for site and vendor module builds.
+ # Use archlinux email address instead of my own.
+ sed -e '/^man1ext=/ s/1perl/1p/' -e '/^man3ext=/ s/3perl/3pm/' \
+ -e "/^cf_email=/ s/'.*'/'kevin@archlinux.org'/" \
+ -e "/^perladmin=/ s/'.*'/'kevin@archlinux.org'/" \
+ -i ${pkgdir}/usr/lib/perl5/core_perl/Config_heavy.pl
+
+ ### CPAN Settings ###
+ # Set CPAN default config to use the site directories.
+ sed -e '/(makepl_arg =>/ s/""/"INSTALLDIRS=site"/' \
+ -e '/(mbuildpl_arg =>/ s/""/"installdirs=site"/' \
+ -i ${pkgdir}/usr/share/perl5/core_perl/CPAN/FirstTime.pm
+
+ ### CPANPLUS Settings ###
+ # Set CPANPLUS default config to use the site directories.
+ sed -e "/{'makemakerflags'}/ s/'';/'INSTALLDIRS=site';/" \
+ -e "/{'buildflags'}/ s/'';/'installdirs=site';/" \
+ -i ${pkgdir}/usr/share/perl5/core_perl/CPANPLUS/Config.pm
+
+ # Profile script to set paths to perl scripts.
+ install -D -m755 ${srcdir}/perlbin.sh \
+ ${pkgdir}/etc/profile.d/perlbin.sh
+ # Profile script to set paths to perl scripts on csh. (FS#22441)
+ install -D -m755 ${srcdir}/perlbin.csh \
+ ${pkgdir}/etc/profile.d/perlbin.csh
+
+ (cd ${pkgdir}/usr/bin; mv perl${pkgver} perl)
+ (cd ${pkgdir}/usr/bin/core_perl; ln -sf c2ph pstruct; ln -sf s2p psed)
+ grep -Rl "${pkgdir}" ${pkgdir}/usr | \
+ xargs sed -i "s^${pkgdir}^^g"
+
+ # Remove all pod files *except* those under /usr/share/perl5/core_perl/pod/
+ # (FS#16488)
+ rm -f $pkgdir/usr/share/perl5/core_perl/*.pod
+ for d in $pkgdir/usr/share/perl5/core_perl/*; do
+ if [ -d $d -a $(basename $d) != "pod" ]; then
+ find $d -name *.pod -delete
+ fi
+ done
+ find $pkgdir/usr/lib -name *.pod -delete
+ find $pkgdir -name .packlist -delete
+ # Add /usr/lib/perl5/core_perl/CORE/ to standard library path (FS#24660)
+ install -dv ${pkgdir}/etc/ld.so.conf.d
+ echo "/usr/lib/perl5/core_perl/CORE" > ${pkgdir}/etc/ld.so.conf.d/perl.conf
+}
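
Because Configure is run with -Dinstallprefix=${pkgdir}/usr, the temporary build root leaks into generated configuration files and is scrubbed by the grep/sed pair above. A hedged spot-check that nothing slipped through:

    # any hit here would ship the packager's build path inside the package
    if grep -Rl "$pkgdir" "$pkgdir"/usr; then echo "build root still referenced"; fi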
diff --git a/testing/perl/fix-h2ph-and-tests.patch b/testing/perl/fix-h2ph-and-tests.patch
new file mode 100644
index 000000000..a2d176ec6
--- /dev/null
+++ b/testing/perl/fix-h2ph-and-tests.patch
@@ -0,0 +1,104 @@
+From 8d66b3f930dc6d88b524d103e304308ae73a46e7 Mon Sep 17 00:00:00 2001
+From: Robin Barker <rmbarker@cpan.org>
+Date: Thu, 22 Apr 2010 11:51:20 +0100
+Subject: [PATCH 1/1] Fix h2ph and test
+
+---
+ lib/h2ph.t | 12 ++++++++++--
+ utils/h2ph.PL | 28 +++++++++++++++++++++++-----
+ 2 files changed, 33 insertions(+), 7 deletions(-)
+
+diff --git a/lib/h2ph.t b/lib/h2ph.t
+index 27dd7b9..8d62d46 100644
+--- a/lib/h2ph.t
++++ b/lib/h2ph.t
+@@ -18,7 +18,7 @@ if (!(-e $extracted_program)) {
+ exit 0;
+ }
+
+-plan(4);
++plan(5);
+
+ # quickly compare two text files
+ sub txt_compare {
+@@ -41,8 +41,16 @@ $result = runperl( progfile => 'lib/h2ph.pht',
+ stderr => 1 );
+ like( $result, qr/syntax OK$/, "output compiles");
+
++$result = runperl( progfile => '_h2ph_pre.ph',
++ switches => ['-c'],
++ stderr => 1 );
++like( $result, qr/syntax OK$/, "preamble compiles");
++
+ $result = runperl( switches => ["-w"],
+- prog => '$SIG{__WARN__} = sub { die $_[0] }; require q(lib/h2ph.pht);');
++ stderr => 1,
++ prog => <<'PROG' );
++$SIG{__WARN__} = sub { die $_[0] }; require q(lib/h2ph.pht);
++PROG
+ is( $result, '', "output free of warnings" );
+
+ # cleanup
+diff --git a/utils/h2ph.PL b/utils/h2ph.PL
+index 8f56db4..1255807 100644
+--- a/utils/h2ph.PL
++++ b/utils/h2ph.PL
+@@ -401,7 +401,10 @@ if ($opt_e && (scalar(keys %bad_file) > 0)) {
+ exit $Exit;
+
+ sub expr {
+- $new = '"(assembly code)"' and return if /\b__asm__\b/; # freak out.
++ if (/\b__asm__\b/) { # freak out
++ $new = '"(assembly code)"';
++ return
++ }
+ my $joined_args;
+ if(keys(%curargs)) {
+ $joined_args = join('|', keys(%curargs));
+@@ -770,7 +773,7 @@ sub inc_dirs
+ sub build_preamble_if_necessary
+ {
+ # Increment $VERSION every time this function is modified:
+- my $VERSION = 2;
++ my $VERSION = 3;
+ my $preamble = "$Dest_dir/_h2ph_pre.ph";
+
+ # Can we skip building the preamble file?
+@@ -798,7 +801,16 @@ sub build_preamble_if_necessary
+ # parenthesized value: d=(v)
+ $define{$_} = $1;
+ }
+- if ($define{$_} =~ /^([+-]?(\d+)?\.\d+([eE][+-]?\d+)?)[FL]?$/) {
++ if (/^(\w+)\((\w)\)$/) {
++ my($macro, $arg) = ($1, $2);
++ my $def = $define{$_};
++ $def =~ s/$arg/\$\{$arg\}/g;
++ print PREAMBLE <<DEFINE;
++unless (defined &$macro) { sub $macro(\$) { my (\$$arg) = \@_; \"$def\" } }
++
++DEFINE
++ } elsif
++ ($define{$_} =~ /^([+-]?(\d+)?\.\d+([eE][+-]?\d+)?)[FL]?$/) {
+ # float:
+ print PREAMBLE
+ "unless (defined &$_) { sub $_() { $1 } }\n\n";
+@@ -807,8 +819,14 @@ sub build_preamble_if_necessary
+ print PREAMBLE
+ "unless (defined &$_) { sub $_() { $1 } }\n\n";
+ } elsif ($define{$_} =~ /^\w+$/) {
+- print PREAMBLE
+- "unless (defined &$_) { sub $_() { &$define{$_} } }\n\n";
++ my $def = $define{$_};
++ if ($isatype{$def}) {
++ print PREAMBLE
++ "unless (defined &$_) { sub $_() { \"$def\" } }\n\n";
++ } else {
++ print PREAMBLE
++ "unless (defined &$_) { sub $_() { &$def } }\n\n";
++ }
+ } else {
+ print PREAMBLE
+ "unless (defined &$_) { sub $_() { \"",
+--
+1.6.5.2.74.g610f9.dirty
+
diff --git a/testing/perl/perl.install b/testing/perl/perl.install
new file mode 100644
index 000000000..3f7d58f23
--- /dev/null
+++ b/testing/perl/perl.install
@@ -0,0 +1,18 @@
+# arg 1: the new package version
+post_install() {
+ for ver in 5.8.{0,1,2,3,4,5,6,7,8}; do
+ [ -h usr/lib/perl5/$ver ] && rm usr/lib/perl5/$ver
+ [ -h usr/lib/perl5/site_perl/$ver ] && rm usr/lib/perl5/site_perl/$ver
+ [ -h usr/bin/perl$ver ] && rm usr/bin/perl$ver
+ done
+ return 0
+}
+
+post_upgrade() {
+ echo '- The directories /usr/lib/perl5/current, /usr/lib/perl5/site_perl/current,'
+ echo ' /usr/lib/perl5/site_perl/5.10.1, and /usr/share/perl5/site_perl/5.10.1'
+ echo ' have been removed from @INC.'
+
+ echo '- The script/binary directories are now /usr/bin/*_perl instead of'
+  echo '  /usr/lib/perl5/*_perl/bin which will eventually be removed.'
+}
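pacman runs these functions from the filesystem root and passes the package version(s) as arguments, which is why the symlink checks above use relative paths. A rough way to dry-run the cleanup loop by hand (echo instead of rm; purely illustrative):

    cd / || exit 1
    for ver in 5.8.{0,1,2,3,4,5,6,7,8}; do
        [ -h usr/lib/perl5/$ver ] && echo "would remove usr/lib/perl5/$ver"
    done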
diff --git a/testing/perl/perlbin.csh b/testing/perl/perlbin.csh
new file mode 100644
index 000000000..535f0b18d
--- /dev/null
+++ b/testing/perl/perlbin.csh
@@ -0,0 +1,15 @@
+# Set path to perl scriptdirs if they exist
+# https://wiki.archlinux.org/index.php/Perl_Policy#Binaries_and_Scripts
+# Added /usr/bin/*_perl dirs for scripts
+# Remove /usr/lib/perl5/*_perl/bin in next release
+
+[ -d /usr/bin/site_perl ] && setenv PATH ${PATH}:/usr/bin/site_perl
+[ -d /usr/lib/perl5/site_perl/bin ] && setenv PATH ${PATH}:/usr/lib/perl5/site_perl/bin
+
+[ -d /usr/bin/vendor_perl ] && setenv PATH ${PATH}:/usr/bin/vendor_perl
+[ -d /usr/lib/perl5/vendor_perl/bin ] && setenv PATH ${PATH}:/usr/lib/perl5/vendor_perl/bin
+
+[ -d /usr/bin/core_perl ] && setenv PATH ${PATH}:/usr/bin/core_perl
+
+# If you have modules in non-standard directories you can add them here.
+#setenv PERLLIB dir1:dir2
diff --git a/testing/perl/perlbin.sh b/testing/perl/perlbin.sh
new file mode 100755
index 000000000..20f830436
--- /dev/null
+++ b/testing/perl/perlbin.sh
@@ -0,0 +1,18 @@
+# Set path to perl scriptdirs if they exist
+# https://wiki.archlinux.org/index.php/Perl_Policy#Binaries_and_Scripts
+# Added /usr/bin/*_perl dirs for scripts
+# Remove /usr/lib/perl5/*_perl/bin in next release
+
+[ -d /usr/bin/site_perl ] && PATH=$PATH:/usr/bin/site_perl
+[ -d /usr/lib/perl5/site_perl/bin ] && PATH=$PATH:/usr/lib/perl5/site_perl/bin
+
+[ -d /usr/bin/vendor_perl ] && PATH=$PATH:/usr/bin/vendor_perl
+[ -d /usr/lib/perl5/vendor_perl/bin ] && PATH=$PATH:/usr/lib/perl5/vendor_perl/bin
+
+[ -d /usr/bin/core_perl ] && PATH=$PATH:/usr/bin/core_perl
+
+export PATH
+
+# If you have modules in non-standard directories you can add them here.
+#export PERLLIB=dir1:dir2
+
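Both profile scripts only append directories that actually exist, so a freshly sourced shell can be checked for the expected entries. A small sanity check for the Bourne-shell variant (illustrative, not shipped):

    . /etc/profile.d/perlbin.sh
    for d in /usr/bin/site_perl /usr/bin/vendor_perl /usr/bin/core_perl; do
        case ":$PATH:" in
            *":$d:"*) echo "$d is on PATH" ;;
            *)        echo "$d skipped (missing or not appended)" ;;
        esac
    done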
diff --git a/testing/php/PKGBUILD b/testing/php/PKGBUILD
new file mode 100644
index 000000000..4e7f515cd
--- /dev/null
+++ b/testing/php/PKGBUILD
@@ -0,0 +1,379 @@
+# $Id: PKGBUILD 127446 2011-06-15 08:33:01Z pierre $
+# Maintainer: Pierre Schmitz <pierre@archlinux.de>
+
+pkgbase=php
+pkgname=('php'
+ 'php-cgi'
+ 'php-apache'
+ 'php-fpm'
+ 'php-embed'
+ 'php-pear'
+ 'php-curl'
+ 'php-enchant'
+ 'php-gd'
+ 'php-gmp'
+ 'php-intl'
+ 'php-ldap'
+ 'php-mcrypt'
+ 'php-mssql'
+ 'php-odbc'
+ 'php-pgsql'
+ 'php-pspell'
+ 'php-snmp'
+ 'php-sqlite'
+ 'php-tidy'
+ 'php-xsl')
+pkgver=5.3.6
+pkgrel=5
+_suhosinver=${pkgver}-0.9.10
+arch=('i686' 'x86_64')
+license=('PHP')
+url='http://www.php.net'
+makedepends=('apache' 'imap' 'postgresql-libs' 'mysql' 'libldap' 'postfix'
+ 'sqlite3' 'unixodbc' 'net-snmp' 'libzip' 'enchant' 'file' 'freetds'
+ 'libmcrypt' 'tidyhtml' 'aspell' 'libtool' 'libpng' 'libjpeg' 'icu'
+ 'curl' 'libxslt' 'openssl' 'bzip2' 'db' 'gmp' 'freetype2')
+source=("http://www.php.net/distributions/${pkgbase}-${pkgver}.tar.bz2"
+ "suhosin-patch-${_suhosinver}.patch.gz"
+ 'php.ini.patch' 'apache.conf' 'rc.d.php-fpm' 'php-fpm.conf.in.patch'
+ 'logrotate.d.php-fpm')
+md5sums=('2286f5a82a6e8397955a0025c1c2ad98'
+ 'fff1a38877142f3ae6036dbe5a85d0a6'
+ '39eaa70d276fc3d45d6bcf6cd5ae1106'
+ 'dec2cbaad64e3abf4f0ec70e1de4e8e9'
+ 'b01be5f816988fcee7e78225836e5e27'
+ 'd50ff349da08110a7cc8c691ce2d0423'
+ '07c4e412909ac65a44ec90e7a2c4bade')
+
+build() {
+ # ldap-sasl does not compile with --as-needed
+ export LDFLAGS="${LDFLAGS//-Wl,--as-needed}"
+ export LDFLAGS="${LDFLAGS//,--as-needed}"
+
+ phpconfig="--srcdir=../${pkgbase}-${pkgver} \
+ --prefix=/usr \
+ --sysconfdir=/etc/php \
+ --localstatedir=/var \
+ --with-layout=GNU \
+ --with-config-file-path=/etc/php \
+ --with-config-file-scan-dir=/etc/php/conf.d \
+ --enable-inline-optimization \
+ --disable-debug \
+ --disable-rpath \
+ --disable-static \
+ --enable-shared \
+ --mandir=/usr/share/man \
+ --without-pear \
+ "
+
+ phpextensions="--enable-bcmath=shared \
+ --enable-calendar=shared \
+ --enable-dba=shared \
+ --enable-exif=shared \
+ --enable-ftp=shared \
+ --enable-gd-native-ttf \
+ --enable-intl=shared \
+ --enable-json=shared \
+ --enable-mbregex \
+ --enable-mbstring \
+ --enable-pdo \
+ --enable-phar=shared \
+ --enable-posix=shared \
+ --enable-session \
+ --enable-shmop=shared \
+ --enable-soap=shared \
+ --enable-sockets=shared \
+ --enable-sqlite-utf8 \
+ --enable-sysvmsg=shared \
+ --enable-sysvsem=shared \
+ --enable-sysvshm=shared \
+ --enable-xml \
+ --enable-zip=shared \
+ --with-bz2=shared \
+ --with-curl=shared \
+ --with-db4=/usr \
+ --with-enchant=shared,/usr \
+ --with-freetype-dir=shared,/usr \
+ --with-gd=shared \
+ --with-gdbm=shared \
+ --with-gettext=shared \
+ --with-gmp=shared \
+ --with-iconv=shared \
+ --with-icu-dir=/usr \
+ --with-imap-ssl=shared \
+ --with-imap=shared \
+ --with-jpeg-dir=shared,/usr \
+ --with-ldap=shared \
+ --with-ldap-sasl \
+ --with-mcrypt=shared \
+ --with-mhash \
+ --with-mssql=shared \
+ --with-mysql-sock=/var/run/mysqld/mysqld.sock \
+ --with-mysql=shared,mysqlnd \
+ --with-mysqli=shared,mysqlnd \
+ --with-openssl=shared \
+ --with-pcre-regex=/usr \
+ --with-pdo-mysql=shared,mysqlnd \
+ --with-pdo-odbc=shared,unixODBC,/usr \
+ --with-pdo-pgsql=shared \
+ --with-pdo-sqlite=shared,/usr \
+ --with-pgsql=shared \
+ --with-png-dir=shared,/usr \
+ --with-pspell=shared \
+ --with-regex=php \
+ --with-snmp=shared \
+ --with-sqlite3=shared,/usr \
+ --with-sqlite=shared \
+ --with-tidy=shared \
+ --with-unixODBC=shared,/usr \
+ --with-xmlrpc=shared \
+ --with-xsl=shared \
+ --with-zlib \
+ --without-db2 \
+ --without-db3 \
+ "
+
+ EXTENSION_DIR=/usr/lib/php/modules
+ export EXTENSION_DIR
+ PEAR_INSTALLDIR=/usr/share/pear
+ export PEAR_INSTALLDIR
+
+ cd ${srcdir}/${pkgbase}-${pkgver}
+
+ # apply suhosin patch
+ patch -p1 -i ${srcdir}/suhosin-patch-${_suhosinver}.patch
+
+ # adjust paths
+ patch -p0 -i ${srcdir}/php.ini.patch
+ patch -p0 -i ${srcdir}/php-fpm.conf.in.patch
+
+ # php
+ mkdir ${srcdir}/build-php
+ cd ${srcdir}/build-php
+ ln -s ../${pkgbase}-${pkgver}/configure
+ ./configure ${phpconfig} \
+ --disable-cgi \
+ --with-readline \
+ --enable-pcntl \
+ ${phpextensions}
+ make
+
+ # cgi and fcgi
+ # reuse the previous run; this will save us a lot of time
+ cp -a ${srcdir}/build-php ${srcdir}/build-cgi
+ cd ${srcdir}/build-cgi
+ ./configure ${phpconfig} \
+ --disable-cli \
+ --enable-cgi \
+ ${phpextensions}
+ make
+
+ # apache
+ cp -a ${srcdir}/build-php ${srcdir}/build-apache
+ cd ${srcdir}/build-apache
+ ./configure ${phpconfig} \
+ --disable-cli \
+ --with-apxs2 \
+ ${phpextensions}
+ make
+
+ # fpm
+ cp -a ${srcdir}/build-php ${srcdir}/build-fpm
+ cd ${srcdir}/build-fpm
+ ./configure ${phpconfig} \
+ --disable-cli \
+ --enable-fpm \
+ --with-fpm-user=http \
+ --with-fpm-group=http \
+ ${phpextensions}
+ make
+
+ # embed
+ cp -a ${srcdir}/build-php ${srcdir}/build-embed
+ cd ${srcdir}/build-embed
+ ./configure ${phpconfig} \
+ --disable-cli \
+ --enable-embed=shared \
+ ${phpextensions}
+ make
+
+ # pear
+ cp -a ${srcdir}/build-php ${srcdir}/build-pear
+ cd ${srcdir}/build-pear
+ ./configure ${phpconfig} \
+ --disable-cgi \
+ --with-readline \
+ --enable-pcntl \
+ --with-pear \
+ ${phpextensions}
+ make
+}
+
+# check() {
+# cd ${srcdir}/build-php
+# make test
+# }
+
+package_php() {
+ pkgdesc='An HTML-embedded scripting language'
+ depends=('pcre' 'libxml2' 'bzip2' 'openssl')
+ replaces=('php-fileinfo')
+ provides=('php-fileinfo')
+ conflicts=('php-fileinfo')
+ backup=('etc/php/php.ini')
+
+ cd ${srcdir}/build-php
+ make -j1 INSTALL_ROOT=${pkgdir} install
+ install -d -m755 ${pkgdir}/usr/share/pear
+ # install php.ini
+ install -D -m644 ${srcdir}/${pkgbase}-${pkgver}/php.ini-production ${pkgdir}/etc/php/php.ini
+ install -d -m755 ${pkgdir}/etc/php/conf.d/
+
+ # remove static modules
+ rm -f ${pkgdir}/usr/lib/php/modules/*.a
+ # remove modules provided by sub packages
+ rm -f ${pkgdir}/usr/lib/php/modules/{curl,enchant,gd,gmp,intl,ldap,mcrypt,mssql,odbc,pdo_odbc,pgsql,pdo_pgsql,pspell,snmp,sqlite3,pdo_sqlite,tidy,xsl}.so
+}
+
+package_php-cgi() {
+ pkgdesc='CGI and FCGI SAPI for PHP'
+ depends=('php' 'pcre' 'libxml2')
+
+ install -D -m755 ${srcdir}/build-cgi/sapi/cgi/php-cgi ${pkgdir}/usr/bin/php-cgi
+}
+
+package_php-apache() {
+ pkgdesc='Apache SAPI for PHP'
+ depends=('php' 'apache' 'pcre' 'libxml2')
+ backup=('etc/httpd/conf/extra/php5_module.conf')
+
+ install -D -m755 ${srcdir}/build-apache/libs/libphp5.so ${pkgdir}/usr/lib/httpd/modules/libphp5.so
+ install -D -m644 ${srcdir}/apache.conf ${pkgdir}/etc/httpd/conf/extra/php5_module.conf
+}
+
+package_php-fpm() {
+ pkgdesc='FastCGI Process Manager for PHP'
+ depends=('php')
+ backup=('etc/php/php-fpm.conf')
+
+ install -D -m755 ${srcdir}/build-fpm/sapi/fpm/php-fpm ${pkgdir}/usr/sbin/php-fpm
+ install -D -m644 ${srcdir}/build-fpm/sapi/fpm/php-fpm.8 ${pkgdir}/usr/share/man/man8/php-fpm.8
+ install -D -m644 ${srcdir}/build-fpm/sapi/fpm/php-fpm.conf ${pkgdir}/etc/php/php-fpm.conf
+ install -D -m755 ${srcdir}/rc.d.php-fpm ${pkgdir}/etc/rc.d/php-fpm
+ install -D -m644 ${srcdir}/logrotate.d.php-fpm ${pkgdir}/etc/logrotate.d/php-fpm
+ install -d -m755 ${pkgdir}/etc/php/fpm.d
+}
+
+package_php-embed() {
+ pkgdesc='Embed SAPI for PHP'
+ depends=('php' 'pcre' 'libxml2')
+
+ install -D -m755 ${srcdir}/build-embed/libs/libphp5.so ${pkgdir}/usr/lib/libphp5.so
+ install -D -m644 ${srcdir}/${pkgbase}-${pkgver}/sapi/embed/php_embed.h ${pkgdir}/usr/include/php/sapi/embed/php_embed.h
+}
+
+package_php-pear() {
+ pkgdesc='PHP Extension and Application Repository'
+ depends=('php' 'bash')
+ backup=('etc/php/pear.conf')
+
+ cd ${srcdir}/build-pear
+ make -j1 install-pear INSTALL_ROOT=${pkgdir}
+ local i
+ while read i; do
+ [ ! -e "$i" ] || rm -rf "$i"
+ done < <(find ${pkgdir} -name '.*')
+}
+
+package_php-curl() {
+ depends=('php' 'curl')
+ pkgdesc='curl module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/curl.so ${pkgdir}/usr/lib/php/modules/curl.so
+}
+
+package_php-enchant() {
+ depends=('php' 'enchant')
+ pkgdesc='enchant module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/enchant.so ${pkgdir}/usr/lib/php/modules/enchant.so
+}
+
+package_php-gd() {
+ depends=('php' 'libpng' 'libjpeg' 'freetype2')
+ pkgdesc='gd module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/gd.so ${pkgdir}/usr/lib/php/modules/gd.so
+}
+
+package_php-gmp() {
+ depends=('php' 'gmp')
+ pkgdesc='gmp module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/gmp.so ${pkgdir}/usr/lib/php/modules/gmp.so
+}
+
+package_php-intl() {
+ depends=('php' 'icu')
+ pkgdesc='intl module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/intl.so ${pkgdir}/usr/lib/php/modules/intl.so
+}
+
+package_php-ldap() {
+ depends=('php' 'libldap')
+ pkgdesc='ldap module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/ldap.so ${pkgdir}/usr/lib/php/modules/ldap.so
+}
+
+package_php-mcrypt() {
+ depends=('php' 'libmcrypt' 'libtool')
+ pkgdesc='mcrypt module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/mcrypt.so ${pkgdir}/usr/lib/php/modules/mcrypt.so
+}
+
+package_php-mssql() {
+ depends=('php' 'freetds')
+ pkgdesc='mssql module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/mssql.so ${pkgdir}/usr/lib/php/modules/mssql.so
+}
+
+package_php-odbc() {
+ depends=('php' 'unixodbc')
+ pkgdesc='ODBC modules for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/odbc.so ${pkgdir}/usr/lib/php/modules/odbc.so
+ install -D -m755 ${srcdir}/build-php/modules/pdo_odbc.so ${pkgdir}/usr/lib/php/modules/pdo_odbc.so
+}
+
+package_php-pgsql() {
+ depends=('php' 'postgresql-libs')
+ pkgdesc='PostgreSQL modules for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/pgsql.so ${pkgdir}/usr/lib/php/modules/pgsql.so
+ install -D -m755 ${srcdir}/build-php/modules/pdo_pgsql.so ${pkgdir}/usr/lib/php/modules/pdo_pgsql.so
+}
+
+package_php-pspell() {
+ depends=('php' 'aspell')
+ pkgdesc='pspell module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/pspell.so ${pkgdir}/usr/lib/php/modules/pspell.so
+}
+
+package_php-snmp() {
+ depends=('php' 'net-snmp')
+ pkgdesc='snmp module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/snmp.so ${pkgdir}/usr/lib/php/modules/snmp.so
+}
+
+package_php-sqlite() {
+ depends=('php' 'sqlite3')
+ pkgdesc='sqlite3 module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/sqlite3.so ${pkgdir}/usr/lib/php/modules/sqlite3.so
+ install -D -m755 ${srcdir}/build-php/modules/pdo_sqlite.so ${pkgdir}/usr/lib/php/modules/pdo_sqlite.so
+}
+
+package_php-tidy() {
+ depends=('php' 'tidyhtml')
+ pkgdesc='tidy module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/tidy.so ${pkgdir}/usr/lib/php/modules/tidy.so
+}
+
+package_php-xsl() {
+ depends=('php' 'libxslt')
+ pkgdesc='xsl module for PHP'
+ install -D -m755 ${srcdir}/build-php/modules/xsl.so ${pkgdir}/usr/lib/php/modules/xsl.so
+}
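Because almost every extension is built shared and split into its own package, nothing beyond what php.ini enables is loaded by default; the build points --with-config-file-scan-dir at /etc/php/conf.d. A minimal sketch of turning on one split-out module after installing its package (the ini file name under conf.d is an arbitrary choice):

    # e.g. after installing php-gd: drop a one-line ini into the scan dir
    echo 'extension=gd.so' > /etc/php/conf.d/gd.ini
    php -m | grep -x gd    # should list the module once the ini is picked up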
diff --git a/testing/php/apache.conf b/testing/php/apache.conf
new file mode 100644
index 000000000..c3ca0aad5
--- /dev/null
+++ b/testing/php/apache.conf
@@ -0,0 +1,13 @@
+# Required modules: dir_module, php5_module
+
+<IfModule dir_module>
+ <IfModule php5_module>
+ DirectoryIndex index.php index.html
+ <FilesMatch "\.php$">
+ SetHandler application/x-httpd-php
+ </FilesMatch>
+ <FilesMatch "\.phps$">
+ SetHandler application/x-httpd-php-source
+ </FilesMatch>
+ </IfModule>
+</IfModule>
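This fragment only maps .php/.phps to the handler; Apache still has to load the module and include the file. A sketch of the httpd.conf side, assuming Arch's default /etc/httpd layout (these two directives are not shipped by the package):

    cat >> /etc/httpd/conf/httpd.conf <<'EOF'
    LoadModule php5_module modules/libphp5.so
    Include conf/extra/php5_module.conf
    EOF
    apachectl configtest && apachectl graceful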
diff --git a/testing/php/logrotate.d.php-fpm b/testing/php/logrotate.d.php-fpm
new file mode 100644
index 000000000..7a1ba2597
--- /dev/null
+++ b/testing/php/logrotate.d.php-fpm
@@ -0,0 +1,6 @@
+/var/log/php-fpm.log {
+ missingok
+ postrotate
+ /etc/rc.d/php-fpm logrotate >/dev/null || true
+ endscript
+}
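The postrotate hook relies on the rc script's logrotate action (SIGUSR1) to make php-fpm reopen its log. The rule can be exercised without waiting for the daily cron run (standard logrotate flags; the -d pass changes nothing on disk):

    logrotate -d /etc/logrotate.d/php-fpm    # dry run: show what would happen
    logrotate -f /etc/logrotate.d/php-fpm    # force one rotation to test the postrotate hook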
diff --git a/testing/php/php-fpm.conf.in.patch b/testing/php/php-fpm.conf.in.patch
new file mode 100644
index 000000000..93c62430a
--- /dev/null
+++ b/testing/php/php-fpm.conf.in.patch
@@ -0,0 +1,80 @@
+--- sapi/fpm/php-fpm.conf.in 2010-12-11 08:31:47.695294987 +0100
++++ sapi/fpm/php-fpm.conf.in 2010-12-11 08:31:55.907812237 +0100
+@@ -12,7 +12,7 @@
+ ; Relative path can also be used. They will be prefixed by:
+ ; - the global prefix if it's been set (-p arguement)
+ ; - @prefix@ otherwise
+-;include=etc/fpm.d/*.conf
++;include=/etc/php/fpm.d/*.conf
+
+ ;;;;;;;;;;;;;;;;;;
+ ; Global Options ;
+@@ -22,7 +22,7 @@
+ ; Pid file
+ ; Note: the default prefix is @EXPANDED_LOCALSTATEDIR@
+ ; Default Value: none
+-;pid = run/php-fpm.pid
++pid = run/php-fpm/php-fpm.pid
+
+ ; Error log file
+ ; Note: the default prefix is @EXPANDED_LOCALSTATEDIR@
+@@ -93,7 +93,8 @@
+ ; specific port;
+ ; '/path/to/unix/socket' - to listen on a unix socket.
+ ; Note: This value is mandatory.
+-listen = 127.0.0.1:9000
++;listen = 127.0.0.1:9000
++listen = /var/run/php-fpm/php-fpm.sock
+
+ ; Set listen(2) backlog. A value of '-1' means unlimited.
+ ; Default Value: 128 (-1 on FreeBSD and OpenBSD)
+@@ -112,9 +113,9 @@
+ ; BSD-derived systems allow connections regardless of permissions.
+ ; Default Values: user and group are set as the running user
+ ; mode is set to 0666
+-;listen.owner = @php_fpm_user@
+-;listen.group = @php_fpm_group@
+-;listen.mode = 0666
++listen.owner = @php_fpm_user@
++listen.group = @php_fpm_group@
++listen.mode = 0660
+
+ ; Unix user/group of processes
+ ; Note: The user is mandatory. If the group is not set, the default user's group
+@@ -154,23 +155,23 @@
+ ; The number of child processes created on startup.
+ ; Note: Used only when pm is set to 'dynamic'
+ ; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
+-;pm.start_servers = 20
++pm.start_servers = 20
+
+ ; The desired minimum number of idle server processes.
+ ; Note: Used only when pm is set to 'dynamic'
+ ; Note: Mandatory when pm is set to 'dynamic'
+-;pm.min_spare_servers = 5
++pm.min_spare_servers = 5
+
+ ; The desired maximum number of idle server processes.
+ ; Note: Used only when pm is set to 'dynamic'
+ ; Note: Mandatory when pm is set to 'dynamic'
+-;pm.max_spare_servers = 35
++pm.max_spare_servers = 35
+
+ ; The number of requests each child process should execute before respawning.
+ ; This can be useful to work around memory leaks in 3rd party libraries. For
+ ; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
+ ; Default Value: 0
+-;pm.max_requests = 500
++pm.max_requests = 500
+
+ ; The URI to view the FPM status page. If this value is not set, no URI will be
+ ; recognized as a status page. By default, the status page shows the following
+@@ -264,7 +265,7 @@
+ ; Chdir to this directory at the start.
+ ; Note: relative path can be used.
+ ; Default Value: current directory or / when chroot
+-;chdir = /var/www
++;chdir = /srv/http
+
+ ; Redirect worker stdout and stderr into main error log. If not set, stdout and
+ ; stderr will be redirected to /dev/null according to FastCGI specs.
diff --git a/testing/php/php.ini.patch b/testing/php/php.ini.patch
new file mode 100644
index 000000000..356e190b4
--- /dev/null
+++ b/testing/php/php.ini.patch
@@ -0,0 +1,126 @@
+--- php.ini-production 2011-02-09 01:25:44.000000000 +0100
++++ php.ini-production 2011-03-19 11:11:44.496987763 +0100
+@@ -376,7 +376,7 @@
+ ; or per-virtualhost web server configuration file. This directive is
+ ; *NOT* affected by whether Safe Mode is turned On or Off.
+ ; http://php.net/open-basedir
+-;open_basedir =
++open_basedir = /srv/http/:/home/:/tmp/:/usr/share/pear/
+
+ ; This directive allows you to disable certain functions for security reasons.
+ ; It receives a comma-delimited list of function names. This directive is
+@@ -781,7 +781,7 @@
+ ;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ; UNIX: "/path1:/path2"
+-;include_path = ".:/php/includes"
++include_path = ".:/usr/share/pear"
+ ;
+ ; Windows: "\path1;\path2"
+ ;include_path = ".;c:\php\includes"
+@@ -804,7 +804,7 @@
+
+ ; Directory in which the loadable extensions (modules) reside.
+ ; http://php.net/extension-dir
+-; extension_dir = "./"
++extension_dir = "/usr/lib/php/modules/"
+ ; On windows:
+ ; extension_dir = "ext"
+
+@@ -938,53 +938,49 @@
+ ; If you only provide the name of the extension, PHP will look for it in its
+ ; default extension directory.
+ ;
+-; Windows Extensions
+-; Note that ODBC support is built in, so no dll is needed for it.
+-; Note that many DLL files are located in the extensions/ (PHP 4) ext/ (PHP 5)
+-; extension folders as well as the separate PECL DLL download (PHP 5).
+-; Be sure to appropriately set the extension_dir directive.
+-;
+-;extension=php_bz2.dll
+-;extension=php_curl.dll
+-;extension=php_fileinfo.dll
+-;extension=php_gd2.dll
+-;extension=php_gettext.dll
+-;extension=php_gmp.dll
+-;extension=php_intl.dll
+-;extension=php_imap.dll
+-;extension=php_interbase.dll
+-;extension=php_ldap.dll
+-;extension=php_mbstring.dll
+-;extension=php_exif.dll ; Must be after mbstring as it depends on it
+-;extension=php_mysql.dll
+-;extension=php_mysqli.dll
+-;extension=php_oci8.dll ; Use with Oracle 10gR2 Instant Client
+-;extension=php_oci8_11g.dll ; Use with Oracle 11g Instant Client
+-;extension=php_openssl.dll
+-;extension=php_pdo_firebird.dll
+-;extension=php_pdo_mssql.dll
+-;extension=php_pdo_mysql.dll
+-;extension=php_pdo_oci.dll
+-;extension=php_pdo_odbc.dll
+-;extension=php_pdo_pgsql.dll
+-;extension=php_pdo_sqlite.dll
+-;extension=php_pgsql.dll
+-;extension=php_pspell.dll
+-;extension=php_shmop.dll
+-
+-; The MIBS data available in the PHP distribution must be installed.
+-; See http://www.php.net/manual/en/snmp.installation.php
+-;extension=php_snmp.dll
+-
+-;extension=php_soap.dll
+-;extension=php_sockets.dll
+-;extension=php_sqlite.dll
+-;extension=php_sqlite3.dll
+-;extension=php_sybase_ct.dll
+-;extension=php_tidy.dll
+-;extension=php_xmlrpc.dll
+-;extension=php_xsl.dll
+-;extension=php_zip.dll
++;extension=bcmath.so
++;extension=bz2.so
++;extension=calendar.so
++;extension=curl.so
++;extension=dba.so
++;extension=enchant.so
++;extension=exif.so
++;extension=ftp.so
++;extension=gd.so
++extension=gettext.so
++;extension=gmp.so
++;extension=iconv.so
++;extension=imap.so
++;extension=intl.so
++;extension=json.so
++;extension=ldap.so
++;extension=mcrypt.so
++;extension=mssql.so
++;extension=mysqli.so
++;extension=mysql.so
++;extension=odbc.so
++;extension=openssl.so
++;extension=pdo_mysql.so
++;extension=pdo_odbc.so
++;extension=pdo_pgsql.so
++;extension=pdo_sqlite.so
++;extension=pgsql.so
++;extension=phar.so
++;extension=posix.so
++;extension=pspell.so
++;extension=shmop.so
++;extension=snmp.so
++;extension=soap.so
++;extension=sockets.so
++;extension=sqlite3.so
++;extension=sqlite.so
++;extension=sysvmsg.so
++;extension=sysvsem.so
++;extension=sysvshm.so
++;extension=tidy.so
++;extension=xmlrpc.so
++;extension=xsl.so
++;extension=zip.so
+
+ ;;;;;;;;;;;;;;;;;;;
+ ; Module Settings ;
diff --git a/testing/php/rc.d.php-fpm b/testing/php/rc.d.php-fpm
new file mode 100644
index 000000000..54bcf4d5b
--- /dev/null
+++ b/testing/php/rc.d.php-fpm
@@ -0,0 +1,158 @@
+#!/bin/bash
+
+. /etc/rc.conf
+. /etc/rc.d/functions
+
+
+wait_for_pid () {
+ try=0
+ while test $try -lt 35 ; do
+ case "$1" in
+ 'created')
+ if [ -f "$2" ] ; then
+ try=''
+ break
+ fi
+ ;;
+ 'removed')
+ if [ ! -f "$2" ] ; then
+ try=''
+ break
+ fi
+ ;;
+ esac
+
+ stat_append '.'
+ try=`expr $try + 1`
+ sleep 1
+ done
+}
+
+test_config() {
+ stat_busy 'Checking configuration'
+ if [ $(id -u) -ne 0 ]; then
+ stat_append '(This script must be run as root)'
+ stat_die
+ fi
+
+ if [ ! -r /etc/php/php-fpm.conf ]; then
+ stat_append '(/etc/php/php-fpm.conf not found)'
+ stat_die
+ fi
+
+	# assign separately so $? below reflects php-fpm's exit status, not the 'local' builtin's
+	local test
+	test=$(/usr/sbin/php-fpm -t 2>&1)
+ if [ $? -gt 0 ]; then
+ stat_append '(error in /etc/php/php-fpm.conf)'
+ stat_die
+ elif echo $test | grep -qi 'error'; then
+ stat_append '(error in /etc/php/php.ini)'
+ stat_die
+ fi
+
+ [ -d /var/run/php-fpm ] || install -d -m755 /var/run/php-fpm
+
+ stat_done
+}
+
+case "$1" in
+ start)
+ test_config
+ stat_busy 'Starting php-fpm'
+
+ /usr/sbin/php-fpm
+
+ if [ "$?" != 0 ] ; then
+ stat_fail
+ exit 1
+ fi
+
+ wait_for_pid created /var/run/php-fpm/php-fpm.pid
+
+ if [ -n "$try" ] ; then
+ stat_fail
+ exit 1
+ else
+ add_daemon php-fpm
+ stat_done
+ fi
+ ;;
+
+ stop)
+ test_config
+ stat_busy 'Gracefully shutting down php-fpm'
+
+ if [ ! -r /var/run/php-fpm/php-fpm.pid ] ; then
+ stat_fail
+ exit 1
+ fi
+
+ kill -QUIT `cat /var/run/php-fpm/php-fpm.pid`
+
+	wait_for_pid removed /var/run/php-fpm/php-fpm.pid
+
+ if [ -n "$try" ] ; then
+ stat_fail
+ exit 1
+ else
+ rm_daemon php-fpm
+ stat_done
+ fi
+ ;;
+
+ force-quit)
+ stat_busy 'Terminating php-fpm'
+
+ if [ ! -r /var/run/php-fpm/php-fpm.pid ] ; then
+ stat_fail
+ exit 1
+ fi
+
+ kill -TERM `cat /var/run/php-fpm/php-fpm.pid`
+
+ wait_for_pid removed /var/run/php-fpm/php-fpm.pid
+
+ if [ -n "$try" ] ; then
+ stat_fail
+ exit 1
+ else
+ rm_daemon php-fpm
+ stat_done
+ fi
+ ;;
+
+ restart)
+ $0 stop
+ $0 start
+ ;;
+
+ reload)
+ test_config
+	stat_busy 'Reloading php-fpm'
+
+ if [ ! -r /var/run/php-fpm/php-fpm.pid ] ; then
+ stat_fail
+ exit 1
+ fi
+
+ kill -USR2 `cat /var/run/php-fpm/php-fpm.pid`
+ stat_done
+ ;;
+
+ logrotate)
+	stat_busy 'Reopening php-fpm log'
+
+ if [ ! -r /var/run/php-fpm/php-fpm.pid ] ; then
+ stat_fail
+ exit 1
+ fi
+
+ kill -USR1 `cat /var/run/php-fpm/php-fpm.pid`
+ stat_done
+ ;;
+
+ *)
+ echo "usage: $0 {start|stop|force-quit|restart|reload|logrotate}"
+ exit 1
+ ;;
+
+esac
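In day-to-day use the script maps its actions onto the signals php-fpm understands; a quick reference taken from the cases above:

    /etc/rc.d/php-fpm start        # checks config, then launches /usr/sbin/php-fpm
    /etc/rc.d/php-fpm reload       # SIGUSR2: graceful reload of config and workers
    /etc/rc.d/php-fpm logrotate    # SIGUSR1: reopen the log file (used by logrotate.d.php-fpm)
    /etc/rc.d/php-fpm stop         # SIGQUIT: graceful shutdown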
diff --git a/testing/php/suhosin-patch-5.3.6-0.9.10.patch.gz b/testing/php/suhosin-patch-5.3.6-0.9.10.patch.gz
new file mode 100644
index 000000000..7167ce2d0
--- /dev/null
+++ b/testing/php/suhosin-patch-5.3.6-0.9.10.patch.gz
Binary files differ
diff --git a/testing/postfix/PKGBUILD b/testing/postfix/PKGBUILD
new file mode 100644
index 000000000..7ae08c572
--- /dev/null
+++ b/testing/postfix/PKGBUILD
@@ -0,0 +1,64 @@
+# $Id: PKGBUILD 127765 2011-06-18 08:12:47Z bisson $
+# Contributor: Jeff Brodnax <tullyarcher@bellsouth.net>
+# Maintainer: Gaetan Bisson <bisson@archlinux.org>
+# Maintainer: Paul Mattal <paul@archlinux.org>
+
+pkgname=postfix
+pkgver=2.8.3
+pkgrel=4
+pkgdesc='Secure, fast, easy-to-administer drop-in replacement for Sendmail (MTA)'
+url='http://www.postfix.org/'
+arch=('i686' 'x86_64')
+license=('custom')
+depends=('pcre' 'libsasl' 'libmysqlclient' 'postgresql-libs' 'sqlite3' 'libldap' 'db')
+backup=('etc/postfix/'{access,aliases,canonical,generic,header_checks,main.cf,master.cf,relocated,transport,virtual})
+source=("ftp://ftp.porcupine.org/mirrors/postfix-release/official/${pkgname}-${pkgver}.tar.gz" \
+ 'aliases.patch' \
+ 'rc.d')
+sha1sums=('2604066f158f5327449960afd6334b996dc01799'
+ '5fc3de6c7df1e5851a0a379e825148868808318b'
+ '40c6be2eb55e6437a402f43775cdb3d22ea87a66')
+
+provides=('smtp-server' 'smtp-forwarder')
+replaces=('postfix-mysql' 'postfix-pgsql')
+conflicts=('postfix-mysql' 'postfix-pgsql' 'smtp-server' 'smtp-forwarder')
+
+install=install
+
+build() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+
+ make makefiles DEBUG='' CCARGS=' \
+ -DUSE_SASL_AUTH -DUSE_CYRUS_SASL -I/usr/include/sasl \
+ -DHAS_LDAP \
+ -DUSE_TLS \
+ -DHAS_MYSQL -I/usr/include/mysql \
+ -DHAS_PGSQL -I/usr/include/postgresql \
+ -DHAS_SQLITE \
+ ' AUXLIBS=' \
+ -lsasl2 \
+ -lldap -llber \
+ -lssl -lcrypto \
+ -lmysqlclient -lz -lm \
+ -lpq \
+ -lsqlite3 -lpthread \
+ ' OPT="${CFLAGS} ${LDFLAGS}"
+
+ make
+}
+
+package() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+
+ sh postfix-install -non-interactive \
+ install_root="${pkgdir}" \
+ daemon_directory="/usr/lib/${pkgname}" \
+ sample_directory="/etc/${pkgname}/sample" \
+ manpage_directory="/usr/share/man"
+
+ install -D -m755 ../rc.d "${pkgdir}/etc/rc.d/${pkgname}"
+ install -D -m644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+
+ cd "${pkgdir}"
+ patch -p0 < "${srcdir}"/aliases.patch
+}
diff --git a/testing/postfix/aliases.patch b/testing/postfix/aliases.patch
new file mode 100644
index 000000000..6767870b7
--- /dev/null
+++ b/testing/postfix/aliases.patch
@@ -0,0 +1,18 @@
+--- etc/postfix/main.cf.orig 2010-12-13 20:18:22.000000000 +0100
++++ etc/postfix/main.cf 2010-12-13 20:18:24.000000000 +0100
+@@ -382,6 +382,7 @@
+ #alias_maps = hash:/etc/aliases
+ #alias_maps = hash:/etc/aliases, nis:mail.aliases
+ #alias_maps = netinfo:/aliases
++alias_maps = hash:/etc/postfix/aliases
+
+ # The alias_database parameter specifies the alias database(s) that
+ # are built with "newaliases" or "sendmail -bi". This is a separate
+@@ -392,6 +393,7 @@
+ #alias_database = dbm:/etc/mail/aliases
+ #alias_database = hash:/etc/aliases
+ #alias_database = hash:/etc/aliases, hash:/opt/majordomo/aliases
++alias_database = $alias_maps
+
+ # ADDRESS EXTENSIONS (e.g., user+foo)
+ #
diff --git a/testing/postfix/install b/testing/postfix/install
new file mode 100644
index 000000000..f5d81a565
--- /dev/null
+++ b/testing/postfix/install
@@ -0,0 +1,35 @@
+post_install() {
+ getent group postdrop &>/dev/null || groupadd -g 75 postdrop >/dev/null
+ getent group postfix &>/dev/null || groupadd -g 73 postfix >/dev/null
+ getent passwd postfix &>/dev/null || useradd -u 73 -d /var/spool/postfix -g postfix -s /bin/false postfix >/dev/null
+
+ cd var/spool/postfix
+ chown postfix active bounce corrupt defer deferred flush hold incoming private public maildrop trace saved
+ chgrp postdrop public maildrop
+ cd ../../../usr/sbin
+ chgrp postdrop postqueue postdrop
+ chmod g+s postqueue postdrop
+ cd ../..
+ chown postfix var/lib/postfix
+
+ newaliases
+}
+
+post_upgrade() {
+ post_install
+
+ if [ `vercmp $2 2.8` = -1 ]; then cat <<EOF
+
+==> You must now execute "/etc/rc.d/postfix reload" (or restart).
+==> This is needed because the queue manager to delivery agent
+==> protocol has changed.
+
+EOF
+ fi
+}
+
+pre_remove() {
+ getent passwd postfix &>/dev/null && userdel postfix >/dev/null
+ getent group postfix &>/dev/null && groupdel postfix >/dev/null
+ getent group postdrop &>/dev/null && groupdel postdrop >/dev/null
+}
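The post_upgrade notice is gated on pacman's vercmp, which prints -1 when the first version is older (that is the value the script tests for), so the reload message only appears when upgrading from a release before 2.8. For example:

    vercmp 2.7.11 2.8    # prints -1: upgrading from an older release, notice is shown
    vercmp 2.8.3 2.8     # prints a positive value: nothing extra to do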
diff --git a/testing/postfix/rc.d b/testing/postfix/rc.d
new file mode 100755
index 000000000..fa314bb84
--- /dev/null
+++ b/testing/postfix/rc.d
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. /etc/rc.conf
+. /etc/rc.d/functions
+
+name=postfix
+
+case "$1" in
+start)
+ stat_busy "Starting $name daemon"
+ /usr/sbin/postfix start &>/dev/null \
+ && { add_daemon $name; stat_done; } \
+ || { stat_fail; exit 1; }
+ ;;
+stop)
+ stat_busy "Stopping $name daemon"
+ /usr/sbin/postfix stop &>/dev/null \
+ && { rm_daemon $name; stat_done; } \
+ || { stat_fail; exit 1; }
+ ;;
+reload)
+ stat_busy "Reloading $name daemon"
+ /usr/sbin/postfix reload &>/dev/null \
+ && { stat_done; } \
+ || { stat_fail; exit 1; }
+ ;;
+restart)
+ $0 stop
+ sleep 1
+ $0 start
+ ;;
+*)
+ echo "usage: $0 {start|stop|restart|reload}"
+ ;;
+esac
+exit 0
diff --git a/testing/python2/PKGBUILD b/testing/python2/PKGBUILD
new file mode 100644
index 000000000..e0c363027
--- /dev/null
+++ b/testing/python2/PKGBUILD
@@ -0,0 +1,83 @@
+# $Id: PKGBUILD 129895 2011-06-29 15:22:59Z stephane $
+# Maintainer: Stéphane Gaudreault <stephane@archlinux.org>
+# Contributor: Allan McRae <allan@archlinux.org>
+# Contributor: Jason Chu <jason@archlinux.org>
+
+pkgname=python2
+pkgver=2.7.2
+pkgrel=2
+_pybasever=2.7
+pkgdesc="A high-level scripting language"
+arch=('i686' 'x86_64')
+license=('PSF')
+url="http://www.python.org/"
+depends=('bzip2' 'gdbm' 'openssl' 'zlib' 'expat' 'sqlite3' 'libffi')
+makedepends=('tk')
+optdepends=('tk: for IDLE')
+conflicts=('python<3')
+options=('!makeflags')
+source=(http://www.python.org/ftp/python/${pkgver%rc?}/Python-${pkgver}.tar.xz)
+sha1sums=('56700044141402dc35e7a0a24aa7ffda1a8c1a53')
+
+build() {
+ cd "${srcdir}/Python-${pkgver}"
+
+ # Temporary workaround for FS#22322
+ # See http://bugs.python.org/issue10835 for upstream report
+ sed -i "/progname =/s/python/python${_pybasever}/" Python/pythonrun.c
+
+ # Enable built-in SQLite3 module to load extensions (fix FS#22122)
+ sed -i "/SQLITE_OMIT_LOAD_EXTENSION/d" setup.py
+
+ # FS#23997
+ sed -i -e "s|^#.* /usr/local/bin/python|#!/usr/bin/python2|" Lib/cgi.py
+
+ # Ensure that we are using the system copy of various libraries (expat, zlib and libffi),
+ # rather than copies shipped in the tarball
+ rm -r Modules/expat
+ rm -r Modules/zlib
+ rm -r Modules/_ctypes/{darwin,libffi}*
+
+ export OPT="${CFLAGS}"
+ ./configure --prefix=/usr --enable-shared --with-threads --enable-ipv6 \
+ --enable-unicode=ucs4 --with-system-expat --with-system-ffi
+
+ make
+}
+
+package() {
+ cd "${srcdir}/Python-${pkgver}"
+ make DESTDIR="${pkgdir}" altinstall maninstall
+
+ ln -sf python${_pybasever} "${pkgdir}/usr/bin/python2"
+ ln -sf python${_pybasever}-config "${pkgdir}/usr/bin/python2-config"
+ ln -sf python${_pybasever}.1 "${pkgdir}/usr/share/man/man1/python2.1"
+
+ ln -sf ../../libpython${_pybasever}.so \
+ "${pkgdir}/usr/lib/python${_pybasever}/config/libpython${_pybasever}.so"
+
+ mv "${pkgdir}/usr/bin/smtpd.py" "${pkgdir}/usr/lib/python${_pybasever}/"
+
+ # some useful "stuff"
+ install -dm755 "${pkgdir}"/usr/lib/python${_pybasever}/Tools/{i18n,scripts}
+ install -m755 Tools/i18n/{msgfmt,pygettext}.py \
+ "${pkgdir}/usr/lib/python${_pybasever}/Tools/i18n/"
+ install -m755 Tools/scripts/{README,*py} \
+ "${pkgdir}/usr/lib/python${_pybasever}/Tools/scripts/"
+
+ # fix conflicts with python
+ mv "${pkgdir}"/usr/bin/idle{,2}
+ mv "${pkgdir}"/usr/bin/pydoc{,2}
+ mv "${pkgdir}"/usr/bin/2to3{,-2.7}
+
+ # clean up #!s
+ find "${pkgdir}/usr/lib/python${_pybasever}/" -name '*.py' | \
+ xargs sed -i "s|#[ ]*![ ]*/usr/bin/env python$|#!/usr/bin/env python2|"
+
+ # clean-up reference to build directory
+ sed -i "s#${srcdir}/Python-${pkgver}:##" \
+ "${pkgdir}/usr/lib/python${_pybasever}/config/Makefile"
+
+ # license
+ install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+}
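The shebang clean-up keeps packaged scripts from grabbing whatever /usr/bin/env python resolves to once python 3 owns that name. A quick post-build check that the sed pass caught everything (run inside the build environment; the path mirrors the install step above):

    if grep -Rl '^#!.*bin/env python$' "${pkgdir}/usr/lib/python2.7/"; then
        echo "unfixed shebangs remain" >&2
    else
        echo "all shebangs point at python2"
    fi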
diff --git a/testing/rasqal/PKGBUILD b/testing/rasqal/PKGBUILD
new file mode 100644
index 000000000..63c0c31b3
--- /dev/null
+++ b/testing/rasqal/PKGBUILD
@@ -0,0 +1,29 @@
+# $Id: PKGBUILD 130165 2011-07-02 23:00:40Z andrea $
+# Maintainer: AndyRTR <andyrtr@archlinux.org>
+# Contributor: Lawrence Lee <valheru@facticius.net>
+
+pkgname=rasqal
+epoch=1
+pkgver=0.9.26
+pkgrel=1
+pkgdesc="A free C library that handles Resource Description Framework (RDF) query syntaxes, query construction and query execution returning result bindings"
+url="http://librdf.org/rasqal"
+license=('GPL' 'LGPL')
+arch=('i686' 'x86_64')
+depends=('raptor>=2.0.3' 'mpfr')
+options=('!libtool')
+source=(http://download.librdf.org/source/${pkgname}-${pkgver}.tar.gz)
+md5sums=('1e9fe5423498f10f636319633855e691')
+
+build() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ ./configure --prefix=/usr \
+ --disable-static \
+ --enable-release
+ make
+}
+
+package() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ make DESTDIR=${pkgdir} install
+}
diff --git a/testing/redland/PKGBUILD b/testing/redland/PKGBUILD
new file mode 100644
index 000000000..949804063
--- /dev/null
+++ b/testing/redland/PKGBUILD
@@ -0,0 +1,78 @@
+# $Id: PKGBUILD 129914 2011-06-29 19:31:01Z andrea $
+# Maintainer: AndyRTR <andyrtr@archlinux.org>
+# Contributor: Francois Charette <francois.archlinux.org>
+
+pkgbase=redland
+epoch=1
+pkgname=('redland' 'redland-storage-mysql' 'redland-storage-postgresql' 'redland-storage-virtuoso' 'redland-storage-sqlite')
+pkgver=1.0.13
+pkgrel=4
+url="http://librdf.org/"
+license=("GPL")
+arch=('i686' 'x86_64')
+makedepends=('rasqal>=0.9.26' 'raptor>=2.0.3' 'db>=5.2' 'postgresql-libs' 'libmysqlclient' 'unixodbc' 'sqlite3')
+options=('!libtool')
+source=(http://download.librdf.org/source/$pkgname-$pkgver.tar.gz
+ rpath.diff)
+md5sums=('96c15f36f842ad7e1c9d225e4ca97b68'
+ 'acc85e784f01a656bd56777f95880787')
+
+build() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ ./configure --prefix=/usr \
+ --enable-release \
+ --disable-static \
+ --with-raptor=system \
+ --with-rasqal=system \
+ --with-sqlite=3
+
+ # nuke rpath
+ patch -Np0 -i ${srcdir}/rpath.diff
+
+ make
+}
+
+package_redland() {
+ pkgdesc="Library that provides a high-level interface to RDF data"
+ depends=('rasqal>=0.9.26' 'raptor>=2.0.3' 'libtool')
+
+ cd ${srcdir}/${pkgbase}-${pkgver}
+ make DESTDIR=${pkgdir} install
+ rm -rf ${pkgdir}/usr/lib/redland
+}
+
+package_redland-storage-mysql() {
+ pkgdesc="MySQL storage support for Redland"
+ depends=('redland' 'libmysqlclient')
+
+ cd ${srcdir}/${pkgbase}-${pkgver}
+ install -dm755 ${pkgdir}/usr/lib/redland
+ install -m755 src/.libs/librdf_storage_mysql.so ${pkgdir}/usr/lib/redland/librdf_storage_mysql.so
+}
+
+package_redland-storage-postgresql() {
+ pkgdesc="PostgreSQL storage support for Redland"
+ depends=('redland' 'postgresql-libs')
+
+ cd ${srcdir}/${pkgbase}-${pkgver}
+ install -dm755 ${pkgdir}/usr/lib/redland
+ install -m755 src/.libs/librdf_storage_postgresql.so ${pkgdir}/usr/lib/redland/
+}
+
+package_redland-storage-virtuoso() {
+ pkgdesc="Virtuoso storage support for Redland"
+ depends=('redland' 'unixodbc' 'db')
+
+ cd ${srcdir}/${pkgbase}-${pkgver}
+ install -dm755 ${pkgdir}/usr/lib/redland
+ install -m755 src/.libs/librdf_storage_virtuoso.so ${pkgdir}/usr/lib/redland/
+}
+
+package_redland-storage-sqlite() {
+ pkgdesc="SQLite storage support for Redland"
+ depends=('redland' 'sqlite3' 'db')
+
+ cd ${srcdir}/${pkgbase}-${pkgver}
+ install -dm755 ${pkgdir}/usr/lib/redland
+ install -m755 src/.libs/librdf_storage_sqlite.so ${pkgdir}/usr/lib/redland/
+}
diff --git a/testing/redland/rpath.diff b/testing/redland/rpath.diff
new file mode 100644
index 000000000..831bc80ed
--- /dev/null
+++ b/testing/redland/rpath.diff
@@ -0,0 +1,11 @@
+--- libtool 2011-02-20 10:46:47.000000000 +0100
++++ libtool.new 2011-02-20 10:52:58.285764909 +0100
+@@ -324,7 +324,7 @@
+
+ # Flag to hardcode $libdir into a binary during linking.
+ # This must work even if $libdir does not exist
+-hardcode_libdir_flag_spec="\${wl}-rpath \${wl}\$libdir"
++hardcode_libdir_flag_spec=" "
+
+ # If ld is used when linking, flag to hardcode $libdir into a binary
+ # during linking. This must work even if $libdir does not exist.
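Blanking hardcode_libdir_flag_spec keeps libtool from embedding an RPATH in the storage back-end modules. Whether it worked can be confirmed on an installed module (the path is one of those packaged above):

    readelf -d /usr/lib/redland/librdf_storage_sqlite.so | grep -E 'R(UN)?PATH' \
        || echo "no RPATH/RUNPATH entries present"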
diff --git a/testing/ruby/PKGBUILD b/testing/ruby/PKGBUILD
new file mode 100644
index 000000000..a829449da
--- /dev/null
+++ b/testing/ruby/PKGBUILD
@@ -0,0 +1,49 @@
+# $Id: PKGBUILD 127449 2011-06-15 09:33:37Z eric $
+# Maintainer:
+# Contributor: Allan McRae <allan@archlinux.org>
+# Contributor: John Proctor <jproctor@prium.net>
+# Contributor: Jeramy Rutley <jrutley@gmail.com>
+
+pkgbase=ruby
+pkgname=('ruby' 'ruby-docs')
+pkgver=1.9.2_p180
+pkgrel=3
+pkgdesc="An object-oriented language for quick and easy programming"
+arch=('i686' 'x86_64')
+url="http://www.ruby-lang.org/en/"
+license=('custom')
+makedepends=('openssl' 'tk' 'libffi' 'doxygen' 'graphviz')
+options=('!emptydirs' '!makeflags')
+source=("ftp://ftp.ruby-lang.org/pub/${pkgbase}/1.9/${pkgbase}-${pkgver//_/-}.tar.bz2")
+md5sums=('68510eeb7511c403b91fe5476f250538')
+sha1sums=('10824b44c8060c7b9b5afc0b3519a1e9f02f7fe5')
+
+build() {
+ cd "${srcdir}/${pkgbase}-${pkgver//_/-}"
+ ./configure --prefix=/usr \
+ --sysconfdir=/etc \
+ --enable-shared \
+ --enable-pthread \
+ --disable-rpath
+ make
+}
+
+package_ruby() {
+ depends=('openssl' 'libffi')
+ provides=('rubygems' 'rake')
+ conflicts=('rake')
+ optdepends=('tk: for Ruby/TK')
+
+ cd "${srcdir}/${pkgbase}-${pkgver//_/-}"
+ make DESTDIR="${pkgdir}" install-nodoc
+ install -Dm644 COPYING "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+}
+
+package_ruby-docs() {
+ pkgdesc="Documentation files for ruby"
+ depends=('ruby')
+
+ cd "${srcdir}/${pkgbase}-${pkgver//_/-}"
+ make DESTDIR="${pkgdir}" install-doc install-capi
+ install -Dm644 COPYING "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+}
diff --git a/testing/soprano/PKGBUILD b/testing/soprano/PKGBUILD
new file mode 100644
index 000000000..698851fbd
--- /dev/null
+++ b/testing/soprano/PKGBUILD
@@ -0,0 +1,35 @@
+# $Id: PKGBUILD 130173 2011-07-02 23:43:27Z andrea $
+# Maintainer: Andrea Scarpino <andrea@archlinux.org>
+# Contributor: Tobias Powalowski <tpowa@archlinux.org>
+
+pkgname=soprano
+pkgver=2.6.50git20110703
+pkgrel=1
+pkgdesc='A library which provides a highly usable object-oriented C++/Qt4 framework for RDF data'
+arch=('i686' 'x86_64')
+url='http://soprano.sourceforge.net/'
+license=('GPL' 'LGPL')
+depends=('qt' 'redland-storage-virtuoso' 'libiodbc' 'virtuoso')
+makedepends=('cmake' 'openjdk6' 'doxygen')
+#source=("http://downloads.sourceforge.net/${pkgname}/${pkgname}-${pkgver}.tar.bz2")
+source=("${pkgname}-${pkgver}.tar.bz2"::"http://quickgit.kde.org/?p=soprano.git&a=snapshot&h=3a7a527fd07c0d58146eb9e6770ecbcfb6717bc6&fmt=tbz2")
+md5sums=('21ef1f075062ea13d3e55d89d8f92fd2')
+
+build() {
+ cd "${srcdir}"
+ mkdir build
+ cd build
+ . /etc/profile.d/openjdk6.sh
+ # we need the rpath
+ cmake ../${pkgname} \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_SKIP_RPATH=OFF \
+ -DCMAKE_INSTALL_PREFIX=/usr \
+ -DSOPRANO_DISABLE_CLUCENE_INDEX=ON
+ make
+}
+
+package() {
+ cd "${srcdir}"/build
+ make DESTDIR="${pkgdir}" install
+}
diff --git a/testing/subversion/PKGBUILD b/testing/subversion/PKGBUILD
new file mode 100644
index 000000000..8f8e8adbe
--- /dev/null
+++ b/testing/subversion/PKGBUILD
@@ -0,0 +1,100 @@
+# $Id: PKGBUILD 130071 2011-07-01 21:53:41Z stephane $
+# Maintainer: Paul Mattal <paul@archlinux.org>
+# Contributor: Jason Chu <jason@archlinux.org>
+
+pkgname=subversion
+pkgver=1.6.17
+pkgrel=6
+pkgdesc="A Modern Concurrent Version Control System"
+arch=('i686' 'x86_64')
+license=('apache' 'bsd')
+depends=('neon' 'apr-util' 'sqlite3')
+optdepends=('libgnome-keyring' 'kdeutils-kwallet' 'bash-completion: for svn bash completion')
+makedepends=('krb5' 'apache' 'python2' 'perl' 'swig' 'ruby' 'java-runtime'
+ 'autoconf' 'db' 'e2fsprogs' 'libgnome-keyring' 'kdelibs')
+backup=('etc/xinetd.d/svn' 'etc/conf.d/svnserve')
+url="http://subversion.apache.org/"
+provides=('svn')
+options=('!makeflags' '!libtool')
+source=(http://subversion.tigris.org/downloads/$pkgname-$pkgver.tar.bz2
+ svnserve svn svnserve.conf svnmerge.py
+ subversion.rpath.fix.patch
+ subversion.suppress.deprecation.warnings.patch
+ subversion-perl-bindings.patch)
+md5sums=('81e5dc5beee4b3fc025ac70c0b6caa14'
+ 'a2b029e8385007ffb99b437b30521c90'
+ 'a0db6dd43af33952739b6ec089852630'
+ 'c459e299192552f61578f3438abf0664'
+ 'a6371baeda7e224504629ecdda2749b4'
+ '6b4340ba9d8845cd8497e013ae01be3f'
+ '1166f3b7413d7e7450299b3525680bbe'
+ '0591aa39837931161b4d61ff35c7b147')
+
+build() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+
+ export PYTHON=/usr/bin/python2
+
+ # apply patches
+ patch -Np0 -i ../subversion.rpath.fix.patch
+ patch -Np1 -i ../subversion.suppress.deprecation.warnings.patch
+ patch -Np1 -i ../subversion-perl-bindings.patch
+
+ # configure
+ autoreconf
+ ./configure --prefix=/usr --with-apr=/usr --with-apr-util=/usr \
+ --with-zlib=/usr --with-neon=/usr --with-apxs \
+ --with-sqlite=/usr --with-berkeley-db=:/usr/include/:/usr/lib:db-5.2 \
+ --enable-javahl --with-gnome-keyring --with-kwallet
+
+ # build
+ (make external-all && make LT_LDFLAGS="-L$Fdestdir/usr/lib" local-all )
+}
+
+#check() {
+# cd "${srcdir}/${pkgname}-${pkgver}"
+# export LANG=C LC_ALL=C
+# make check check-swig-pl check-swig-py CLEANUP=yes
+#}
+
+package() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+
+ # install
+ export LD_LIBRARY_PATH=${pkgdir}/usr/lib:$LD_LIBRARY_PATH
+ make DESTDIR=${pkgdir} install
+
+ make DESTDIR=${pkgdir} swig-py
+ make install-swig-py DESTDIR=${pkgdir}
+
+ install -d ${pkgdir}/usr/lib/python2.7
+ mv ${pkgdir}/usr/lib/svn-python/ ${pkgdir}/usr/lib/python2.7/site-packages
+
+ install -d ${pkgdir}/usr/share/subversion
+ install -d -m 755 tools/hook-scripts ${pkgdir}/usr/share/subversion/
+ rm -f ${pkgdir}/usr/share/subversion/hook-scripts/*.in
+
+ make DESTDIR=${pkgdir} swig-pl
+ make install-swig-pl DESTDIR=${pkgdir} INSTALLDIRS=vendor
+ rm -f ${pkgdir}/usr/lib/perl5/vendor_perl/auto/SVN/_Core/.packlist
+ rm -rf ${pkgdir}/usr/lib/perl5/core_perl
+
+ make DESTDIR=${pkgdir} swig-rb
+ make install-swig-rb DESTDIR=${pkgdir}
+
+ make DESTDIR=${pkgdir} javahl
+ make DESTDIR=${pkgdir} install-javahl
+
+ install -d ${pkgdir}/etc/{rc.d,xinetd.d,conf.d}
+
+ install -m 755 ${srcdir}/svnserve ${pkgdir}/etc/rc.d
+ install -m 644 ${srcdir}/svn ${pkgdir}/etc/xinetd.d
+ install -m 644 ${srcdir}/svnserve.conf ${pkgdir}/etc/conf.d/svnserve
+ install -m 755 ${srcdir}/svnmerge.py ${pkgdir}/usr/bin/svnmerge
+ install -D -m 644 ${srcdir}/subversion-$pkgver/COPYING \
+ ${pkgdir}/usr/share/licenses/$pkgname/LICENSE
+
+ # bash completion
+ install -Dm 644 ${srcdir}/${pkgname}-${pkgver}/tools/client-side/bash_completion \
+ ${pkgdir}/etc/bash_completion.d/subversion
+}
diff --git a/testing/subversion/subversion-perl-bindings.patch b/testing/subversion/subversion-perl-bindings.patch
new file mode 100644
index 000000000..3c34daa47
--- /dev/null
+++ b/testing/subversion/subversion-perl-bindings.patch
@@ -0,0 +1,12 @@
+diff -Naur subversion-1.6.17.ori/subversion/bindings/swig/perl/native/Makefile.PL.in subversion-1.6.17/subversion/bindings/swig/perl/native/Makefile.PL.in
+--- subversion-1.6.17.ori/subversion/bindings/swig/perl/native/Makefile.PL.in 2010-11-24 20:42:16.000000000 +0000
++++ subversion-1.6.17/subversion/bindings/swig/perl/native/Makefile.PL.in 2011-07-01 20:16:16.520892074 +0000
+@@ -43,7 +43,7 @@
+ my %config = (
+ ABSTRACT => 'Perl bindings for Subversion',
+ DEFINE => $cppflags,
+- CCFLAGS => $cflags,
++ CCFLAGS => $Config{ccflags},
+ INC => join(' ',$apr_cflags, $apu_cflags,
+ " -I$swig_srcdir/perl/libsvn_swig_perl",
+ " -I$svnlib_srcdir/include",
diff --git a/testing/subversion/subversion.rpath.fix.patch b/testing/subversion/subversion.rpath.fix.patch
new file mode 100644
index 000000000..ba6ee9e4e
--- /dev/null
+++ b/testing/subversion/subversion.rpath.fix.patch
@@ -0,0 +1,10 @@
+--- Makefile.in.orig 2009-02-16 14:10:48.000000000 -0200
++++ Makefile.in 2009-06-04 00:56:29.000000000 -0300
+@@ -678,6 +678,7 @@
+
+ $(SWIG_PL_DIR)/native/Makefile: $(SWIG_PL_DIR)/native/Makefile.PL
+ cd $(SWIG_PL_DIR)/native; $(PERL) Makefile.PL
++ cd $(SWIG_PL_DIR)/native; sed -i 's|LD_RUN_PATH|DIE_RPATH_DIE|g' Makefile{,.{client,delta,fs,ra,repos,wc}}
+
+ swig-pl_DEPS = autogen-swig-pl libsvn_swig_perl \
+ $(SWIG_PL_DIR)/native/Makefile
diff --git a/testing/subversion/subversion.suppress.deprecation.warnings.patch b/testing/subversion/subversion.suppress.deprecation.warnings.patch
new file mode 100644
index 000000000..94ce89b18
--- /dev/null
+++ b/testing/subversion/subversion.suppress.deprecation.warnings.patch
@@ -0,0 +1,22 @@
+diff -urN subversion-1.6.9/subversion/bindings/swig/python/svn/core.py subversion-1.6.9-fixed/subversion/bindings/swig/python/svn/core.py
+--- subversion-1.6.9/subversion/bindings/swig/python/svn/core.py 2009-02-13 11:22:26.000000000 -0500
++++ subversion-1.6.9-fixed/subversion/bindings/swig/python/svn/core.py 2010-02-08 07:46:29.000000000 -0500
+@@ -19,6 +19,7 @@
+ from libsvn.core import *
+ import libsvn.core as _libsvncore
+ import atexit as _atexit
++import warnings
+
+ class SubversionException(Exception):
+ def __init__(self, message=None, apr_err=None, child=None,
+@@ -44,7 +45,9 @@
+ Exception.__init__(self, *args)
+
+ self.apr_err = apr_err
+- self.message = message
++ with warnings.catch_warnings():
++ warnings.simplefilter("ignore", DeprecationWarning)
++ self.message = message
+ self.child = child
+ self.file = file
+ self.line = line
diff --git a/testing/subversion/svn b/testing/subversion/svn
new file mode 100644
index 000000000..8988aaf63
--- /dev/null
+++ b/testing/subversion/svn
@@ -0,0 +1,11 @@
+service svn
+{
+ flags = REUSE
+ socket_type = stream
+ wait = no
+ user = root
+ server = /usr/bin/svnserve
+ server_args = -i
+ log_on_failure += USERID
+ disable = yes
+}
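The service is installed disabled; serving repositories over svn:// via xinetd just means flipping that flag and restarting xinetd. A minimal sketch (the xinetd rc script name is assumed from the xinetd package of the time):

    sed -i 's/^\([[:space:]]*disable[[:space:]]*=\).*/\1 no/' /etc/xinetd.d/svn
    /etc/rc.d/xinetd restart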
diff --git a/testing/subversion/svnmerge.py b/testing/subversion/svnmerge.py
new file mode 100644
index 000000000..d8931648f
--- /dev/null
+++ b/testing/subversion/svnmerge.py
@@ -0,0 +1,2370 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+# Copyright (c) 2005, Giovanni Bajo
+# Copyright (c) 2004-2005, Awarix, Inc.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Author: Archie Cobbs <archie at awarix dot com>
+# Rewritten in Python by: Giovanni Bajo <rasky at develer dot com>
+#
+# Acknowledgments:
+# John Belmonte <john at neggie dot net> - metadata and usability
+# improvements
+# Blair Zajac <blair at orcaware dot com> - random improvements
+# Raman Gupta <rocketraman at fastmail dot fm> - bidirectional and transitive
+# merging support
+# Dustin J. Mitchell <dustin at zmanda dot com> - support for multiple
+# location identifier formats
+#
+# $HeadURL$
+# $LastChangedDate$
+# $LastChangedBy$
+# $LastChangedRevision$
+#
+# Requisites:
+# svnmerge.py has been tested with all SVN major versions since 1.1 (both
+# client and server). It is unknown if it works with previous versions.
+#
+# Differences from svnmerge.sh:
+# - More portable: tested as working in FreeBSD and OS/2.
+# - Add double-verbose mode, which shows every svn command executed (-v -v).
+# - "svnmerge avail" now only shows commits in source, not also commits in
+# other parts of the repository.
+# - Add "svnmerge block" to flag some revisions as blocked, so that
+# they will not show up anymore in the available list. Added also
+# the complementary "svnmerge unblock".
+# - "svnmerge avail" has grown two new options:
+# -B to display a list of the blocked revisions
+# -A to display both the blocked and the available revisions.
+# - Improved generated commit message to make it machine parsable even when
+# merging commits which are themselves merges.
+# - Add --force option to skip working copy check
+# - Add --record-only option to "svnmerge merge" to avoid performing
+# an actual merge, yet record that a merge happened.
+# - Can use a variety of location-identifier formats
+#
+# TODO:
+# - Add "svnmerge avail -R": show logs in reverse order
+#
+# Information for Hackers:
+#
+# Identifiers for branches:
+# A branch is identified in three ways within this source:
+# - as a working copy (variable name usually includes 'dir')
+# - as a fully qualified URL
+# - as a path identifier (an opaque string indicating a particular path
+# in a particular repository; variable name includes 'pathid')
+# A "target" is generally user-specified, and may be a working copy or
+# a URL.
+
+import sys, os, getopt, re, types, tempfile, time, locale
+from bisect import bisect
+from xml.dom import pulldom
+
+NAME = "svnmerge"
+if not hasattr(sys, "version_info") or sys.version_info < (2, 0):
+ error("requires Python 2.0 or newer")
+
+# Set up the separator used to separate individual log messages from
+# each revision merged into the target location. Also, create a
+# regular expression that will find this same separator in already
+# committed log messages, so that the separator used for this run of
+# svnmerge.py will have one more LOG_SEPARATOR appended to the longest
+# separator found in all the commits.
+LOG_SEPARATOR = 8 * '.'
+LOG_SEPARATOR_RE = re.compile('^((%s)+)' % re.escape(LOG_SEPARATOR),
+ re.MULTILINE)
+
+# Each line of the embedded log messages will be prefixed by LOG_LINE_PREFIX.
+LOG_LINE_PREFIX = 2 * ' '
+
+# Set python to the default locale as per environment settings, same as svn
+# TODO we should really parse config and if log-encoding is specified, set
+# the locale to match that encoding
+locale.setlocale(locale.LC_ALL, '')
+
+# We want the svn output (such as svn info) to be non-localized
+# Using LC_MESSAGES should not affect localized output of svn log, for example
+if os.environ.has_key("LC_ALL"):
+ del os.environ["LC_ALL"]
+os.environ["LC_MESSAGES"] = "C"
+
+###############################################################################
+# Support for older Python versions
+###############################################################################
+
+# True/False constants are Python 2.2+
+try:
+ True, False
+except NameError:
+ True, False = 1, 0
+
+def lstrip(s, ch):
+ """Replacement for str.lstrip (support for arbitrary chars to strip was
+ added in Python 2.2.2)."""
+ i = 0
+ try:
+ while s[i] == ch:
+ i = i+1
+ return s[i:]
+ except IndexError:
+ return ""
+
+def rstrip(s, ch):
+ """Replacement for str.rstrip (support for arbitrary chars to strip was
+ added in Python 2.2.2)."""
+ try:
+ if s[-1] != ch:
+ return s
+ i = -2
+ while s[i] == ch:
+ i = i-1
+ return s[:i+1]
+ except IndexError:
+ return ""
+
+def strip(s, ch):
+ """Replacement for str.strip (support for arbitrary chars to strip was
+ added in Python 2.2.2)."""
+ return lstrip(rstrip(s, ch), ch)
+
+def rsplit(s, sep, maxsplits=0):
+ """Like str.rsplit, which is Python 2.4+ only."""
+ L = s.split(sep)
+ if not 0 < maxsplits <= len(L):
+ return L
+ return [sep.join(L[0:-maxsplits])] + L[-maxsplits:]
+
+###############################################################################
+
+def kwextract(s):
+ """Extract info from a svn keyword string."""
+ try:
+ return strip(s, "$").strip().split(": ")[1]
+ except IndexError:
+ return "<unknown>"
+
+__revision__ = kwextract('$Rev$')
+__date__ = kwextract('$Date$')
+
+# Additional options, not (yet?) mapped to command line flags
+default_opts = {
+ "svn": "svn",
+ "prop": NAME + "-integrated",
+ "block-prop": NAME + "-blocked",
+ "commit-verbose": True,
+ "verbose": 0,
+}
+logs = {}
+
+def console_width():
+ """Get the width of the console screen (if any)."""
+ try:
+ return int(os.environ["COLUMNS"])
+ except (KeyError, ValueError):
+ pass
+
+ try:
+ # Call the Windows API (requires ctypes library)
+ from ctypes import windll, create_string_buffer
+ h = windll.kernel32.GetStdHandle(-11)
+ csbi = create_string_buffer(22)
+ res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
+ if res:
+ import struct
+ (bufx, bufy,
+ curx, cury, wattr,
+ left, top, right, bottom,
+ maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
+ return right - left + 1
+ except ImportError:
+ pass
+
+ # Parse the output of stty -a
+ if os.isatty(1):
+ out = os.popen("stty -a").read()
+ m = re.search(r"columns (\d+);", out)
+ if m:
+ return int(m.group(1))
+
+ # sensible default
+ return 80
+
+def error(s):
+ """Subroutine to output an error and bail."""
+ print >> sys.stderr, "%s: %s" % (NAME, s)
+ sys.exit(1)
+
+def report(s):
+ """Subroutine to output progress message, unless in quiet mode."""
+ if opts["verbose"]:
+ print "%s: %s" % (NAME, s)
+
+def prefix_lines(prefix, lines):
+ """Given a string representing one or more lines of text, insert the
+ specified prefix at the beginning of each line, and return the result.
+ The input must be terminated by a newline."""
+ assert lines[-1] == "\n"
+ return prefix + lines[:-1].replace("\n", "\n"+prefix) + "\n"
+
+def recode_stdout_to_file(s):
+ if locale.getdefaultlocale()[1] is None or not hasattr(sys.stdout, "encoding") \
+ or sys.stdout.encoding is None:
+ return s
+ u = s.decode(sys.stdout.encoding)
+ return u.encode(locale.getdefaultlocale()[1])
+
+class LaunchError(Exception):
+ """Signal a failure in execution of an external command. Parameters are the
+ exit code of the process, the original command line, and the output of the
+ command."""
+
+try:
+ """Launch a sub-process. Return its output (both stdout and stderr),
+ optionally split by lines (if split_lines is True). Raise a LaunchError
+ exception if the exit code of the process is non-zero (failure).
+
+ This function has two implementations, one based on subprocess (preferred),
+ and one based on popen (for compatibility).
+ """
+ import subprocess
+ import shlex
+
+ def launch(cmd, split_lines=True):
+ # Requiring python 2.4 or higher, on some platforms we get
+ # much faster performance from the subprocess module (where python
+ # doesn't try to close an exorbitant number of file descriptors)
+ stdout = ""
+ stderr = ""
+ try:
+ if os.name == 'nt':
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, \
+ close_fds=False, stderr=subprocess.PIPE)
+ else:
+ # Use shlex to break up the parameters intelligently,
+ # respecting quotes. shlex can't handle unicode.
+ args = shlex.split(cmd.encode('ascii'))
+ p = subprocess.Popen(args, stdout=subprocess.PIPE, \
+ close_fds=False, stderr=subprocess.PIPE)
+ stdoutAndErr = p.communicate()
+ stdout = stdoutAndErr[0]
+ stderr = stdoutAndErr[1]
+ except OSError, inst:
+ # Using 1 as failure code; should get actual number somehow? For
+ # examples see svnmerge_test.py's TestCase_launch.test_failure and
+ # TestCase_launch.test_failurecode.
+ raise LaunchError(1, cmd, stdout + " " + stderr + ": " + str(inst))
+
+ if p.returncode == 0:
+ if split_lines:
+ # Setting keepends=True for compatibility with previous logic
+ # (where file.readlines() preserves newlines)
+ return stdout.splitlines(True)
+ else:
+ return stdout
+ else:
+ raise LaunchError(p.returncode, cmd, stdout + stderr)
+except ImportError:
+ # support versions of python before 2.4 (slower on some systems)
+ def launch(cmd, split_lines=True):
+ if os.name not in ['nt', 'os2']:
+ import popen2
+ p = popen2.Popen4(cmd)
+ p.tochild.close()
+ if split_lines:
+ out = p.fromchild.readlines()
+ else:
+ out = p.fromchild.read()
+ ret = p.wait()
+ if ret == 0:
+ ret = None
+ else:
+ ret >>= 8
+ else:
+ i,k = os.popen4(cmd)
+ i.close()
+ if split_lines:
+ out = k.readlines()
+ else:
+ out = k.read()
+ ret = k.close()
+
+ if ret is None:
+ return out
+ raise LaunchError(ret, cmd, out)
+
+def launchsvn(s, show=False, pretend=False, **kwargs):
+ """Launch SVN and grab its output."""
+ username = password = configdir = ""
+ if opts.get("username", None):
+ username = "--username=" + opts["username"]
+ if opts.get("password", None):
+ password = "--password=" + opts["password"]
+ if opts.get("config-dir", None):
+ configdir = "--config-dir=" + opts["config-dir"]
+ cmd = ' '.join(filter(None, [opts["svn"], "--non-interactive",
+ username, password, configdir, s]))
+ if show or opts["verbose"] >= 2:
+ print cmd
+ if pretend:
+ return None
+ return launch(cmd, **kwargs)
+
+def svn_command(s):
+ """Do (or pretend to do) an SVN command."""
+ out = launchsvn(s, show=opts["show-changes"] or opts["dry-run"],
+ pretend=opts["dry-run"],
+ split_lines=False)
+ if not opts["dry-run"]:
+ print out
+
+def check_dir_clean(dir):
+ """Check the current status of dir for local mods."""
+ if opts["force"]:
+ report('skipping status check because of --force')
+ return
+ report('checking status of "%s"' % dir)
+
+ # Checking with -q does not show unversioned files or external
+ # directories. It does display a debug message for external
+ # directories after a blank line, so, in practice, only the first
+ # line matters: if it is non-empty, there is a local modification.
+ out = launchsvn("status -q %s" % dir)
+ if out and out[0].strip():
+ error('"%s" has local modifications; it must be clean' % dir)
+
+class PathIdentifier:
+ """Abstraction for a path identifier, so that we can start talking
+ about it before we know the form that it takes in the properties (its
+ external_form). Objects are referenced in the class variable 'locobjs',
+ keyed by all known forms."""
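+ # Illustrative note (not part of the original script): the same location
+ # can be referred to by several forms, e.g. a repo-relative path such as
+ # "/branches/foo", a uuid form such as "uuid://<repo-uuid>/branches/foo",
+ # or a full URL such as "http://svn.example.com/repos/branches/foo"
+ # (hypothetical names); 'locobjs' maps every known form to one object.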
+
+ # a map of UUID (or None) to repository root URL.
+ repo_hints = {}
+
+ # a map from any known string form to the corresponding PathIdentifier
+ locobjs = {}
+
+ def __init__(self, repo_relative_path, uuid=None, url=None, external_form=None):
+ self.repo_relative_path = repo_relative_path
+ self.uuid = uuid
+ self.url = url
+ self.external_form = external_form
+
+ def __repr__(self):
+ return "<PathIdentifier " + ', '.join('%s=%r' % i for i in self.__dict__.items()) + '>'
+
+ def __str__(self):
+ """Return a printable string representation"""
+ if self.external_form:
+ return self.external_form
+ if self.url:
+ return self.format('url')
+ if self.uuid:
+ return self.format('uuid')
+ return self.format('path')
+
+ def from_pathid(pathid_str):
+ """convert pathid_str to a PathIdentifier"""
+ if not PathIdentifier.locobjs.has_key(pathid_str):
+ if is_url(pathid_str):
+ # we can determine every form; PathIdentifier.hint knows how to do that
+ PathIdentifier.hint(pathid_str)
+ elif pathid_str[:7] == 'uuid://':
+ mo = re.match('uuid://([^/]*)(.*)', pathid_str)
+ if not mo:
+ error("Invalid path identifier '%s'" % pathid_str)
+ uuid, repo_relative_path = mo.groups()
+ pathid = PathIdentifier(repo_relative_path, uuid=uuid)
+ # we can cache this by uuid:// pathid and by repo-relative path
+ PathIdentifier.locobjs[pathid_str] = PathIdentifier.locobjs[repo_relative_path] = pathid
+ elif pathid_str and pathid_str[0] == '/':
+ # strip any trailing slashes
+ pathid_str = pathid_str.rstrip('/')
+ pathid = PathIdentifier(repo_relative_path=pathid_str)
+ # we can only cache this by repo-relative path
+ PathIdentifier.locobjs[pathid_str] = pathid
+ else:
+ error("Invalid path identifier '%s'" % pathid_str)
+ return PathIdentifier.locobjs[pathid_str]
+ from_pathid = staticmethod(from_pathid)
+
+ def from_target(target):
+ """Convert a target (either a working copy path or an URL) into a
+ path identifier."""
+ # prime the cache first if we don't know about this target yet
+ if not PathIdentifier.locobjs.has_key(target):
+ PathIdentifier.hint(target)
+
+ try:
+ return PathIdentifier.locobjs[target]
+ except KeyError:
+ error("Could not recognize path identifier '%s'" % target)
+ from_target = staticmethod(from_target)
+
+ def hint(target):
+ """Cache some information about target, as it may be referenced by
+ repo-relative path in subversion properties; the cache can help to
+ expand such a relative path to a full path identifier."""
+ if PathIdentifier.locobjs.has_key(target): return
+ if not is_url(target) and not is_wc(target): return
+
+ url = target_to_url(target)
+
+ root = get_repo_root(url)
+ assert root[-1] != "/"
+ assert url[:len(root)] == root, "url=%r, root=%r" % (url, root)
+ repo_relative_path = url[len(root):]
+
+ try:
+ uuid = get_svninfo(target)['Repository UUID']
+ uuid_pathid = 'uuid://%s%s' % (uuid, repo_relative_path)
+ except KeyError:
+ uuid = None
+ uuid_pathid = None
+
+ locobj = PathIdentifier.locobjs.get(url) or \
+ (uuid_pathid and PathIdentifier.locobjs.get(uuid_pathid))
+ if not locobj:
+ locobj = PathIdentifier(repo_relative_path, uuid=uuid, url=url)
+
+ PathIdentifier.repo_hints[uuid] = root # (uuid may be None)
+
+ PathIdentifier.locobjs[target] = locobj
+ PathIdentifier.locobjs[url] = locobj
+ if uuid_pathid:
+ PathIdentifier.locobjs[uuid_pathid] = locobj
+ if not PathIdentifier.locobjs.has_key(repo_relative_path):
+ PathIdentifier.locobjs[repo_relative_path] = locobj
+ hint = staticmethod(hint)
+
+ def format(self, fmt):
+ if fmt == 'path':
+ return self.repo_relative_path
+ elif fmt == 'uuid':
+ return "uuid://%s%s" % (self.uuid, self.repo_relative_path)
+ elif fmt == 'url':
+ return self.url
+ else:
+ error("Unkonwn path type '%s'" % fmt)
+
+ def match_substring(self, str):
+ """Test whether str is a substring of any representation of this
+ PathIdentifier."""
+ if self.repo_relative_path.find(str) >= 0:
+ return True
+
+ if self.uuid:
+ if ("uuid://%s%s" % (self.uuid, self.repo_relative_path)).find(str) >= 0:
+ return True
+
+ if self.url:
+ if (self.url + self.repo_relative_path).find(str) >= 0:
+ return True
+
+ return False
+
+ def get_url(self):
+ """Convert a pathid into a URL. If this is not possible, error out."""
+ if self.url:
+ return self.url
+ # if we have a uuid and happen to know the URL for it, use that
+ elif self.uuid and PathIdentifier.repo_hints.has_key(self.uuid):
+ self.url = PathIdentifier.repo_hints[self.uuid] + self.repo_relative_path
+ PathIdentifier.locobjs[self.url] = self
+ return self.url
+ # if we've only seen one repository, use its root (a guess, but an educated one)
+ elif not self.uuid and len(PathIdentifier.repo_hints) == 1:
+ uuid, root = PathIdentifier.repo_hints.items()[0]
+ if uuid:
+ self.uuid = uuid
+ PathIdentifier.locobjs['uuid://%s%s' % (uuid, self.repo_relative_path)] = self
+ self.url = root + self.repo_relative_path
+ PathIdentifier.locobjs[self.url] = self
+ report("Guessing that '%s' refers to '%s'" % (self, self.url))
+ return self.url
+ else:
+ error("Cannot determine URL for '%s'; " % self +
+ "Explicit source argument (-S/--source) required.\n")
+
+class RevisionLog:
+ """
+ A log of the revisions which affected a given URL between two
+ revisions.
+ """
+
+ def __init__(self, url, begin, end, find_propchanges=False):
+ """
+ Create a new RevisionLog object, which stores, in self.revs, a list
+ of the revisions which affected the specified URL between begin and
+ end. If find_propchanges is True, self.propchange_revs will contain a
+ list of the revisions which changed properties directly on the
+ specified URL. URL must be the URL for a directory in the repository.
+ """
+ self.url = url
+
+ # Setup the log options (--quiet, so we don't show log messages)
+ log_opts = '--xml --quiet -r%s:%s "%s"' % (begin, end, url)
+ if find_propchanges:
+ # The --verbose flag lets us grab merge tracking information
+ # by looking at propchanges
+ log_opts = "--verbose " + log_opts
+
+ # Read the log to look for revision numbers and merge-tracking info
+ self.revs = []
+ self.propchange_revs = []
+ repos_pathid = PathIdentifier.from_target(url)
+ for chg in SvnLogParser(launchsvn("log %s" % log_opts,
+ split_lines=False)):
+ self.revs.append(chg.revision())
+ for p in chg.paths():
+ if p.action() == 'M' and p.pathid() == repos_pathid.repo_relative_path:
+ self.propchange_revs.append(chg.revision())
+
+ # Save the range of the log
+ self.begin = int(begin)
+ if end == "HEAD":
+ # If end is not provided, we do not know which is the latest
+ # revision in the repository. So we set 'end' to the latest
+ # known revision.
+ self.end = self.revs[-1]
+ else:
+ self.end = int(end)
+
+ self._merges = None
+ self._blocks = None
+
+ def merge_metadata(self):
+ """
+ Return a VersionedProperty object, with a cached view of the merge
+ metadata in the range of this log.
+ """
+
+ # Load merge metadata if necessary
+ if not self._merges:
+ self._merges = VersionedProperty(self.url, opts["prop"])
+ self._merges.load(self)
+
+ return self._merges
+
+ def block_metadata(self):
+ if not self._blocks:
+ self._blocks = VersionedProperty(self.url, opts["block-prop"])
+ self._blocks.load(self)
+
+ return self._blocks
+
+
+class VersionedProperty:
+ """
+ A read-only, cached view of a versioned property.
+
+ self.revs contains a list of the revisions in which the property changes.
+ self.values stores the new values at each corresponding revision. If the
+ value of the property is unknown, it is set to None.
+
+ Initially, we set self.revs to [0] and self.values to [None]. This
+ indicates that, as of revision zero, we know nothing about the value of
+ the property.
+
+ Later, if you run self.load(log), we cache the value of this property over
+ the entire range of the log by noting each revision in which the property
+ was changed. At the end of the range of the log, we invalidate our cache
+ by adding the value "None" to our cache for any revisions which fall out
+ of the range of our log.
+
+ Once self.revs and self.values are filled, we can find the value of the
+ property at any arbitrary revision using a binary search on self.revs.
+ Once we find the last revision during which the property was changed,
+ we can lookup the associated value in self.values. (If the associated
+ value is None, the associated value was not cached and we have to do
+ a full propget.)
+
+ An example: We know that the 'svnmerge' property was added in r10, and
+ changed in r21. We gathered log info up until r40.
+
+ revs = [0, 10, 21, 40]
+ values = [None, "val1", "val2", None]
+
+ What these values say:
+ - From r0 to r9, we know nothing about the property.
+ - In r10, the property was set to "val1". This property stayed the same
+ until r21, when it was changed to "val2".
+ - We don't know what happened after r40.
+ """
+
+ def __init__(self, url, name):
+ """View the history of a versioned property at URL with name"""
+ self.url = url
+ self.name = name
+
+ # We know nothing about the value of the property. Setup revs
+ # and values to indicate as such.
+ self.revs = [0]
+ self.values = [None]
+
+ # We don't have any revisions cached
+ self._initial_value = None
+ self._changed_revs = []
+ self._changed_values = []
+
+ def load(self, log):
+ """
+ Load the history of property changes from the specified
+ RevisionLog object.
+ """
+
+ # Get the property value before the range of the log
+ if log.begin > 1:
+ self.revs.append(log.begin-1)
+ try:
+ self._initial_value = self.raw_get(log.begin-1)
+ except LaunchError:
+ # The specified URL might not exist before the
+ # range of the log. If so, we can safely assume
+ # that the property was empty at that time.
+ self._initial_value = { }
+ self.values.append(self._initial_value)
+ else:
+ self._initial_value = { }
+ self.values[0] = self._initial_value
+
+ # Cache the property values in the log range
+ old_value = self._initial_value
+ for rev in log.propchange_revs:
+ new_value = self.raw_get(rev)
+ if new_value != old_value:
+ self._changed_revs.append(rev)
+ self._changed_values.append(new_value)
+ self.revs.append(rev)
+ self.values.append(new_value)
+ old_value = new_value
+
+ # Indicate that we know nothing about the value of the property
+ # after the range of the log.
+ if log.revs:
+ self.revs.append(log.end+1)
+ self.values.append(None)
+
+ def raw_get(self, rev=None):
+ """
+ Get the property at revision REV. If rev is not specified, get
+ the property at revision HEAD.
+ """
+ return get_revlist_prop(self.url, self.name, rev)
+
+ def get(self, rev=None):
+ """
+ Get the property at revision REV. If rev is not specified, get
+ the property at revision HEAD.
+ """
+
+ if rev is not None:
+
+ # Find the index using a binary search
+ i = bisect(self.revs, rev) - 1
+
+ # Return the value of the property, if it was cached
+ if self.values[i] is not None:
+ return self.values[i]
+
+ # Get the current value of the property
+ return self.raw_get(rev)
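+ # Illustrative example (not part of the original script), using the numbers
+ # from the class docstring: with revs = [0, 10, 21, 40] and
+ # values = [None, "val1", "val2", None], get(15) computes
+ # bisect([0, 10, 21, 40], 15) - 1 == 1 and returns "val1"; get(50) lands
+ # on the trailing None and falls back to a live propget via raw_get(50).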
+
+ def changed_revs(self, key=None):
+ """
+ Get a list of the revisions in which the specified dictionary
+ key was changed in this property. If key is not specified,
+ return a list of revisions in which any key was changed.
+ """
+ if key is None:
+ return self._changed_revs
+ else:
+ changed_revs = []
+ old_val = self._initial_value
+ for rev, val in zip(self._changed_revs, self._changed_values):
+ if val.get(key) != old_val.get(key):
+ changed_revs.append(rev)
+ old_val = val
+ return changed_revs
+
+ def initialized_revs(self):
+ """
+ Get a list of the revisions in which keys were added or
+ removed in this property.
+ """
+ initialized_revs = []
+ old_len = len(self._initial_value)
+ for rev, val in zip(self._changed_revs, self._changed_values):
+ if len(val) != old_len:
+ initialized_revs.append(rev)
+ old_len = len(val)
+ return initialized_revs
+
+class RevisionSet:
+ """
+ A set of revisions, held in dictionary form for easy manipulation. If we
+ were to rewrite this script for Python 2.3+, we would subclass this from
+ set (or UserSet). As this class does not include branch
+ information, it's assumed that one instance will be used per
+ branch.
+ """
+ def __init__(self, parm):
+ """Constructs a RevisionSet from a string in property form, or from
+ a dictionary whose keys are the revisions. Raises ValueError if the
+ input string is invalid."""
+
+ self._revs = {}
+
+ revision_range_split_re = re.compile('[-:]')
+
+ if isinstance(parm, types.DictType):
+ self._revs = parm.copy()
+ elif isinstance(parm, types.ListType):
+ for R in parm:
+ self._revs[int(R)] = 1
+ else:
+ parm = parm.strip()
+ if parm:
+ for R in parm.split(","):
+ rev_or_revs = re.split(revision_range_split_re, R)
+ if len(rev_or_revs) == 1:
+ self._revs[int(rev_or_revs[0])] = 1
+ elif len(rev_or_revs) == 2:
+ for rev in range(int(rev_or_revs[0]),
+ int(rev_or_revs[1])+1):
+ self._revs[rev] = 1
+ else:
+ raise ValueError, 'Ill formatted revision range: ' + R
+
+ def sorted(self):
+ revnums = self._revs.keys()
+ revnums.sort()
+ return revnums
+
+ def normalized(self):
+ """Returns a normalized version of the revision set, which is an
+ ordered list of couples (start,end), with the minimum number of
+ intervals."""
+ revnums = self.sorted()
+ revnums.reverse()
+ ret = []
+ while revnums:
+ s = e = revnums.pop()
+ while revnums and revnums[-1] in (e, e+1):
+ e = revnums.pop()
+ ret.append((s, e))
+ return ret
+
+ def __str__(self):
+ """Convert the revision set to a string, using its normalized form."""
+ L = []
+ for s,e in self.normalized():
+ if s == e:
+ L.append(str(s))
+ else:
+ L.append(str(s) + "-" + str(e))
+ return ",".join(L)
+
+ def __contains__(self, rev):
+ return self._revs.has_key(rev)
+
+ def __sub__(self, rs):
+ """Compute subtraction as in sets."""
+ revs = {}
+ for r in self._revs.keys():
+ if r not in rs:
+ revs[r] = 1
+ return RevisionSet(revs)
+
+ def __and__(self, rs):
+ """Compute intersections as in sets."""
+ revs = {}
+ for r in self._revs.keys():
+ if r in rs:
+ revs[r] = 1
+ return RevisionSet(revs)
+
+ def __nonzero__(self):
+ return len(self._revs) != 0
+
+ def __len__(self):
+ """Return the number of revisions in the set."""
+ return len(self._revs)
+
+ def __iter__(self):
+ return iter(self.sorted())
+
+ def __or__(self, rs):
+ """Compute set union."""
+ revs = self._revs.copy()
+ revs.update(rs._revs)
+ return RevisionSet(revs)
+
+def merge_props_to_revision_set(merge_props, pathid):
+ """A converter which returns a RevisionSet instance containing the
+ revisions from PATH as known to BRANCH_PROPS. BRANCH_PROPS is a
+ dictionary of pathid -> revision set branch integration information
+ (as returned by get_merge_props())."""
+ if not merge_props.has_key(pathid):
+ error('no integration info available for path "%s"' % pathid)
+ return RevisionSet(merge_props[pathid])
+
+def dict_from_revlist_prop(propvalue):
+ """Given a property value as a string containing per-source revision
+ lists, return a dictionary whose key is a source path identifier
+ and whose value is the revisions for that source."""
+ prop = {}
+
+ # Multiple sources are separated by any whitespace.
+ for L in propvalue.split():
+ # We use rsplit to play safe and allow colons in pathids.
+ pathid_str, revs = rsplit(L.strip(), ":", 1)
+
+ pathid = PathIdentifier.from_pathid(pathid_str)
+
+ # cache the "external" form we saw
+ pathid.external_form = pathid_str
+
+ prop[pathid] = revs
+ return prop
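+# Illustrative example (not part of the original script), for a hypothetical
+# property value listing two merge sources:
+#   >>> dict_from_revlist_prop("/branches/foo:1-10,12 /trunk:1-30")
+#   maps the PathIdentifier for "/branches/foo" to "1-10,12" and the one
+#   for "/trunk" to "1-30".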
+
+def get_revlist_prop(url_or_dir, propname, rev=None):
+ """Given a repository URL or working copy path and a property
+ name, extract the values of the property which store per-source
+ revision lists and return a dictionary whose key is a source path
+ identifier, and whose value is the revisions for that source."""
+
+ # Note that propget does not return an error if the property does
+ # not exist, it simply does not output anything. So we do not need
+ # to check for LaunchError here.
+ args = '--strict "%s" "%s"' % (propname, url_or_dir)
+ if rev:
+ args = '-r %s %s' % (rev, args)
+ out = launchsvn('propget %s' % args, split_lines=False)
+
+ return dict_from_revlist_prop(out)
+
+def get_merge_props(dir):
+ """Extract the merged revisions."""
+ return get_revlist_prop(dir, opts["prop"])
+
+def get_block_props(dir):
+ """Extract the blocked revisions."""
+ return get_revlist_prop(dir, opts["block-prop"])
+
+def get_blocked_revs(dir, source_pathid):
+ p = get_block_props(dir)
+ if p.has_key(source_pathid):
+ return RevisionSet(p[source_pathid])
+ return RevisionSet("")
+
+def format_merge_props(props, sep=" "):
+ """Formats the hash PROPS as a string suitable for use as a
+ Subversion property value."""
+ assert sep in ["\t", "\n", " "] # must be a whitespace
+ props = props.items()
+ props.sort()
+ L = []
+ for h, r in props:
+ L.append("%s:%s" % (h, r))
+ return sep.join(L)
+
+def _run_propset(dir, prop, value):
+ """Set the property 'prop' of directory 'dir' to value 'value'. We go
+ through a temporary file to not run into command line length limits."""
+ try:
+ fd, fname = tempfile.mkstemp()
+ f = os.fdopen(fd, "wb")
+ except AttributeError:
+ # Fallback for Python <= 2.3 which does not have mkstemp (mktemp
+ # suffers from race conditions. Not that we care...)
+ fname = tempfile.mktemp()
+ f = open(fname, "wb")
+
+ try:
+ f.write(value)
+ f.close()
+ report("property data written to temp file: %s" % value)
+ svn_command('propset "%s" -F "%s" "%s"' % (prop, fname, dir))
+ finally:
+ os.remove(fname)
+
+def set_props(dir, name, props):
+ props = format_merge_props(props)
+ if props:
+ _run_propset(dir, name, props)
+ else:
+ # Check if NAME exists on DIR before trying to delete it.
+ # As of 1.6 propdel no longer supports deleting a
+ # non-existent property.
+ out = launchsvn('propget "%s" "%s"' % (name, dir))
+ if out:
+ svn_command('propdel "%s" "%s"' % (name, dir))
+
+def set_merge_props(dir, props):
+ set_props(dir, opts["prop"], props)
+
+def set_block_props(dir, props):
+ set_props(dir, opts["block-prop"], props)
+
+def set_blocked_revs(dir, source_pathid, revs):
+ props = get_block_props(dir)
+ if revs:
+ props[source_pathid] = str(revs)
+ elif props.has_key(source_pathid):
+ del props[source_pathid]
+ set_block_props(dir, props)
+
+def is_url(url):
+ """Check if url looks like a valid url."""
+ return re.search(r"^[a-zA-Z][-+\.\w]*://[^\s]+$", url) is not None and url[:4] != 'uuid'
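+# Illustrative example (not part of the original script), with hypothetical
+# values:
+#   >>> is_url("http://svn.example.com/repos/trunk")
+#   True
+#   >>> is_url("uuid://some-uuid/branches/foo")
+#   False
+#   >>> is_url("branches/foo")
+#   False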
+
+def check_url(url):
+ """Similar to is_url, but actually invoke get_svninfo to find out"""
+ return get_svninfo(url) != {}
+
+def is_pathid(pathid):
+ return isinstance(pathid, PathIdentifier)
+
+def is_wc(dir):
+ """Check if a directory is a working copy."""
+ return os.path.isdir(os.path.join(dir, ".svn")) or \
+ os.path.isdir(os.path.join(dir, "_svn"))
+
+_cache_svninfo = {}
+def get_svninfo(target):
+ """Extract the subversion information for a target (through 'svn info').
+ This function uses an internal cache to let clients query information
+ many times."""
+ if _cache_svninfo.has_key(target):
+ return _cache_svninfo[target]
+ info = {}
+ for L in launchsvn('info "%s"' % target):
+ L = L.strip()
+ if not L:
+ continue
+ key, value = L.split(": ", 1)
+ info[key] = value.strip()
+ _cache_svninfo[target] = info
+ return info
+
+def target_to_url(target):
+ """Convert working copy path or repos URL to a repos URL."""
+ if is_wc(target):
+ info = get_svninfo(target)
+ return info["URL"]
+ return target
+
+_cache_reporoot = {}
+def get_repo_root(target):
+ """Compute the root repos URL given a working-copy path, or a URL."""
+ # Try using "svn info WCDIR". This works only on SVN clients >= 1.3
+ if not is_url(target):
+ try:
+ info = get_svninfo(target)
+ root = info["Repository Root"]
+ _cache_reporoot[root] = None
+ return root
+ except KeyError:
+ pass
+ url = target_to_url(target)
+ assert url[-1] != '/'
+ else:
+ url = target
+
+ # Go through the cache of the repository roots. This avoids extra
+ # server round-trips if we are asking the root of different URLs
+ # in the same repository (the cache in get_svninfo() cannot detect
+ # that of course and would issue a remote command).
+ assert is_url(url)
+ for r in _cache_reporoot:
+ if url.startswith(r):
+ return r
+
+ # Try using "svn info URL". This works only on SVN clients >= 1.2
+ try:
+ info = get_svninfo(url)
+ # info may be {}, in which case we'll see KeyError here
+ root = info["Repository Root"]
+ _cache_reporoot[root] = None
+ return root
+ except (KeyError, LaunchError):
+ pass
+
+ # Constrained to older svn clients, we are stuck with this ugly
+ # trial-and-error implementation. It could be made faster with a
+ # binary search.
+ while url:
+ temp = os.path.dirname(url)
+ try:
+ launchsvn('proplist "%s"' % temp)
+ except LaunchError:
+ _cache_reporoot[url] = None
+ return rstrip(url, "/")
+ url = temp
+
+ error("svn repos root of %s not found" % target)
+
+class SvnLogParser:
+ """
+ Parse the "svn log", going through the XML output and using pulldom (which
+ would even allow streaming the command output).
+ """
+ def __init__(self, xml):
+ self._events = pulldom.parseString(xml)
+ def __getitem__(self, idx):
+ for event, node in self._events:
+ if event == pulldom.START_ELEMENT and node.tagName == "logentry":
+ self._events.expandNode(node)
+ return self.SvnLogRevision(node)
+ raise IndexError, "Could not find 'logentry' tag in xml"
+
+ class SvnLogRevision:
+ def __init__(self, xmlnode):
+ self.n = xmlnode
+ def revision(self):
+ return int(self.n.getAttribute("revision"))
+ def author(self):
+ return self.n.getElementsByTagName("author")[0].firstChild.data
+ def paths(self):
+ return [self.SvnLogPath(n)
+ for n in self.n.getElementsByTagName("path")]
+
+ class SvnLogPath:
+ def __init__(self, xmlnode):
+ self.n = xmlnode
+ def action(self):
+ return self.n.getAttribute("action")
+ def pathid(self):
+ return self.n.firstChild.data
+ def copyfrom_rev(self):
+ try: return self.n.getAttribute("copyfrom-rev")
+ except KeyError: return None
+ def copyfrom_pathid(self):
+ try: return self.n.getAttribute("copyfrom-path")
+ except KeyError: return None
+
+def get_copyfrom(target):
+ """Get copyfrom info for a given target (it represents the
+ repository-relative path from where it was branched). NOTE:
+ repos root has no copyfrom info. In this case None is returned.
+
+ Returns the:
+ - source file or directory from which the copy was made
+ - revision from which that source was copied
+ - revision in which the copy was committed
+ """
+ repos_path = PathIdentifier.from_target(target).repo_relative_path
+ for chg in SvnLogParser(launchsvn('log -v --xml --stop-on-copy "%s"'
+ % target, split_lines=False)):
+ for p in chg.paths():
+ if p.action() == 'A' and p.pathid() == repos_path:
+ # These values will be None if the corresponding elements are
+ # not found in the log.
+ return p.copyfrom_pathid(), p.copyfrom_rev(), chg.revision()
+ return None,None,None
+
+def get_latest_rev(url):
+ """Get the latest revision of the repository of which URL is part."""
+ try:
+ info = get_svninfo(url)
+ if not info.has_key("Revision"):
+ error("Not a valid URL: %s" % url)
+ return info["Revision"]
+ except LaunchError:
+ # Alternative method for latest revision checking (for svn < 1.2)
+ report('checking latest revision of "%s"' % url)
+ L = launchsvn('proplist --revprop -r HEAD "%s"' % opts["source-url"])[0]
+ rev = re.search("revision (\d+)", L).group(1)
+ report('latest revision of "%s" is %s' % (url, rev))
+ return rev
+
+def get_created_rev(url):
+ """Lookup the revision at which the path identified by the
+ provided URL was first created."""
+ oldest_rev = -1
+ report('determining oldest revision for URL "%s"' % url)
+ ### TODO: Refactor this to use a modified RevisionLog class.
+ lines = None
+ cmd = "log -r1:HEAD --stop-on-copy -q " + url
+ try:
+ lines = launchsvn(cmd + " --limit=1")
+ except LaunchError:
+ # Assume that --limit isn't supported by the installed 'svn'.
+ lines = launchsvn(cmd)
+ if lines and len(lines) > 1:
+ i = lines[1].find(" ")
+ if i != -1:
+ oldest_rev = int(lines[1][1:i])
+ if oldest_rev == -1:
+ error('unable to determine oldest revision for URL "%s"' % url)
+ return oldest_rev
+
+def get_commit_log(url, revnum):
+ """Return the log message for a specific integer revision
+ number."""
+ out = launchsvn("log --incremental -r%d %s" % (revnum, url))
+ return recode_stdout_to_file("".join(out[1:]))
+
+def construct_merged_log_message(url, revnums):
+ """Return a commit log message containing all the commit messages
+ in the specified revisions at the given URL. The separator used
+ in this log message is determined by searching for the longest
+ svnmerge separator existing in the commit log messages and
+ extending it by one more separator. This results in a new commit
+ log message that is clearer in describing merges that contain
+ other merges. Trailing newlines are removed from the embedded
+ log messages."""
+ messages = ['']
+ longest_sep = ''
+ for r in revnums.sorted():
+ message = get_commit_log(url, r)
+ if message:
+ message = re.sub(r'(\r\n|\r|\n)', "\n", message)
+ message = rstrip(message, "\n") + "\n"
+ messages.append(prefix_lines(LOG_LINE_PREFIX, message))
+ for match in LOG_SEPARATOR_RE.findall(message):
+ sep = match[1]
+ if len(sep) > len(longest_sep):
+ longest_sep = sep
+
+ longest_sep += LOG_SEPARATOR + "\n"
+ messages.append('')
+ return longest_sep.join(messages)
+
+def get_default_source(branch_target, branch_props):
+ """Return the default source for branch_target (given its branch_props).
+ Error out if there is ambiguity."""
+ if not branch_props:
+ error("no integration info available")
+
+ props = branch_props.copy()
+ pathid = PathIdentifier.from_target(branch_target)
+
+ # To make bidirectional merges easier, find the target's
+ # repository local path so it can be removed from the list of
+ # possible integration sources.
+ if props.has_key(pathid):
+ del props[pathid]
+
+ if len(props) > 1:
+ err_msg = "multiple sources found. "
+ err_msg += "Explicit source argument (-S/--source) required.\n"
+ err_msg += "The merge sources available are:"
+ for prop in props:
+ err_msg += "\n " + str(prop)
+ error(err_msg)
+
+ return props.keys()[0]
+
+def should_find_reflected(branch_dir):
+ should_find_reflected = opts["bidirectional"]
+
+ # If the source has integration info for the target, set find_reflected
+ # even if --bidirectional wasn't specified
+ if not should_find_reflected:
+ source_props = get_merge_props(opts["source-url"])
+ should_find_reflected = source_props.has_key(PathIdentifier.from_target(branch_dir))
+
+ return should_find_reflected
+
+def analyze_revs(target_pathid, url, begin=1, end=None,
+ find_reflected=False):
+ """For the source of the merges in the source URL being merged into
+ target_pathid, analyze the revisions in the interval begin-end (which
+ defaults to 1-HEAD), to find out which revisions are changes in
+ the url, which are changes elsewhere (so-called 'phantom'
+ revisions), optionally which are reflected changes (to avoid
+ conflicts that can occur when doing bidirectional merging between
+ branches), and which revisions initialize merge tracking against other
+ branches. Return a tuple of four RevisionSet's:
+ (real_revs, phantom_revs, reflected_revs, initialized_revs).
+
+ NOTE: To maximize speed, if "end" is not provided, the function is
+ not able to find phantom revisions following the last real
+ revision in the URL.
+ """
+
+ begin = str(begin)
+ if end is None:
+ end = "HEAD"
+ else:
+ end = str(end)
+ if long(begin) > long(end):
+ return RevisionSet(""), RevisionSet(""), \
+ RevisionSet(""), RevisionSet("")
+
+ logs[url] = RevisionLog(url, begin, end, find_reflected)
+ revs = RevisionSet(logs[url].revs)
+
+ if end == "HEAD":
+ # If end is not provided, we do not know which is the latest revision
+ # in the repository. So return the phantom revision set only up to
+ # the latest known revision.
+ end = str(list(revs)[-1])
+
+ phantom_revs = RevisionSet("%s-%s" % (begin, end)) - revs
+
+ if find_reflected:
+ reflected_revs = logs[url].merge_metadata().changed_revs(target_pathid)
+ reflected_revs += logs[url].block_metadata().changed_revs(target_pathid)
+ else:
+ reflected_revs = []
+
+ initialized_revs = RevisionSet(logs[url].merge_metadata().initialized_revs())
+ reflected_revs = RevisionSet(reflected_revs)
+
+ return revs, phantom_revs, reflected_revs, initialized_revs
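+# Illustrative example (not part of the original script): if the log of the
+# source URL between r1 and r10 shows only r3, r5 and r9 as real changes,
+# then revs is "3,5,9" and phantom_revs is "1-2,4,6-8,10", i.e. revisions
+# that exist in the repository but did not touch the source URL.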
+
+def analyze_source_revs(branch_target, source_url, **kwargs):
+ """For the given branch and source, extract the real and phantom
+ source revisions."""
+ branch_url = target_to_url(branch_target)
+ branch_pathid = PathIdentifier.from_target(branch_target)
+
+ # Extract the latest repository revision from the URL of the branch
+ # directory (which is already cached at this point).
+ end_rev = get_latest_rev(source_url)
+
+ # Calculate the base of analysis. If there is a "1-XX" interval in the
+ # merged_revs, we do not need to check those.
+ base = 1
+ r = opts["merged-revs"].normalized()
+ if r and r[0][0] == 1:
+ base = r[0][1] + 1
+
+ # See if the user filtered the revision set. If so, we are not
+ # interested in something outside that range.
+ if opts["revision"]:
+ revs = RevisionSet(opts["revision"]).sorted()
+ if base < revs[0]:
+ base = revs[0]
+ if end_rev > revs[-1]:
+ end_rev = revs[-1]
+
+ return analyze_revs(branch_pathid, source_url, base, end_rev, **kwargs)
+
+def minimal_merge_intervals(revs, phantom_revs):
+ """Produce the smallest number of intervals suitable for merging. revs
+ is the RevisionSet which we want to merge, and phantom_revs are phantom
+ revisions which can be used to concatenate intervals, thus minimizing the
+ number of operations."""
+ revnums = revs.normalized()
+ ret = []
+
+ cur = revnums.pop()
+ while revnums:
+ next = revnums.pop()
+ assert next[1] < cur[0] # otherwise it is not ordered
+ assert cur[0] - next[1] > 1 # otherwise it is not normalized
+ for i in range(next[1]+1, cur[0]):
+ if i not in phantom_revs:
+ ret.append(cur)
+ cur = next
+ break
+ else:
+ cur = (next[0], cur[1])
+
+ ret.append(cur)
+ ret.reverse()
+ return ret
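+# Illustrative example (not part of the original script): with
+# revs.normalized() == [(3, 4), (7, 9)] and phantom_revs containing 5 and 6,
+# the gap can be bridged and a single interval [(3, 9)] is returned, so only
+# one 'svn merge' invocation is needed; if r5 were a real, unmerged revision,
+# the result would stay [(3, 4), (7, 9)].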
+
+def display_revisions(revs, display_style, revisions_msg, source_url):
+ """Show REVS as dictated by DISPLAY_STYLE, either numerically, in
+ log format, or as diffs. When displaying revisions numerically,
+ prefix output with REVISIONS_MSG when in verbose mode. Otherwise,
+ request logs or diffs using SOURCE_URL."""
+ if display_style == "revisions":
+ if revs:
+ report(revisions_msg)
+ print revs
+ elif display_style == "logs":
+ for start,end in revs.normalized():
+ svn_command('log --incremental -v -r %d:%d %s' % \
+ (start, end, source_url))
+ elif display_style in ("diffs", "summarize"):
+ if display_style == 'summarize':
+ summarize = '--summarize '
+ else:
+ summarize = ''
+
+ for start, end in revs.normalized():
+ print
+ if start == end:
+ print "%s: changes in revision %d follow" % (NAME, start)
+ else:
+ print "%s: changes in revisions %d-%d follow" % (NAME,
+ start, end)
+ print
+
+ # Note: the starting revision number to 'svn diff' is
+ # NOT inclusive so we have to subtract one from start.
+ svn_command("diff -r %d:%d %s %s" % (start - 1, end, summarize,
+ source_url))
+ else:
+ assert False, "unhandled display style: %s" % display_style
+
+def action_init(target_dir, target_props):
+ """Initialize for merges."""
+ # Check that directory is ready for being modified
+ check_dir_clean(target_dir)
+
+ target_pathid = PathIdentifier.from_target(target_dir)
+ source_pathid = opts['source-pathid']
+ if source_pathid == target_pathid:
+ error("cannot init integration source path '%s'\nIts path identifier does not "
+ "differ from the path identifier of the current directory, '%s'."
+ % (source_pathid, target_pathid))
+
+ source_url = opts['source-url']
+
+ # If the user hasn't specified the revisions to use, see if the
+ # "source" is a copy from the current tree and if so, we can use
+ # the version data obtained from it.
+ revision_range = opts["revision"]
+ if not revision_range:
+ # If source was originally copied from target, and we are merging
+ # changes from source to target (the copy target is the merge source,
+ # and the copy source is the merge target), then we want to mark as
+ # integrated up to the rev in which the copy was committed which
+ # created the merge source:
+ cf_source, cf_rev, copy_committed_in_rev = get_copyfrom(source_url)
+
+ cf_pathid = None
+ if cf_source:
+ cf_url = get_repo_root(source_url) + cf_source
+ if is_url(cf_url) and check_url(cf_url):
+ cf_pathid = PathIdentifier.from_target(cf_url)
+
+ if target_pathid == cf_pathid:
+ report('the source "%s" was copied from "%s" in rev %s and committed in rev %s' %
+ (source_url, target_dir, cf_rev, copy_committed_in_rev))
+ revision_range = "1-" + str(copy_committed_in_rev)
+
+ if not revision_range:
+ # If the reverse is true: copy source is the merge source, and
+ # the copy target is the merge target, then we want to mark as
+ # integrated up to the specific rev of the merge target from
+ # which the merge source was copied. (Longer discussion at:
+ # http://subversion.tigris.org/issues/show_bug.cgi?id=2810 )
+ cf_source, cf_rev, copy_committed_in_rev = get_copyfrom(target_dir)
+
+ cf_pathid = None
+ if cf_source:
+ cf_url = get_repo_root(target_dir) + cf_source
+ if is_url(cf_url) and check_url(cf_url):
+ cf_pathid = PathIdentifier.from_target(cf_url)
+
+ source_pathid = PathIdentifier.from_target(source_url)
+ if source_pathid == cf_pathid:
+ report('the target "%s" was copied from the source "%s" in rev %s and committed in rev %s' %
+ (target_dir, source_url, cf_rev, copy_committed_in_rev))
+ revision_range = "1-" + cf_rev
+
+ # When neither the merge source nor target is a copy of the other, and
+ # the user did not specify a revision range, then choose a default which is
+ # the current revision; saying, in effect, "everything has been merged, so
+ # mark as integrated up to the latest rev on source url".
+ if not revision_range:
+ revision_range = "1-" + get_latest_rev(source_url)
+
+ revs = RevisionSet(revision_range)
+
+ report('marking "%s" as already containing revisions "%s" of "%s"' %
+ (target_dir, revs, source_url))
+
+ revs = str(revs)
+ # If the local svnmerge-integrated property already has an entry
+ # for the source-pathid, simply error out.
+ if not opts["force"] and target_props.has_key(source_pathid):
+ error('Repository-relative path %s has already been initialized at %s\n'
+ 'Use --force to re-initialize' % (source_pathid, target_dir))
+ # set the pathid's external_form based on the user's options
+ source_pathid.external_form = source_pathid.format(opts['location-type'])
+
+ revs = str(revs)
+ target_props[source_pathid] = revs
+
+ # Set property
+ set_merge_props(target_dir, target_props)
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ print >>f, 'Initialized merge tracking via "%s" with revisions "%s" from ' \
+ % (NAME, revs)
+ print >>f, '%s' % source_url
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+def action_avail(branch_dir, branch_props):
+ """Show commits available for merges."""
+ source_revs, phantom_revs, reflected_revs, initialized_revs = \
+ analyze_source_revs(branch_dir, opts["source-url"],
+ find_reflected=
+ should_find_reflected(branch_dir))
+ report('skipping phantom revisions: %s' % phantom_revs)
+ if reflected_revs:
+ report('skipping reflected revisions: %s' % reflected_revs)
+ report('skipping initialized revisions: %s' % initialized_revs)
+
+ blocked_revs = get_blocked_revs(branch_dir, opts["source-pathid"])
+ avail_revs = source_revs - opts["merged-revs"] - blocked_revs - \
+ reflected_revs - initialized_revs
+
+ # Compose the set of revisions to show
+ revs = RevisionSet("")
+ report_msg = "revisions available to be merged are:"
+ if "avail" in opts["avail-showwhat"]:
+ revs |= avail_revs
+ if "blocked" in opts["avail-showwhat"]:
+ revs |= blocked_revs
+ report_msg = "revisions blocked are:"
+
+ # Limit to revisions specified by -r (if any)
+ if opts["revision"]:
+ revs = revs & RevisionSet(opts["revision"])
+
+ display_revisions(revs, opts["avail-display"],
+ report_msg,
+ opts["source-url"])
+
+def action_integrated(branch_dir, branch_props):
+ """Show change sets already merged. This set of revisions is
+ calculated from taking svnmerge-integrated property from the
+ branch, and subtracting any revision older than the branch
+ creation revision."""
+ # Extract the integration info for the branch_dir
+ branch_props = get_merge_props(branch_dir)
+ revs = merge_props_to_revision_set(branch_props, opts["source-pathid"])
+
+ # Lookup the oldest revision on the branch path.
+ oldest_src_rev = get_created_rev(opts["source-url"])
+
+ # Subtract any revisions which pre-date the branch.
+ report("subtracting revisions which pre-date the source URL (%d)" %
+ oldest_src_rev)
+ revs = revs - RevisionSet(range(1, oldest_src_rev))
+
+ # Limit to revisions specified by -r (if any)
+ if opts["revision"]:
+ revs = revs & RevisionSet(opts["revision"])
+
+ display_revisions(revs, opts["integrated-display"],
+ "revisions already integrated are:", opts["source-url"])
+
+def action_merge(branch_dir, branch_props):
+ """Record merge meta data, and do the actual merge (if not
+ requested otherwise via --record-only)."""
+ # Check branch directory is ready for being modified
+ check_dir_clean(branch_dir)
+
+ source_revs, phantom_revs, reflected_revs, initialized_revs = \
+ analyze_source_revs(branch_dir, opts["source-url"],
+ find_reflected=
+ should_find_reflected(branch_dir))
+
+ if opts["revision"]:
+ revs = RevisionSet(opts["revision"])
+ else:
+ revs = source_revs
+
+ blocked_revs = get_blocked_revs(branch_dir, opts["source-pathid"])
+ merged_revs = opts["merged-revs"]
+
+ # Show what we're doing
+ if opts["verbose"]: # just to avoid useless calculations
+ if merged_revs & revs:
+ report('"%s" already contains revisions %s' % (branch_dir,
+ merged_revs & revs))
+ if phantom_revs:
+ report('memorizing phantom revision(s): %s' % phantom_revs)
+ if reflected_revs:
+ report('memorizing reflected revision(s): %s' % reflected_revs)
+ if blocked_revs & revs:
+ report('skipping blocked revision(s): %s' % (blocked_revs & revs))
+ if initialized_revs:
+ report('skipping initialized revision(s): %s' % initialized_revs)
+
+ # Compute final merge set.
+ revs = revs - merged_revs - blocked_revs - reflected_revs - \
+ phantom_revs - initialized_revs
+ if not revs:
+ report('no revisions to merge, exiting')
+ return
+
+ # When manually marking revisions as merged, we only update the
+ # integration meta data, and don't perform an actual merge.
+ record_only = opts["record-only"]
+
+ if record_only:
+ report('recording merge of revision(s) %s from "%s"' %
+ (revs, opts["source-url"]))
+ else:
+ report('merging in revision(s) %s from "%s"' %
+ (revs, opts["source-url"]))
+
+ # Do the merge(s). Note: the starting revision number to 'svn merge'
+ # is NOT inclusive so we have to subtract one from start.
+ # We try to keep the number of merge operations as low as possible,
+ # because it is faster and reduces the number of conflicts.
+ old_block_props = get_block_props(branch_dir)
+ merge_metadata = logs[opts["source-url"]].merge_metadata()
+ block_metadata = logs[opts["source-url"]].block_metadata()
+ for start,end in minimal_merge_intervals(revs, phantom_revs):
+ if not record_only:
+ # Preset merge/blocked properties to the source value at
+ # the start rev to avoid spurious property conflicts
+ set_merge_props(branch_dir, merge_metadata.get(start - 1))
+ set_block_props(branch_dir, block_metadata.get(start - 1))
+ # Do the merge
+ svn_command("merge --force -r %d:%d %s %s" % \
+ (start - 1, end, opts["source-url"], branch_dir))
+ # TODO: to support graph merging, add logic to merge the property
+ # meta-data manually
+
+ # Update the set of merged revisions.
+ merged_revs = merged_revs | revs | reflected_revs | phantom_revs | initialized_revs
+ branch_props[opts["source-pathid"]] = str(merged_revs)
+ set_merge_props(branch_dir, branch_props)
+ # Reset the blocked revs
+ set_block_props(branch_dir, old_block_props)
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ if record_only:
+ print >>f, 'Recorded merge of revisions %s via %s from ' % \
+ (revs, NAME)
+ else:
+ print >>f, 'Merged revisions %s via %s from ' % \
+ (revs, NAME)
+ print >>f, '%s' % opts["source-url"]
+ if opts["commit-verbose"]:
+ print >>f
+ print >>f, construct_merged_log_message(opts["source-url"], revs),
+
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+def action_block(branch_dir, branch_props):
+ """Block revisions."""
+ # Check branch directory is ready for being modified
+ check_dir_clean(branch_dir)
+
+ source_revs, phantom_revs, reflected_revs, initialized_revs = \
+ analyze_source_revs(branch_dir, opts["source-url"])
+ revs_to_block = source_revs - opts["merged-revs"]
+
+ # Limit to revisions specified by -r (if any)
+ if opts["revision"]:
+ revs_to_block = RevisionSet(opts["revision"]) & revs_to_block
+
+ if not revs_to_block:
+ error('no available revisions to block')
+
+ # Change blocked information
+ blocked_revs = get_blocked_revs(branch_dir, opts["source-pathid"])
+ blocked_revs = blocked_revs | revs_to_block
+ set_blocked_revs(branch_dir, opts["source-pathid"], blocked_revs)
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ print >>f, 'Blocked revisions %s via %s' % (revs_to_block, NAME)
+ if opts["commit-verbose"]:
+ print >>f
+ print >>f, construct_merged_log_message(opts["source-url"],
+ revs_to_block),
+
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+def action_unblock(branch_dir, branch_props):
+ """Unblock revisions."""
+ # Check branch directory is ready for being modified
+ check_dir_clean(branch_dir)
+
+ blocked_revs = get_blocked_revs(branch_dir, opts["source-pathid"])
+ revs_to_unblock = blocked_revs
+
+ # Limit to revisions specified by -r (if any)
+ if opts["revision"]:
+ revs_to_unblock = revs_to_unblock & RevisionSet(opts["revision"])
+
+ if not revs_to_unblock:
+ error('no available revisions to unblock')
+
+ # Change blocked information
+ blocked_revs = blocked_revs - revs_to_unblock
+ set_blocked_revs(branch_dir, opts["source-pathid"], blocked_revs)
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ print >>f, 'Unblocked revisions %s via %s' % (revs_to_unblock, NAME)
+ if opts["commit-verbose"]:
+ print >>f
+ print >>f, construct_merged_log_message(opts["source-url"],
+ revs_to_unblock),
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+def action_rollback(branch_dir, branch_props):
+ """Rollback previously integrated revisions."""
+
+ # Make sure the revision arguments are present
+ if not opts["revision"]:
+ error("The '-r' option is mandatory for rollback")
+
+ # Check branch directory is ready for being modified
+ check_dir_clean(branch_dir)
+
+ # Extract the integration info for the branch_dir
+ branch_props = get_merge_props(branch_dir)
+ # Get the list of all revisions already merged into this source-pathid.
+ merged_revs = merge_props_to_revision_set(branch_props,
+ opts["source-pathid"])
+
+ # At which revision was the src created?
+ oldest_src_rev = get_created_rev(opts["source-url"])
+ src_pre_exist_range = RevisionSet("1-%d" % oldest_src_rev)
+
+ # Limit to revisions specified by -r (if any)
+ revs = merged_revs & RevisionSet(opts["revision"])
+
+ # make sure there's some revision to rollback
+ if not revs:
+ report("Nothing to rollback in revision range r%s" % opts["revision"])
+ return
+
+ # If even one specified revision lies outside the lifetime of the
+ # merge source, error out.
+ if revs & src_pre_exist_range:
+ err_str = "Specified revision range falls out of the rollback range.\n"
+ err_str += "%s was created at r%d" % (opts["source-pathid"],
+ oldest_src_rev)
+ error(err_str)
+
+ record_only = opts["record-only"]
+
+ if record_only:
+ report('recording rollback of revision(s) %s from "%s"' %
+ (revs, opts["source-url"]))
+ else:
+ report('rollback of revision(s) %s from "%s"' %
+ (revs, opts["source-url"]))
+
+ # Do the reverse merge(s). Note: the starting revision number
+ # to 'svn merge' is NOT inclusive so we have to subtract one from start.
+ # We try to keep the number of merge operations as low as possible,
+ # because it is faster and reduces the number of conflicts.
+ rollback_intervals = minimal_merge_intervals(revs, [])
+ # rollback in the reverse order of merge
+ rollback_intervals.reverse()
+ for start, end in rollback_intervals:
+ if not record_only:
+ # Do the merge
+ svn_command("merge --force -r %d:%d %s %s" % \
+ (end, start - 1, opts["source-url"], branch_dir))
+
+ # Write out commit message if desired
+ # calculate the phantom revs first
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ if record_only:
+ print >>f, 'Recorded rollback of revisions %s via %s from ' % \
+ (revs , NAME)
+ else:
+ print >>f, 'Rolled back revisions %s via %s from ' % \
+ (revs , NAME)
+ print >>f, '%s' % opts["source-url"]
+
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+ # Update the set of merged revisions.
+ merged_revs = merged_revs - revs
+ branch_props[opts["source-pathid"]] = str(merged_revs)
+ set_merge_props(branch_dir, branch_props)
+
+def action_uninit(branch_dir, branch_props):
+ """Uninit SOURCE URL."""
+ # Check branch directory is ready for being modified
+ check_dir_clean(branch_dir)
+
+ # If the source-pathid does not have an entry in the svnmerge-integrated
+ # property, simply error out.
+ if not branch_props.has_key(opts["source-pathid"]):
+ error('Repository-relative path "%s" does not contain merge '
+ 'tracking information for "%s"' \
+ % (opts["source-pathid"], branch_dir))
+
+ del branch_props[opts["source-pathid"]]
+
+ # Set merge property with the selected source deleted
+ set_merge_props(branch_dir, branch_props)
+
+ # Set blocked revisions for the selected source to None
+ set_blocked_revs(branch_dir, opts["source-pathid"], None)
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ print >>f, 'Removed merge tracking for "%s" for ' % NAME
+ print >>f, '%s' % opts["source-url"]
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+###############################################################################
+# Command line parsing -- options and commands management
+###############################################################################
+
+class OptBase:
+ def __init__(self, *args, **kwargs):
+ self.help = kwargs["help"]
+ del kwargs["help"]
+ self.lflags = []
+ self.sflags = []
+ for a in args:
+ if a.startswith("--"): self.lflags.append(a)
+ elif a.startswith("-"): self.sflags.append(a)
+ else:
+ raise TypeError, "invalid flag name: %s" % a
+ if kwargs.has_key("dest"):
+ self.dest = kwargs["dest"]
+ del kwargs["dest"]
+ else:
+ if not self.lflags:
+ raise TypeError, "cannot deduce dest name without long options"
+ self.dest = self.lflags[0][2:]
+ if kwargs:
+ raise TypeError, "invalid keyword arguments: %r" % kwargs.keys()
+ def repr_flags(self):
+ f = self.sflags + self.lflags
+ r = f[0]
+ for fl in f[1:]:
+ r += " [%s]" % fl
+ return r
+
+class Option(OptBase):
+ def __init__(self, *args, **kwargs):
+ self.default = kwargs.setdefault("default", 0)
+ del kwargs["default"]
+ self.value = kwargs.setdefault("value", None)
+ del kwargs["value"]
+ OptBase.__init__(self, *args, **kwargs)
+ def apply(self, state, value):
+ assert value == ""
+ if self.value is not None:
+ state[self.dest] = self.value
+ else:
+ state[self.dest] += 1
+
+class OptionArg(OptBase):
+ def __init__(self, *args, **kwargs):
+ self.default = kwargs["default"]
+ del kwargs["default"]
+ self.metavar = kwargs.setdefault("metavar", None)
+ del kwargs["metavar"]
+ OptBase.__init__(self, *args, **kwargs)
+
+ if self.metavar is None:
+ if self.dest is not None:
+ self.metavar = self.dest.upper()
+ else:
+ self.metavar = "arg"
+ if self.default:
+ self.help += " (default: %s)" % self.default
+ def apply(self, state, value):
+ assert value is not None
+ state[self.dest] = value
+ def repr_flags(self):
+ r = OptBase.repr_flags(self)
+ return r + " " + self.metavar
+
+class CommandOpts:
+ class Cmd:
+ def __init__(self, *args):
+ self.name, self.func, self.usage, self.help, self.opts = args
+ def short_help(self):
+ return self.help.split(".")[0]
+ def __str__(self):
+ return self.name
+ def __call__(self, *args, **kwargs):
+ return self.func(*args, **kwargs)
+
+ def __init__(self, global_opts, common_opts, command_table, version=None):
+ self.progname = NAME
+ self.version = version.replace("%prog", self.progname)
+ self.cwidth = console_width() - 2
+ self.ctable = command_table.copy()
+ self.gopts = global_opts[:]
+ self.copts = common_opts[:]
+ self._add_builtins()
+ for k in self.ctable.keys():
+ cmd = self.Cmd(k, *self.ctable[k])
+ opts = []
+ for o in cmd.opts:
+ if isinstance(o, types.StringType) or \
+ isinstance(o, types.UnicodeType):
+ o = self._find_common(o)
+ opts.append(o)
+ cmd.opts = opts
+ self.ctable[k] = cmd
+
+ def _add_builtins(self):
+ self.gopts.append(
+ Option("-h", "--help", help="show help for this command and exit"))
+ if self.version is not None:
+ self.gopts.append(
+ Option("-V", "--version", help="show version info and exit"))
+ self.ctable["help"] = (self._cmd_help,
+ "help [COMMAND]",
+ "Display help for a specific command. If COMMAND is omitted, "
+ "display brief command description.",
+ [])
+
+ def _cmd_help(self, cmd=None, *args):
+ if args:
+ self.error("wrong number of arguments", "help")
+ if cmd is not None:
+ cmd = self._command(cmd)
+ self.print_command_help(cmd)
+ else:
+ self.print_command_list()
+
+ def _paragraph(self, text, width=78):
+ chunks = re.split("\s+", text.strip())
+ chunks.reverse()
+ lines = []
+ while chunks:
+ L = chunks.pop()
+ while chunks and len(L) + len(chunks[-1]) + 1 <= width:
+ L += " " + chunks.pop()
+ lines.append(L)
+ return lines
+
+ def _paragraphs(self, text, *args, **kwargs):
+ pars = text.split("\n\n")
+ lines = self._paragraph(pars[0], *args, **kwargs)
+ for p in pars[1:]:
+ lines.append("")
+ lines.extend(self._paragraph(p, *args, **kwargs))
+ return lines
+
+ def _print_wrapped(self, text, indent=0):
+ text = self._paragraphs(text, self.cwidth - indent)
+ print text.pop(0)
+ for t in text:
+ print " " * indent + t
+
+ def _find_common(self, fl):
+ for o in self.copts:
+ if fl in o.lflags+o.sflags:
+ return o
+ assert False, fl
+
+ def _compute_flags(self, opts, check_conflicts=True):
+ back = {}
+ sfl = ""
+ lfl = []
+ for o in opts:
+ sapp = lapp = ""
+ if isinstance(o, OptionArg):
+ sapp, lapp = ":", "="
+ for s in o.sflags:
+ if check_conflicts and back.has_key(s):
+ raise RuntimeError, "option conflict: %s" % s
+ back[s] = o
+ sfl += s[1:] + sapp
+ for l in o.lflags:
+ if check_conflicts and back.has_key(l):
+ raise RuntimeError, "option conflict: %s" % l
+ back[l] = o
+ lfl.append(l[2:] + lapp)
+ return sfl, lfl, back
+
+ def _extract_command(self, args):
+ """
+ Try to extract the command name from the argument list. This is
+ non-trivial because we want to allow command-specific options even
+ before the command itself.
+ """
+ opts = self.gopts[:]
+ for cmd in self.ctable.values():
+ opts.extend(cmd.opts)
+ sfl, lfl, _ = self._compute_flags(opts, check_conflicts=False)
+
+ lopts,largs = getopt.getopt(args, sfl, lfl)
+ if not largs:
+ return None
+ return self._command(largs[0])
+
+ def _fancy_getopt(self, args, opts, state=None):
+ if state is None:
+ state= {}
+ for o in opts:
+ if not state.has_key(o.dest):
+ state[o.dest] = o.default
+
+ sfl, lfl, back = self._compute_flags(opts)
+ try:
+ lopts,args = getopt.gnu_getopt(args, sfl, lfl)
+ except AttributeError:
+ # Before Python 2.3, there was no gnu_getopt support.
+ # So we can't parse intermixed positional arguments
+ # and options.
+ lopts,args = getopt.getopt(args, sfl, lfl)
+
+ for o,v in lopts:
+ back[o].apply(state, v)
+ return state, args
+
+ def _command(self, cmd):
+ if not self.ctable.has_key(cmd):
+ self.error("unknown command: '%s'" % cmd)
+ return self.ctable[cmd]
+
+ def parse(self, args):
+ if not args:
+ self.print_small_help()
+ sys.exit(0)
+
+ cmd = None
+ try:
+ cmd = self._extract_command(args)
+ opts = self.gopts[:]
+ if cmd:
+ opts.extend(cmd.opts)
+ args.remove(cmd.name)
+ state, args = self._fancy_getopt(args, opts)
+ except getopt.GetoptError, e:
+ self.error(e, cmd)
+
+ # Handle builtins
+ if self.version is not None and state["version"]:
+ self.print_version()
+ sys.exit(0)
+ if state["help"]: # special case for --help
+ if cmd:
+ self.print_command_help(cmd)
+ sys.exit(0)
+ cmd = self.ctable["help"]
+ else:
+ if cmd is None:
+ self.error("command argument required")
+ if str(cmd) == "help":
+ cmd(*args)
+ sys.exit(0)
+ return cmd, args, state
+
+ def error(self, s, cmd=None):
+ print >>sys.stderr, "%s: %s" % (self.progname, s)
+ if cmd is not None:
+ self.print_command_help(cmd)
+ else:
+ self.print_small_help()
+ sys.exit(1)
+ def print_small_help(self):
+ print "Type '%s help' for usage" % self.progname
+ def print_usage_line(self):
+ print "usage: %s <subcommand> [options...] [args...]\n" % self.progname
+ def print_command_list(self):
+ print "Available commands (use '%s help COMMAND' for more details):\n" \
+ % self.progname
+ cmds = self.ctable.keys()
+ cmds.sort()
+ indent = max(map(len, cmds))
+ for c in cmds:
+ h = self.ctable[c].short_help()
+ print " %-*s " % (indent, c),
+ self._print_wrapped(h, indent+6)
+ def print_command_help(self, cmd):
+ cmd = self.ctable[str(cmd)]
+ print 'usage: %s %s\n' % (self.progname, cmd.usage)
+ self._print_wrapped(cmd.help)
+ def print_opts(opts, self=self):
+ if not opts: return
+ flags = [o.repr_flags() for o in opts]
+ indent = max(map(len, flags))
+ for f,o in zip(flags, opts):
+ print " %-*s :" % (indent, f),
+ self._print_wrapped(o.help, indent+5)
+ print '\nCommand options:'
+ print_opts(cmd.opts)
+ print '\nGlobal options:'
+ print_opts(self.gopts)
+
+ def print_version(self):
+ print self.version
+
+###############################################################################
+# Options and Commands description
+###############################################################################
+
+global_opts = [
+ Option("-F", "--force",
+ help="force operation even if the working copy is not clean, or "
+ "there are pending updates"),
+ Option("-n", "--dry-run",
+ help="don't actually change anything, just pretend; "
+ "implies --show-changes"),
+ Option("-s", "--show-changes",
+ help="show subversion commands that make changes"),
+ Option("-v", "--verbose",
+ help="verbose mode: output more information about progress"),
+ OptionArg("-u", "--username",
+ default=None,
+ help="invoke subversion commands with the supplied username"),
+ OptionArg("-p", "--password",
+ default=None,
+ help="invoke subversion commands with the supplied password"),
+ OptionArg("-c", "--config-dir", metavar="DIR",
+ default=None,
+ help="cause subversion commands to consult runtime config directory DIR"),
+]
+
+common_opts = [
+ Option("-b", "--bidirectional",
+ value=True,
+ default=False,
+ help="remove reflected and initialized revisions from merge candidates. "
+ "Not required but may be specified to speed things up slightly"),
+ OptionArg("-f", "--commit-file", metavar="FILE",
+ default="svnmerge-commit-message.txt",
+ help="set the name of the file where the suggested log message "
+ "is written to"),
+ Option("-M", "--record-only",
+ value=True,
+ default=False,
+ help="do not perform an actual merge of the changes, yet record "
+ "that a merge happened"),
+ OptionArg("-r", "--revision",
+ metavar="REVLIST",
+ default="",
+ help="specify a revision list, consisting of revision numbers "
+ 'and ranges separated by commas, e.g., "534,537-539,540"'),
+ OptionArg("-S", "--source", "--head",
+ default=None,
+ help="specify a merge source for this branch. It can be either "
+ "a working directory path, a full URL, or an unambiguous "
+ "substring of one of the locations for which merge tracking was "
+ "already initialized. Needed only to disambiguate in case of "
+ "multiple merge sources"),
+]
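+# Command definitions below reference these common options by flag string
+# (e.g. "-r", "-S"); such strings are resolved back to the matching Option
+# objects (see _find_common above).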
+
+command_table = {
+ "init": (action_init,
+ "init [OPTION...] [SOURCE]",
+ """Initialize merge tracking from SOURCE on the current working
+ directory.
+
+ If SOURCE is specified, all the revisions in SOURCE are marked as already
+ merged; if this is not correct, you can use --revision to specify the
+ exact list of already-merged revisions.
+
+ If SOURCE is omitted, then it is computed from the "svn cp" history of the
+ current working directory (searching back for the branch point); in this
+ case, %s assumes that no revision has been integrated yet since
+ the branch point (unless you teach it with --revision).""" % NAME,
+ [
+ "-f", "-r", # import common opts
+ OptionArg("-L", "--location-type",
+ dest="location-type",
+ default="path",
+ help="Use this type of location identifier in the new " +
+ "Subversion properties; 'uuid', 'url', or 'path' " +
+ "(default)"),
+ ]),
+
+ "avail": (action_avail,
+ "avail [OPTION...] [PATH]",
+ """Show unmerged revisions available for PATH as a revision list.
+ If --revision is given, the revisions shown will be limited to those
+ also specified in the option.
+
+ When svnmerge is used to bidirectionally merge changes between a
+ branch and its source, it is necessary not to merge the same changes
+ back and forth: e.g., if you committed a merge of a certain
+ revision of the branch into the source, you do not want that commit
+ to appear as available to be merged into the branch (as the code
+ originated in the branch itself!). svnmerge will automatically
+ exclude these so-called "reflected" revisions.""",
+ [
+ Option("-A", "--all",
+ dest="avail-showwhat",
+ value=["blocked", "avail"],
+ default=["avail"],
+ help="show both available and blocked revisions (aka ignore "
+ "blocked revisions)"),
+ "-b",
+ Option("-B", "--blocked",
+ dest="avail-showwhat",
+ value=["blocked"],
+ help="show the blocked revision list (see '%s block')" % NAME),
+ Option("-d", "--diff",
+ dest="avail-display",
+ value="diffs",
+ default="revisions",
+ help="show corresponding diff instead of revision list"),
+ Option("--summarize",
+ dest="avail-display",
+ value="summarize",
+ help="show summarized diff instead of revision list"),
+ Option("-l", "--log",
+ dest="avail-display",
+ value="logs",
+ help="show corresponding log history instead of revision list"),
+ "-r",
+ "-S",
+ ]),
+
+ "integrated": (action_integrated,
+ "integrated [OPTION...] [PATH]",
+ """Show merged revisions available for PATH as a revision list.
+ If --revision is given, the revisions shown will be limited to
+ those also specified in the option.""",
+ [
+ Option("-d", "--diff",
+ dest="integrated-display",
+ value="diffs",
+ default="revisions",
+ help="show corresponding diff instead of revision list"),
+ Option("-l", "--log",
+ dest="integrated-display",
+ value="logs",
+ help="show corresponding log history instead of revision list"),
+ "-r",
+ "-S",
+ ]),
+
+ "rollback": (action_rollback,
+ "rollback [OPTION...] [PATH]",
+ """Rollback previously merged in revisions from PATH. The
+ --revision option is mandatory, and specifies which revisions
+ will be rolled back. Only the previously integrated merges
+ will be rolled back.
+
+ When manually rolling back changes, --record-only can be used to
+ instruct %s that a manual rollback of a certain revision
+ already happened, so that it can record it and once again offer that
+ revision for merging.""" % (NAME),
+ [
+ "-f", "-r", "-S", "-M", # import common opts
+ ]),
+
+ "merge": (action_merge,
+ "merge [OPTION...] [PATH]",
+ """Merge in revisions into PATH from its source. If --revision is omitted,
+ all the available revisions will be merged. In any case, already merged-in
+ revisions will NOT be merged again.
+
+ When svnmerge is used to bidirectionally merge changes between a
+ branch and its source, it is necessary not to merge the same changes
+ back and forth: e.g., if you committed a merge of a certain
+ revision of the branch into the source, you do not want that commit
+ to appear as available to be merged into the branch (as the code
+ originated in the branch itself!). svnmerge will automatically
+ exclude these so-called "reflected" revisions.
+
+ When manually merging changes across branches, --record-only can
+ be used to instruct %s that a manual merge of a certain revision
+ already happened, so that it can record it and not offer that
+ revision for merge anymore. Conversely, when there are revisions
+ which should not be merged, use '%s block'.""" % (NAME, NAME),
+ [
+ "-b", "-f", "-r", "-S", "-M", # import common opts
+ ]),
+
+ "block": (action_block,
+ "block [OPTION...] [PATH]",
+ """Block revisions within PATH so that they disappear from the available
+ list. This is useful to hide revisions which will not be integrated.
+ If --revision is omitted, it defaults to all the available revisions.
+
+ Do not use this command to hide revisions that were manually merged
+ into the branch. Instead, use '%s merge --record-only', which
+ records that a merge happened (as opposed to a merge which should
+ not happen).""" % NAME,
+ [
+ "-f", "-r", "-S", # import common opts
+ ]),
+
+ "unblock": (action_unblock,
+ "unblock [OPTION...] [PATH]",
+ """Revert the effect of '%s block'. If --revision is omitted, all the
+ blocked revisions are unblocked.""" % NAME,
+ [
+ "-f", "-r", "-S", # import common opts
+ ]),
+
+ "uninit": (action_uninit,
+ "uninit [OPTION...] [PATH]",
+ """Remove merge tracking information from PATH. It cleans any kind of merge
+ tracking information (including the list of blocked revisions). If there
+ are multiple sources, use --source to indicate which source you want to
+ forget about.""",
+ [
+ "-f", "-S", # import common opts
+ ]),
+}
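+# A few illustrative invocations (hypothetical repository URL and revision
+# numbers; the program name depends on how the script is installed):
+#   svnmerge init http://svn.example.com/repos/trunk
+#   svnmerge avail -l               # unmerged revisions with log messages
+#   svnmerge merge -r 534,537-539   # merge only the listed revisions
+#   svnmerge block -r 540           # hide a revision from the available list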
+
+
+def main(args):
+ global opts
+
+ # Initialize default options
+ opts = default_opts.copy()
+ logs.clear()
+
+ optsparser = CommandOpts(global_opts, common_opts, command_table,
+ version="%%prog r%s\n modified: %s\n\n"
+ "Copyright (C) 2004,2005 Awarix Inc.\n"
+ "Copyright (C) 2005, Giovanni Bajo"
+ % (__revision__, __date__))
+
+ cmd, args, state = optsparser.parse(args)
+ opts.update(state)
+
+ source = opts.get("source", None)
+ branch_dir = "."
+
+ if str(cmd) == "init":
+ if len(args) == 1:
+ source = args[0]
+ elif len(args) > 1:
+ optsparser.error("wrong number of parameters", cmd)
+ elif str(cmd) in command_table.keys():
+ if len(args) == 1:
+ branch_dir = args[0]
+ elif len(args) > 1:
+ optsparser.error("wrong number of parameters", cmd)
+ else:
+ assert False, "command not handled: %s" % cmd
+
+ # Validate branch_dir
+ if not is_wc(branch_dir):
+ if str(cmd) == "avail":
+ info = None
+ # Note that "svn info" does not exit with an error when given an
+ # invalid target (as one might expect it to), so the try/except
+ # block is not strictly necessary; it is kept to make the intended
+ # handling explicit.
+ try:
+ info = get_svninfo(branch_dir)
+ except LaunchError:
+ pass
+ # test that we definitely targeted a subversion directory,
+ # mirroring the purpose of the earlier is_wc() call
+ if info is None or not info.has_key("Node Kind") or info["Node Kind"] != "directory":
+ error('"%s" is neither a valid URL, nor a working directory' % branch_dir)
+ else:
+ error('"%s" is not a subversion working directory' % branch_dir)
+
+ # Give out some hints as to potential pathids
+ PathIdentifier.hint(branch_dir)
+ if source: PathIdentifier.hint(source)
+
+ # Extract the integration info for the branch_dir
+ branch_props = get_merge_props(branch_dir)
+
+ # Calculate source_url and source_path
+ report("calculate source path for the branch")
+ if not source:
+ if str(cmd) == "init":
+ cf_source, cf_rev, copy_committed_in_rev = get_copyfrom(branch_dir)
+ if not cf_source:
+ error('no copyfrom info available. '
+ 'Explicit source argument (-S/--source) required.')
+ opts["source-url"] = get_repo_root(branch_dir) + cf_source
+ opts["source-pathid"] = PathIdentifier.from_target(opts["source-url"])
+
+ if not opts["revision"]:
+ opts["revision"] = "1-" + cf_rev
+ else:
+ opts["source-pathid"] = get_default_source(branch_dir, branch_props)
+ opts["source-url"] = opts["source-pathid"].get_url()
+
+ assert is_pathid(opts["source-pathid"])
+ assert is_url(opts["source-url"])
+ else:
+ # The source was given as a command line argument and is stored in
+ # SOURCE. Ensure that the specified source does not end in a /,
+ # otherwise it's easy to have the same source path listed more
+ # than once in the integrated version properties, with and without
+ # trailing /'s.
+ source = rstrip(source, "/")
+ if not is_wc(source) and not is_url(source):
+ # Check if it is a substring of a pathid recorded
+ # within the branch properties.
+ found = []
+ for pathid in branch_props.keys():
+ if pathid.match_substring(source):
+ found.append(pathid)
+ if len(found) == 1:
+ # (assumes pathid is a repository-relative-path)
+ source_pathid = found[0]
+ source = source_pathid.get_url()
+ else:
+ error('"%s" is neither a valid URL, nor an unambiguous '
+ 'substring of a repository path, nor a working directory'
+ % source)
+ else:
+ source_pathid = PathIdentifier.from_target(source)
+
+ source_pathid = PathIdentifier.from_target(source)
+ if str(cmd) == "init" and \
+ source_pathid == PathIdentifier.from_target("."):
+ error("cannot init integration source path '%s'\n"
+ "Its repository-relative path must differ from the "
+ "repository-relative path of the current directory."
+ % source_pathid)
+ opts["source-pathid"] = source_pathid
+ opts["source-url"] = target_to_url(source)
+
+ # Sanity check source_url
+ assert is_url(opts["source-url"])
+ # SVN does not support non-normalized URLs (and we should not
+ # have created any)
+ assert opts["source-url"].find("/..") < 0
+
+ report('source is "%s"' % opts["source-url"])
+
+ # Get previously merged revisions (except when command is init)
+ if str(cmd) != "init":
+ opts["merged-revs"] = merge_props_to_revision_set(branch_props,
+ opts["source-pathid"])
+
+ # Perform the action
+ cmd(branch_dir, branch_props)
+
+
+if __name__ == "__main__":
+ try:
+ main(sys.argv[1:])
+ except LaunchError, (ret, cmd, out):
+ err_msg = "command execution failed (exit code: %d)\n" % ret
+ err_msg += cmd + "\n"
+ err_msg += "".join(out)
+ error(err_msg)
+ except KeyboardInterrupt:
+ # Avoid traceback on CTRL+C
+ print "aborted by user"
+ sys.exit(1)
diff --git a/testing/subversion/svnserve b/testing/subversion/svnserve
new file mode 100755
index 000000000..670fee742
--- /dev/null
+++ b/testing/subversion/svnserve
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. /etc/rc.conf
+. /etc/rc.d/functions
+. /etc/conf.d/svnserve
+
+PID=`pidof -o %PPID /usr/bin/svnserve`
+case "$1" in
+ start)
+ stat_busy "Starting svnserve"
+ if [ -z "$PID" ]; then
+ if [ -n "$SVNSERVE_USER" ]; then
+ su -s '/bin/sh' $SVNSERVE_USER -c "/usr/bin/svnserve -d $SVNSERVE_ARGS" &
+ else
+ /usr/bin/svnserve -d $SVNSERVE_ARGS &
+ fi
+ fi
+ if [ ! -z "$PID" -o $? -gt 0 ]; then
+ stat_fail
+ else
+ add_daemon svnserve
+ stat_done
+ fi
+ ;;
+ stop)
+ stat_busy "Stopping svnserve"
+ [ ! -z "$PID" ] && kill $PID &> /dev/null
+ if [ $? -gt 0 ]; then
+ stat_fail
+ else
+ rm_daemon svnserve
+ stat_done
+ fi
+ ;;
+ restart)
+ $0 stop
+ sleep 1
+ $0 start
+ ;;
+ *)
+ echo "usage: $0 {start|stop|restart}"
+esac
diff --git a/testing/subversion/svnserve.conf b/testing/subversion/svnserve.conf
new file mode 100644
index 000000000..37fb7ea10
--- /dev/null
+++ b/testing/subversion/svnserve.conf
@@ -0,0 +1,7 @@
+#
+# Parameters to be passed to svnserve
+#
+#SVNSERVE_ARGS="-r /path/to/some/repos"
+SVNSERVE_ARGS=""
+
+#SVNSERVE_USER="svn"