author    Joshua Ismael Haase Hernández <hahj87@gmail.com>  2011-06-05 20:20:25 -0500
committer Joshua Ismael Haase Hernández <hahj87@gmail.com>  2011-06-05 20:20:25 -0500
commit    33c22f43a52aae722ce4d9cbe47f8d2fff31f395 (patch)
tree      71f9661d9a843cca28225852f8784e48a2b96cb2 /testing
parent    c87732e5659b56943ef4f120d7c71dcaabc3f849 (diff)
parent    3695b5d62c2aef6e82abc95d775a2ebd89bce081 (diff)
Merge branch 'master' of vparabola:~/abslibre-pre-mips64el
Conflicts:
	multilib-testing/lib32-mesa/PKGBUILD
	multilib/lib32-glew/PKGBUILD
	testing/iproute2/PKGBUILD
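For reference, a merge like this is finished by hand; a minimal sketch of the workflow, using the conflicted paths listed above (the exact commands are assumed, not recorded in the commit):

    $ git pull vparabola:~/abslibre-pre-mips64el master
    # CONFLICT (content): merge conflicts in the three PKGBUILDs above
    $ $EDITOR testing/iproute2/PKGBUILD    # resolve the <<<<<<< / >>>>>>> markers
    $ git add multilib-testing/lib32-mesa/PKGBUILD \
              multilib/lib32-glew/PKGBUILD testing/iproute2/PKGBUILD
    $ git commit                           # records the merge commit shown here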
Diffstat (limited to 'testing')
-rw-r--r--  testing/coreutils/PKGBUILD                                         |   69
-rw-r--r--  testing/coreutils/coreutils-pam.patch                              |  428
-rw-r--r--  testing/coreutils/coreutils-uname.patch                            |  173
-rw-r--r--  testing/coreutils/coreutils.install                                |   21
-rw-r--r--  testing/coreutils/su.pam                                           |    9
-rw-r--r--  testing/dvdrip/PKGBUILD                                            |    4
-rw-r--r--  testing/dvdrip/dvdrip.install                                      |   12
-rw-r--r--  testing/git/PKGBUILD                                               |   14
-rw-r--r--  testing/imagemagick/PKGBUILD                                       |    8
-rw-r--r--  testing/iproute2/PKGBUILD                                          |   50
-rw-r--r--  testing/man-db/1361_1360.diff                                      |   25
-rw-r--r--  testing/man-db/PKGBUILD                                            |   60
-rw-r--r--  testing/man-db/convert-mans                                        |   11
-rwxr-xr-x  testing/man-db/man-db.cron.daily                                   |   39
-rw-r--r--  testing/man-db/man-db.install                                      |   22
-rw-r--r--  testing/module-init-tools/PKGBUILD                                 |   39
-rw-r--r--  testing/module-init-tools/modprobe.conf                            |    3
-rw-r--r--  testing/net-tools/PKGBUILD                                         |   43
-rw-r--r--  testing/net-tools/gcc340.patch                                     |   46
-rw-r--r--  testing/net-tools/net-tools-1.60-2.6-compilefix.patch              |   23
-rw-r--r--  testing/net-tools/net-tools-1.60-miiioctl.patch                    |   17
-rw-r--r--  testing/net-tools/net-tools-1.60-nameif.patch                      |   58
-rw-r--r--  testing/net-tools/net-tools-1.60-nameif_strncpy.patch              |   13
-rw-r--r--  testing/net-tools/net-tools.patch                                  |   52
-rw-r--r--  testing/pidgin/PKGBUILD                                            |  114
-rw-r--r--  testing/pidgin/nm09-more.patch                                     |   49
-rw-r--r--  testing/pidgin/nm09-pidgin.patch                                   |   38
-rw-r--r--  testing/pidgin/pidgin.install                                      |   11
-rw-r--r--  testing/subversion/PKGBUILD                                        |   98
-rw-r--r--  testing/subversion/subversion.rpath.fix.patch                      |   10
-rw-r--r--  testing/subversion/subversion.suppress.deprecation.warnings.patch |   22
-rw-r--r--  testing/subversion/svn                                             |   11
-rw-r--r--  testing/subversion/svnmerge.py                                     | 2370
-rwxr-xr-x  testing/subversion/svnserve                                        |   42
-rw-r--r--  testing/subversion/svnserve.conf                                   |    7
-rw-r--r--  testing/udev/81-arch.rules                                         |   83
-rw-r--r--  testing/udev/PKGBUILD                                              |  101
-rw-r--r--  testing/udev/static-audio-nodes-group.patch                        |   27
-rw-r--r--  testing/udev/static-nodes-permissions.patch                       |   57
-rw-r--r--  testing/udev/udev.install                                          |   65
-rw-r--r--  testing/yp-tools/PKGBUILD                                          |   26
-rw-r--r--  testing/ypbind-mt/PKGBUILD                                         |   39
-rw-r--r--  testing/ypbind-mt/nisdomainname.conf                               |    4
-rwxr-xr-x  testing/ypbind-mt/ypbind                                           |   35
-rw-r--r--  testing/ypbind-mt/ypbind.conf                                      |    4
45 files changed, 4429 insertions(+), 23 deletions(-)
diff --git a/testing/coreutils/PKGBUILD b/testing/coreutils/PKGBUILD
new file mode 100644
index 000000000..8fdbe0149
--- /dev/null
+++ b/testing/coreutils/PKGBUILD
@@ -0,0 +1,69 @@
+# $Id: PKGBUILD 126199 2011-06-02 14:37:57Z bisson $
+# Maintainer: Allan McRae <allan@archlinux.org>
+# Contributor: judd <jvinet@zeroflux.org>
+
+pkgname=coreutils
+pkgver=8.12
+pkgrel=2
+pkgdesc="The basic file, shell and text manipulation utilities of the GNU operating system"
+arch=('i686' 'x86_64')
+license=('GPL3')
+url="http://www.gnu.org/software/coreutils"
+groups=('base')
+depends=('glibc' 'shadow' 'pam' 'acl' 'gmp' 'libcap')
+replaces=('mktemp')
+backup=('etc/pam.d/su')
+install=${pkgname}.install
+options=('!emptydirs')
+source=(ftp://ftp.gnu.org/gnu/$pkgname/$pkgname-$pkgver.tar.xz
+ coreutils-uname.patch
+ coreutils-pam.patch
+ su.pam)
+md5sums=('0f7d43c2d2e24314b43a6c6267e25b90'
+ 'c4fcca138b6abf6d443d48a6f0cd8833'
+ 'aad79a2aa6d566c375d7bdd1b0767278'
+ 'fa85e5cce5d723275b14365ba71a8aad')
+
+build() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+
+ # added su wheel group pam patch (from fedora git)
+ patch -Np1 -i ${srcdir}/coreutils-pam.patch
+
+ # linux specific uname improvement (from gentoo portage)
+ patch -Np1 -i ${srcdir}/coreutils-uname.patch
+
+ autoreconf -v
+ ./configure --prefix=/usr \
+ --enable-install-program=su,hostname \
+ --enable-no-install-program=groups,kill,uptime \
+ --enable-pam
+ make
+}
+
+check() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ make RUN_EXPENSIVE_TESTS=yes check
+}
+
+package() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ make DESTDIR=${pkgdir} install
+
+ cd ${pkgdir}/usr/bin
+ install -dm755 ${pkgdir}/{bin,usr/sbin}
+
+ # binaries required by FHS
+ _fhs="cat chgrp chmod chown cp date dd df echo false hostname \
+ ln ls mkdir mknod mv pwd rm rmdir stty su sync true uname"
+ mv ${_fhs} ${pkgdir}/bin
+
+ # binaries required by various Arch scripts
+ _bin="cut dir dircolors du install mkfifo readlink shred \
+ sleep touch tr vdir"
+ mv ${_bin} ${pkgdir}/bin
+ ln -sf /bin/sleep ${pkgdir}/usr/bin/sleep
+
+ mv chroot ${pkgdir}/usr/sbin
+ install -Dm644 ${srcdir}/su.pam ${pkgdir}/etc/pam.d/su
+}
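For context, a PKGBUILD like the one above is consumed by makepkg, which fetches the sources, applies the two patches, and runs build(), check() and package() in order; a minimal sketch of a local build (resulting package file name assumed for x86_64):

    $ cd testing/coreutils
    $ makepkg -s                                    # -s installs the needed depends first
    $ pacman -U coreutils-8.12-2-x86_64.pkg.tar.xz  # install the built package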
diff --git a/testing/coreutils/coreutils-pam.patch b/testing/coreutils/coreutils-pam.patch
new file mode 100644
index 000000000..e61908f3f
--- /dev/null
+++ b/testing/coreutils/coreutils-pam.patch
@@ -0,0 +1,428 @@
+diff -urNp coreutils-8.4-orig/configure.ac coreutils-8.4/configure.ac
+--- coreutils-8.4-orig/configure.ac 2010-01-11 18:20:42.000000000 +0100
++++ coreutils-8.4/configure.ac 2010-02-12 10:17:46.000000000 +0100
+@@ -126,6 +126,13 @@ if test "$gl_gcc_warnings" = yes; then
+ AC_SUBST([GNULIB_WARN_CFLAGS])
+ fi
+
++dnl Give the chance to enable PAM
++AC_ARG_ENABLE(pam, dnl
++[ --enable-pam Enable use of the PAM libraries],
++[AC_DEFINE(USE_PAM, 1, [Define if you want to use PAM])
++LIB_PAM="-ldl -lpam -lpam_misc"
++AC_SUBST(LIB_PAM)])
++
+ AC_FUNC_FORK
+
+ optional_bin_progs=
+diff -urNp coreutils-8.4-orig/doc/coreutils.texi coreutils-8.4/doc/coreutils.texi
+--- coreutils-8.4-orig/doc/coreutils.texi 2010-01-03 18:06:20.000000000 +0100
++++ coreutils-8.4/doc/coreutils.texi 2010-02-12 10:17:46.000000000 +0100
+@@ -15081,8 +15081,11 @@ to certain shells, etc.).
+ @findex syslog
+ @command{su} can optionally be compiled to use @code{syslog} to report
+ failed, and optionally successful, @command{su} attempts. (If the system
+-supports @code{syslog}.) However, GNU @command{su} does not check if the
+-user is a member of the @code{wheel} group; see below.
++supports @code{syslog}.)
++
++This version of @command{su} has support for using PAM for
++authentication. You can edit @file{/etc/pam.d/su} to customize its
++behaviour.
+
+ The program accepts the following options. Also see @ref{Common options}.
+
+@@ -15124,6 +15127,8 @@ environment variables except @env{TERM},
+ @env{PATH} to a compiled-in default value. Change to @var{user}'s home
+ directory. Prepend @samp{-} to the shell's name, intended to make it
+ read its login startup file(s).
++Additionaly @env{DISPLAY} and @env{XAUTHORITY} environment variables
++are preserved as well for PAM functionality.
+
+ @item -m
+ @itemx -p
+@@ -15163,33 +15168,6 @@ Exit status:
+ the exit status of the subshell otherwise
+ @end display
+
+-@cindex wheel group, not supported
+-@cindex group wheel, not supported
+-@cindex fascism
+-@subsection Why GNU @command{su} does not support the @samp{wheel} group
+-
+-(This section is by Richard Stallman.)
+-
+-@cindex Twenex
+-@cindex MIT AI lab
+-Sometimes a few of the users try to hold total power over all the
+-rest. For example, in 1984, a few users at the MIT AI lab decided to
+-seize power by changing the operator password on the Twenex system and
+-keeping it secret from everyone else. (I was able to thwart this coup
+-and give power back to the users by patching the kernel, but I
+-wouldn't know how to do that in Unix.)
+-
+-However, occasionally the rulers do tell someone. Under the usual
+-@command{su} mechanism, once someone learns the root password who
+-sympathizes with the ordinary users, he or she can tell the rest. The
+-``wheel group'' feature would make this impossible, and thus cement the
+-power of the rulers.
+-
+-I'm on the side of the masses, not that of the rulers. If you are
+-used to supporting the bosses and sysadmins in whatever they do, you
+-might find this idea strange at first.
+-
+-
+ @node timeout invocation
+ @section @command{timeout}: Run a command with a time limit
+
+diff -urNp coreutils-8.4-orig/src/Makefile.am coreutils-8.4/src/Makefile.am
+--- coreutils-8.4-orig/src/Makefile.am 2010-01-03 18:06:20.000000000 +0100
++++ coreutils-8.4/src/Makefile.am 2010-02-12 10:17:46.000000000 +0100
+@@ -361,7 +361,7 @@ factor_LDADD += $(LIB_GMP)
+ uptime_LDADD += $(GETLOADAVG_LIBS)
+
+ # for crypt
+-su_LDADD += $(LIB_CRYPT)
++su_LDADD += $(LIB_CRYPT) @LIB_PAM@
+
+ # for various ACL functions
+ copy_LDADD += $(LIB_ACL)
+diff -urNp coreutils-8.4-orig/src/su.c coreutils-8.4/src/su.c
+--- coreutils-8.4-orig/src/su.c 2010-02-12 10:15:15.000000000 +0100
++++ coreutils-8.4/src/su.c 2010-02-12 10:24:29.000000000 +0100
+@@ -37,6 +37,16 @@
+ restricts who can su to UID 0 accounts. RMS considers that to
+ be fascist.
+
++#ifdef USE_PAM
++
++ Actually, with PAM, su has nothing to do with whether or not a
++ wheel group is enforced by su. RMS tries to restrict your access
++ to a su which implements the wheel group, but PAM considers that
++ to be fascist, and gives the user/sysadmin the opportunity to
++ enforce a wheel group by proper editing of /etc/pam.conf
++
++#endif
++
+ Compile-time options:
+ -DSYSLOG_SUCCESS Log successful su's (by default, to root) with syslog.
+ -DSYSLOG_FAILURE Log failed su's (by default, to root) with syslog.
+@@ -53,6 +63,15 @@
+ #include <pwd.h>
+ #include <grp.h>
+
++#ifdef USE_PAM
++# include <signal.h>
++# include <sys/wait.h>
++# include <sys/fsuid.h>
++# include <unistd.h>
++# include <security/pam_appl.h>
++# include <security/pam_misc.h>
++#endif /* USE_PAM */
++
+ #include "system.h"
+ #include "getpass.h"
+
+@@ -120,10 +139,17 @@
+ /* The user to become if none is specified. */
+ #define DEFAULT_USER "root"
+
++#ifndef USE_PAM
+ char *crypt (char const *key, char const *salt);
++#endif
+
+-static void run_shell (char const *, char const *, char **, size_t)
++static void run_shell (char const *, char const *, char **, size_t,
++ const struct passwd *)
++#ifdef USE_PAM
++ ;
++#else
+ ATTRIBUTE_NORETURN;
++#endif
+
+ /* If true, pass the `-f' option to the subshell. */
+ static bool fast_startup;
+@@ -209,7 +235,26 @@ log_su (struct passwd const *pw, bool su
+ }
+ #endif
+
++#ifdef USE_PAM
++static pam_handle_t *pamh = NULL;
++static int retval;
++static struct pam_conv conv = {
++ misc_conv,
++ NULL
++};
++
++#define PAM_BAIL_P if (retval) { \
++ pam_end(pamh, PAM_SUCCESS); \
++ return 0; \
++}
++#define PAM_BAIL_P_VOID if (retval) { \
++ pam_end(pamh, PAM_SUCCESS); \
++return; \
++}
++#endif
++
+ /* Ask the user for a password.
++ If PAM is in use, let PAM ask for the password if necessary.
+ Return true if the user gives the correct password for entry PW,
+ false if not. Return true without asking for a password if run by UID 0
+ or if PW has an empty password. */
+@@ -217,6 +262,44 @@ log_su (struct passwd const *pw, bool su
+ static bool
+ correct_password (const struct passwd *pw)
+ {
++#ifdef USE_PAM
++ struct passwd *caller;
++ char *tty_name, *ttyn;
++ retval = pam_start(PROGRAM_NAME, pw->pw_name, &conv, &pamh);
++ PAM_BAIL_P;
++
++ if (getuid() != 0 && !isatty(0)) {
++ fprintf(stderr, "standard in must be a tty\n");
++ exit(1);
++ }
++
++ caller = getpwuid(getuid());
++ if(caller != NULL && caller->pw_name != NULL) {
++ retval = pam_set_item(pamh, PAM_RUSER, caller->pw_name);
++ PAM_BAIL_P;
++ }
++
++ ttyn = ttyname(0);
++ if (ttyn) {
++ if (strncmp(ttyn, "/dev/", 5) == 0)
++ tty_name = ttyn+5;
++ else
++ tty_name = ttyn;
++ retval = pam_set_item(pamh, PAM_TTY, tty_name);
++ PAM_BAIL_P;
++ }
++ retval = pam_authenticate(pamh, 0);
++ PAM_BAIL_P;
++ retval = pam_acct_mgmt(pamh, 0);
++ if (retval == PAM_NEW_AUTHTOK_REQD) {
++ /* password has expired. Offer option to change it. */
++ retval = pam_chauthtok(pamh, PAM_CHANGE_EXPIRED_AUTHTOK);
++ PAM_BAIL_P;
++ }
++ PAM_BAIL_P;
++ /* must be authenticated if this point was reached */
++ return 1;
++#else /* !USE_PAM */
+ char *unencrypted, *encrypted, *correct;
+ #if HAVE_GETSPNAM && HAVE_STRUCT_SPWD_SP_PWDP
+ /* Shadow passwd stuff for SVR3 and maybe other systems. */
+@@ -241,6 +324,7 @@ correct_password (const struct passwd *p
+ encrypted = crypt (unencrypted, correct);
+ memset (unencrypted, 0, strlen (unencrypted));
+ return STREQ (encrypted, correct);
++#endif /* !USE_PAM */
+ }
+
+ /* Update `environ' for the new shell based on PW, with SHELL being
+@@ -254,12 +338,18 @@ modify_environment (const struct passwd
+ /* Leave TERM unchanged. Set HOME, SHELL, USER, LOGNAME, PATH.
+ Unset all other environment variables. */
+ char const *term = getenv ("TERM");
++ char const *display = getenv ("DISPLAY");
++ char const *xauthority = getenv ("XAUTHORITY");
+ if (term)
+ term = xstrdup (term);
+ environ = xmalloc ((6 + !!term) * sizeof (char *));
+ environ[0] = NULL;
+ if (term)
+ xsetenv ("TERM", term);
++ if (display)
++ xsetenv ("DISPLAY", display);
++ if (xauthority)
++ xsetenv ("XAUTHORITY", xauthority);
+ xsetenv ("HOME", pw->pw_dir);
+ xsetenv ("SHELL", shell);
+ xsetenv ("USER", pw->pw_name);
+@@ -292,8 +382,13 @@ change_identity (const struct passwd *pw
+ {
+ #ifdef HAVE_INITGROUPS
+ errno = 0;
+- if (initgroups (pw->pw_name, pw->pw_gid) == -1)
++ if (initgroups (pw->pw_name, pw->pw_gid) == -1) {
++#ifdef USE_PAM
++ pam_close_session(pamh, 0);
++ pam_end(pamh, PAM_ABORT);
++#endif
+ error (EXIT_CANCELED, errno, _("cannot set groups"));
++ }
+ endgrent ();
+ #endif
+ if (setgid (pw->pw_gid))
+@@ -302,6 +397,31 @@ change_identity (const struct passwd *pw
+ error (EXIT_CANCELED, errno, _("cannot set user id"));
+ }
+
++#ifdef USE_PAM
++static int caught=0;
++/* Signal handler for parent process later */
++static void su_catch_sig(int sig)
++{
++ ++caught;
++}
++
++int
++pam_copyenv (pam_handle_t *pamh)
++{
++ char **env;
++
++ env = pam_getenvlist(pamh);
++ if(env) {
++ while(*env) {
++ if (putenv (*env))
++ xalloc_die ();
++ env++;
++ }
++ }
++ return(0);
++}
++#endif
++
+ /* Run SHELL, or DEFAULT_SHELL if SHELL is empty.
+ If COMMAND is nonzero, pass it to the shell with the -c option.
+ Pass ADDITIONAL_ARGS to the shell as more arguments; there
+@@ -309,17 +429,49 @@ change_identity (const struct passwd *pw
+
+ static void
+ run_shell (char const *shell, char const *command, char **additional_args,
+- size_t n_additional_args)
++ size_t n_additional_args, const struct passwd *pw)
+ {
+ size_t n_args = 1 + fast_startup + 2 * !!command + n_additional_args + 1;
+ char const **args = xnmalloc (n_args, sizeof *args);
+ size_t argno = 1;
++#ifdef USE_PAM
++ int child;
++ sigset_t ourset;
++ int status;
++
++ retval = pam_open_session(pamh,0);
++ if (retval != PAM_SUCCESS) {
++ fprintf (stderr, "could not open session\n");
++ exit (1);
++ }
++
++/* do this at the last possible moment, because environment variables may
++ be passed even in the session phase
++*/
++ if(pam_copyenv(pamh) != PAM_SUCCESS)
++ fprintf (stderr, "error copying PAM environment\n");
++
++ /* Credentials should be set in the parent */
++ if (pam_setcred(pamh, PAM_ESTABLISH_CRED) != PAM_SUCCESS) {
++ pam_close_session(pamh, 0);
++ fprintf(stderr, "could not set PAM credentials\n");
++ exit(1);
++ }
++
++ child = fork();
++ if (child == 0) { /* child shell */
++ change_identity (pw);
++ pam_end(pamh, 0);
++#endif
+
+ if (simulate_login)
+ {
+ char *arg0;
+ char *shell_basename;
+
++ if(chdir(pw->pw_dir))
++ error(0, errno, _("warning: cannot change directory to %s"), pw->pw_dir);
++
+ shell_basename = last_component (shell);
+ arg0 = xmalloc (strlen (shell_basename) + 2);
+ arg0[0] = '-';
+@@ -344,6 +496,67 @@ run_shell (char const *shell, char const
+ error (0, errno, "%s", shell);
+ exit (exit_status);
+ }
++#ifdef USE_PAM
++ } else if (child == -1) {
++ fprintf(stderr, "can not fork user shell: %s", strerror(errno));
++ pam_setcred(pamh, PAM_DELETE_CRED | PAM_SILENT);
++ pam_close_session(pamh, 0);
++ pam_end(pamh, PAM_ABORT);
++ exit(1);
++ }
++ /* parent only */
++ sigfillset(&ourset);
++ if (sigprocmask(SIG_BLOCK, &ourset, NULL)) {
++ fprintf(stderr, "%s: signal malfunction\n", PROGRAM_NAME);
++ caught = 1;
++ }
++ if (!caught) {
++ struct sigaction action;
++ action.sa_handler = su_catch_sig;
++ sigemptyset(&action.sa_mask);
++ action.sa_flags = 0;
++ sigemptyset(&ourset);
++ if (sigaddset(&ourset, SIGTERM)
++ || sigaddset(&ourset, SIGALRM)
++ || sigaction(SIGTERM, &action, NULL)
++ || sigprocmask(SIG_UNBLOCK, &ourset, NULL)) {
++ fprintf(stderr, "%s: signal masking malfunction\n", PROGRAM_NAME);
++ caught = 1;
++ }
++ }
++ if (!caught) {
++ do {
++ int pid;
++
++ pid = waitpid(-1, &status, WUNTRACED);
++
++ if (((pid_t)-1 != pid) && (0 != WIFSTOPPED (status))) {
++ kill(getpid(), WSTOPSIG(status));
++ /* once we get here, we must have resumed */
++ kill(pid, SIGCONT);
++ }
++ } while (0 != WIFSTOPPED(status));
++ }
++
++ if (caught) {
++ fprintf(stderr, "\nSession terminated, killing shell...");
++ kill (child, SIGTERM);
++ }
++ /* Not checking retval on this because we need to call close session */
++ pam_setcred(pamh, PAM_DELETE_CRED | PAM_SILENT);
++ retval = pam_close_session(pamh, 0);
++ PAM_BAIL_P_VOID;
++ retval = pam_end(pamh, PAM_SUCCESS);
++ PAM_BAIL_P_VOID;
++ if (caught) {
++ sleep(2);
++ kill(child, SIGKILL);
++ fprintf(stderr, " ...killed.\n");
++ exit(-1);
++ }
++ exit ((0 != WIFEXITED (status)) ? WEXITSTATUS (status)
++ : WTERMSIG (status) + 128);
++#endif /* USE_PAM */
+ }
+
+ /* Return true if SHELL is a restricted shell (one not returned by
+@@ -511,9 +724,9 @@ main (int argc, char **argv)
+ shell = xstrdup (shell ? shell : pw->pw_shell);
+ modify_environment (pw, shell);
+
++#ifndef USE_PAM
+ change_identity (pw);
+- if (simulate_login && chdir (pw->pw_dir) != 0)
+- error (0, errno, _("warning: cannot change directory to %s"), pw->pw_dir);
++#endif
+
+ /* error() flushes stderr, but does not check for write failure.
+ Normally, we would catch this via our atexit() hook of
+@@ -523,5 +736,5 @@ main (int argc, char **argv)
+ if (ferror (stderr))
+ exit (EXIT_CANCELED);
+
+- run_shell (shell, command, argv + optind, MAX (0, argc - optind));
++ run_shell (shell, command, argv + optind, MAX (0, argc - optind), pw);
+ }
diff --git a/testing/coreutils/coreutils-uname.patch b/testing/coreutils/coreutils-uname.patch
new file mode 100644
index 000000000..b458abeba
--- /dev/null
+++ b/testing/coreutils/coreutils-uname.patch
@@ -0,0 +1,173 @@
+On linux platforms, grok /proc/cpuinfo for the CPU/vendor info.
+
+Prob not suitable for upstream seeing as how it's 100% linux-specific
+http://lists.gnu.org/archive/html/bug-coreutils/2005-09/msg00063.html
+
+Patch originally by Carlos E. Gorges <carlos@techlinux.com.br>, but
+heavily reworked to suck less.
+
+To add support for additional platforms, check out the show_cpuinfo()
+func in the linux/arch/<ARCH>/ source tree of the kernel.
+
+--- coreutils/src/uname.c
++++ coreutils/src/uname.c
+@@ -50,6 +50,11 @@
+ # include <mach-o/arch.h>
+ #endif
+
++#if defined(__linux__)
++# define USE_PROCINFO
++# define UNAME_HARDWARE_PLATFORM
++#endif
++
+ #include "system.h"
+ #include "error.h"
+ #include "quote.h"
+@@ -138,6 +143,117 @@
+ exit (status);
+ }
+
++#if defined(USE_PROCINFO)
++
++# if defined(__s390__) || defined(__s390x__)
++# define CPUINFO_FILE "/proc/sysinfo"
++# define CPUINFO_FORMAT "%64[^\t :]%*[ :]%256[^\n]%c"
++# else
++# define CPUINFO_FILE "/proc/cpuinfo"
++# define CPUINFO_FORMAT "%64[^\t:]\t:%256[^\n]%c"
++# endif
++
++# define PROCINFO_PROCESSOR 0
++# define PROCINFO_HARDWARE_PLATFORM 1
++
++static void __eat_cpuinfo_space(char *buf)
++{
++ /* first eat trailing space */
++ char *tmp = buf + strlen(buf) - 1;
++ while (tmp > buf && isspace(*tmp))
++ *tmp-- = '\0';
++ /* then eat leading space */
++ tmp = buf;
++ while (*tmp && isspace(*tmp))
++ tmp++;
++ if (tmp != buf)
++ memmove(buf, tmp, strlen(tmp)+1);
++ /* finally collapse whitespace */
++ tmp = buf;
++ while (tmp[0] && tmp[1]) {
++ if (isspace(tmp[0]) && isspace(tmp[1])) {
++ memmove(tmp, tmp+1, strlen(tmp));
++ continue;
++ }
++ ++tmp;
++ }
++}
++
++static int __linux_procinfo(int x, char *fstr, size_t s)
++{
++ FILE *fp;
++
++ char *procinfo_keys[] = {
++ /* --processor --hardware-platform */
++ #if defined(__alpha__)
++ "cpu model", "system type"
++ #elif defined(__arm__)
++ "Processor", "Hardware"
++ #elif defined(__avr32__)
++ "processor", "cpu family"
++ #elif defined(__bfin__)
++ "CPU", "BOARD Name"
++ #elif defined(__cris__)
++ "cpu", "cpu model"
++ #elif defined(__frv__)
++ "CPU-Core", "System"
++ #elif defined(__i386__) || defined(__x86_64__)
++ "model name", "vendor_id"
++ #elif defined(__ia64__)
++ "family", "vendor"
++ #elif defined(__hppa__)
++ "cpu", "model"
++ #elif defined(__m68k__)
++ "CPU", "MMU"
++ #elif defined(__mips__)
++ "cpu model", "system type"
++ #elif defined(__powerpc__) || defined(__powerpc64__)
++ "cpu", "machine"
++ #elif defined(__s390__) || defined(__s390x__)
++ "Type", "Manufacturer"
++ #elif defined(__sh__)
++ "cpu type", "machine"
++ #elif defined(sparc) || defined(__sparc__)
++ "type", "cpu"
++ #elif defined(__vax__)
++ "cpu type", "cpu"
++ #else
++ "unknown", "unknown"
++ #endif
++ };
++
++ if ((fp = fopen(CPUINFO_FILE, "r")) != NULL) {
++ char key[65], value[257], eol, *ret = NULL;
++
++ while (fscanf(fp, CPUINFO_FORMAT, key, value, &eol) != EOF) {
++ __eat_cpuinfo_space(key);
++ if (!strcmp(key, procinfo_keys[x])) {
++ __eat_cpuinfo_space(value);
++ ret = value;
++ break;
++ }
++ if (eol != '\n') {
++ /* we need two fscanf's here in case the previous
++ * length limit caused us to read right up to the
++ * newline ... doing "%*[^\n]\n" wont eat the newline
++ */
++ fscanf(fp, "%*[^\n]");
++ fscanf(fp, "\n");
++ }
++ }
++ fclose(fp);
++
++ if (ret) {
++ strncpy(fstr, ret, s);
++ return 0;
++ }
++ }
++
++ return -1;
++}
++
++#endif
++
+ /* Print ELEMENT, preceded by a space if something has already been
+ printed. */
+
+@@ -250,10 +344,14 @@ main (int argc, char **argv)
+ if (toprint & PRINT_PROCESSOR)
+ {
+ char const *element = unknown;
+-#if HAVE_SYSINFO && defined SI_ARCHITECTURE
++#if ( HAVE_SYSINFO && defined SI_ARCHITECTURE ) || defined(USE_PROCINFO)
+ {
+ static char processor[257];
++#if defined(USE_PROCINFO)
++ if (0 <= __linux_procinfo (PROCINFO_PROCESSOR, processor, sizeof processor))
++#else
+ if (0 <= sysinfo (SI_ARCHITECTURE, processor, sizeof processor))
++#endif
+ element = processor;
+ }
+ #endif
+@@ -306,9 +404,13 @@ main (int argc, char **argv)
+ if (element == unknown)
+ {
+ static char hardware_platform[257];
++#if defined(USE_PROCINFO)
++ if (0 <= __linux_procinfo (PROCINFO_HARDWARE_PLATFORM, hardware_platform, sizeof hardware_platform))
++#else
+ size_t s = sizeof hardware_platform;
+ static int mib[] = { CTL_HW, UNAME_HARDWARE_PLATFORM };
+ if (sysctl (mib, 2, hardware_platform, &s, 0, 0) >= 0)
++#endif
+ element = hardware_platform;
+ }
+ #endif
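As the preamble of this patch explains, the added code looks up a per-architecture key in /proc/cpuinfo ("model name"/"vendor_id" on i386/x86_64) and collapses its whitespace before reporting it. An illustrative run with the patched uname (the cpuinfo values shown are machine-specific examples, not output from this build):

    $ grep -m1 'model name' /proc/cpuinfo
    model name      : Intel(R) Core(TM)2 Duo CPU     E8400  @ 3.00GHz
    $ uname -p
    Intel(R) Core(TM)2 Duo CPU E8400 @ 3.00GHz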
diff --git a/testing/coreutils/coreutils.install b/testing/coreutils/coreutils.install
new file mode 100644
index 000000000..8caae6686
--- /dev/null
+++ b/testing/coreutils/coreutils.install
@@ -0,0 +1,21 @@
+infodir=usr/share/info
+filelist=(coreutils.info)
+
+post_install() {
+ [ -x usr/bin/install-info ] || return 0
+ for file in ${filelist[@]}; do
+ usr/bin/install-info $infodir/$file.gz $infodir/dir 2> /dev/null
+ done
+}
+
+post_upgrade() {
+ post_install $1
+}
+
+pre_remove() {
+ [ -x usr/bin/install-info ] || return 0
+ for file in ${filelist[@]}; do
+ usr/bin/install-info --delete $infodir/$file.gz $infodir/dir 2> /dev/null
+ done
+}
+
diff --git a/testing/coreutils/su.pam b/testing/coreutils/su.pam
new file mode 100644
index 000000000..cf15f40f1
--- /dev/null
+++ b/testing/coreutils/su.pam
@@ -0,0 +1,9 @@
+#%PAM-1.0
+auth sufficient pam_rootok.so
+# Uncomment the following line to implicitly trust users in the "wheel" group.
+#auth sufficient pam_wheel.so trust use_uid
+# Uncomment the following line to require a user to be in the "wheel" group.
+#auth required pam_wheel.so use_uid
+auth required pam_unix.so
+account required pam_unix.so
+session required pam_unix.so
diff --git a/testing/dvdrip/PKGBUILD b/testing/dvdrip/PKGBUILD
index 17838b169..119a2a1d9 100644
--- a/testing/dvdrip/PKGBUILD
+++ b/testing/dvdrip/PKGBUILD
@@ -1,10 +1,10 @@
-# $Id: PKGBUILD 125147 2011-05-25 19:02:09Z foutrelis $
+# $Id: PKGBUILD 126069 2011-06-01 10:23:05Z foutrelis $
# Maintainer: Giovanni Scafora <giovanni@archlinux.org>
# Contributor: Fredrik Hammar <Horney_C86@Hotmail.com>
pkgname=dvdrip
pkgver=0.98.11
-pkgrel=7
+pkgrel=8
pkgdesc="A Gtk frontend for transcode writen in Perl"
arch=('i686' 'x86_64')
license=('custom')
diff --git a/testing/dvdrip/dvdrip.install b/testing/dvdrip/dvdrip.install
index 4cd6160c4..e111ef946 100644
--- a/testing/dvdrip/dvdrip.install
+++ b/testing/dvdrip/dvdrip.install
@@ -1,19 +1,11 @@
-# this is the scrollkeeper handling sample file
-
post_install() {
- echo "update desktop mime database..."
update-desktop-database -q
}
post_upgrade() {
- post_install $1
-}
-
-pre_remove() {
- /bin/true
+ post_install
}
post_remove() {
- echo "update desktop mime database..."
- update-desktop-database -q
+ post_install
}
diff --git a/testing/git/PKGBUILD b/testing/git/PKGBUILD
index 0a96745dd..713c5dd11 100644
--- a/testing/git/PKGBUILD
+++ b/testing/git/PKGBUILD
@@ -1,9 +1,9 @@
-# $Id: PKGBUILD 125150 2011-05-25 19:02:49Z foutrelis $
+# $Id: PKGBUILD 126224 2011-06-02 19:12:58Z dan $
# Maintainer: Dan McGee <dan@archlinux.org>
pkgname=git
-pkgver=1.7.5.2
-pkgrel=2
+pkgver=1.7.5.4
+pkgrel=1
pkgdesc="the fast distributed version control system"
arch=(i686 x86_64)
url="http://git-scm.com/"
@@ -75,11 +75,11 @@ package() {
install -D -m644 "$srcdir"/git-daemon.conf "$pkgdir"/etc/conf.d/git-daemon.conf
}
-md5sums=('f79ab8fe79b35346b499f131cbf381a4'
- '43fc5538f137231f5c96e7da5eb6c934'
+md5sums=('4985b774db84d3bbcc2b8d90952552a3'
+ '7a74ef1c64395a07301359a8707c828a'
'8e2648910fd5dd4f1c41d3c7fa9e9156'
'2e42bf97779a1c6411d89043334c9e78')
-sha256sums=('65dbb9789a1ff8aa7e4f7e156b9ce36ef101810608ed55a297156348d70f9349'
- '8521210a77809d189a3fe72e2bd13df50ecf8ec58e7098ec1655dfdb4086b6ca'
+sha256sums=('fed9a6c7d07b063cfba9e3eb40fbd0d8120785225289fcf2fb56bee18dffd5ee'
+ '87d612195426fedfe4ee5274d9903e8f85aa16c410b4a3ba41610c23ed9211d7'
'2e0a50bdaf8f387a499895e1c204bff78244eaa72b78187c8a84ef40c0b82598'
'e8bfe29d8393d2b87517c4dd56ea834b213aa00bf3d7fcde4ead3457cadbbc68')
diff --git a/testing/imagemagick/PKGBUILD b/testing/imagemagick/PKGBUILD
index 39f9e8830..29f188e18 100644
--- a/testing/imagemagick/PKGBUILD
+++ b/testing/imagemagick/PKGBUILD
@@ -1,9 +1,9 @@
-# $Id: PKGBUILD 124573 2011-05-23 01:23:35Z eric $
+# $Id: PKGBUILD 126048 2011-06-01 05:22:12Z eric $
# Maintainer: Eric Bélanger <eric@archlinux.org>
pkgbase=imagemagick
pkgname=('imagemagick' 'imagemagick-doc')
-pkgver=6.6.9.10
+pkgver=6.7.0.2
pkgrel=1
arch=('i686' 'x86_64')
url="http://www.imagemagick.org/"
@@ -13,9 +13,9 @@ depends=('libtool' 'lcms' 'libxt' 'gcc-libs' 'bzip2' 'xz' 'freetype2' 'fontconfi
makedepends=('ghostscript' 'openexr' 'libwmf' 'librsvg' 'libxml2' 'jasper' 'libpng')
source=(ftp://ftp.imagemagick.org/pub/ImageMagick/ImageMagick-${pkgver%.*}-${pkgver##*.}.tar.xz \
perlmagick.rpath.patch)
-md5sums=('cf8940e964be608ddae152b90a576282'
+md5sums=('23a53b96b8e75c3ffd8cbbbfc1041b2f'
'ff9974decbfe9846f8e347239d87e4eb')
-sha1sums=('18d141e904853c68a43f53862bc67e3c8b66d664'
+sha1sums=('ff8e666d58a27af1ce3ab2c5408d10c233a4d809'
'23405f80904b1de94ebd7bd6fe2a332471b8c283')
build() {
diff --git a/testing/iproute2/PKGBUILD b/testing/iproute2/PKGBUILD
new file mode 100644
index 000000000..f9479bd84
--- /dev/null
+++ b/testing/iproute2/PKGBUILD
@@ -0,0 +1,50 @@
+# $Id: PKGBUILD 126335 2011-06-04 22:38:16Z tomegun $
+# Maintainer: Ronald van Haren <ronald.archlinux.org>
+# Contributor: Judd Vinet <jvinet@zeroflux.org>
+
+pkgname=iproute2
+pkgver=2.6.38
+pkgrel=3
+pkgdesc="IP Routing Utilities"
+arch=('i686' 'x86_64')
+license=('GPL2')
+url="http://www.linux-foundation.org/en/Net:Iproute2"
+depends=('perl')
+makedepends=('linux-atm')
+optdepends=('linux-atm: ATM support')
+provides=('iproute')
+conflicts=('iproute')
+replaces=('iproute')
+options=('!makeflags')
+backup=('etc/iproute2/ematch_map' 'etc/iproute2/rt_dsfield' 'etc/iproute2/rt_protos' \
+ 'etc/iproute2/rt_realms' 'etc/iproute2/rt_scopes' 'etc/iproute2/rt_tables')
+source=(http://devresources.linux-foundation.org/dev/iproute2/download/iproute2-${pkgver}.tar.bz2
+ 'iproute2-fhs.patch')
+sha1sums=('e9f6d457a06866a2a20a6cba6b3a039b2ec3e14a'
+ '2416b11252364d7a6c742eabb4a6924a75637a46')
+
+build() {
+ cd $srcdir/iproute2-${pkgver}
+
+ # set correct fhs structure
+ patch -Np1 -i ${srcdir}/iproute2-fhs.patch
+
+ ./configure
+
+ make
+}
+
+package() {
+ cd $srcdir/iproute2-${pkgver}
+
+ make DESTDIR=$pkgdir install
+
+ # allow loopback to be started before /usr is mounted; this may not be supported in the future
+ mkdir -p ${pkgdir}/sbin
+ mv ${pkgdir}/usr/sbin/ip ${pkgdir}/sbin/ip
+ ln -s /sbin/ip ${pkgdir}/usr/sbin/ip
+
+ # libnetlink isn't installed, install it FS#19385
+ install -Dm644 include/libnetlink.h ${pkgdir}/usr/include/libnetlink.h
+ install -Dm644 lib/libnetlink.a ${pkgdir}/usr/lib/libnetlink.a
+}
diff --git a/testing/man-db/1361_1360.diff b/testing/man-db/1361_1360.diff
new file mode 100644
index 000000000..c93856980
--- /dev/null
+++ b/testing/man-db/1361_1360.diff
@@ -0,0 +1,25 @@
+=== modified file 'src/straycats.c'
+--- src/straycats.c 2011-01-10 20:08:22 +0000
++++ src/straycats.c 2011-06-04 06:34:51 +0000
+@@ -177,6 +177,7 @@
+ char *lang, *page_encoding;
+ char *mandir_base;
+ pipecmd *col_cmd;
++ char *col_locale;
+ char *fullpath;
+
+ /* we have a straycat. Need to filter it and get
+@@ -226,6 +227,12 @@
+ col_cmd = pipecmd_new_argstr
+ (get_def_user ("col", COL));
+ pipecmd_arg (col_cmd, "-bx");
++ col_locale = find_charset_locale ("UTF-8");
++ if (col_locale) {
++ pipecmd_setenv (col_cmd, "LC_CTYPE",
++ col_locale);
++ free (col_locale);
++ }
+ pipeline_command (decomp, col_cmd);
+
+ fullpath = canonicalize_file_name (catdir);
+
diff --git a/testing/man-db/PKGBUILD b/testing/man-db/PKGBUILD
new file mode 100644
index 000000000..25675079f
--- /dev/null
+++ b/testing/man-db/PKGBUILD
@@ -0,0 +1,60 @@
+# $Id: PKGBUILD 126294 2011-06-04 09:41:54Z andyrtr $
+# Maintainer: Andreas Radke <andyrtr@archlinux.org>
+# Contributor: Sergej Pupykin <sergej@aur.archlinux.org>
+
+pkgname=man-db
+pkgver=2.6.0.2
+pkgrel=2
+pkgdesc="A utility for reading man pages"
+arch=('i686' 'x86_64')
+url="http://www.nongnu.org/man-db/"
+license=('GPL' 'LGPL')
+groups=('base')
+depends=( 'bash' 'gdbm' 'zlib' 'groff' 'libpipeline')
+optdepends=('less' 'gzip')
+backup=('etc/man_db.conf'
+ 'etc/cron.daily/man-db')
+conflicts=('man')
+provides=('man')
+replaces=('man')
+install=${pkgname}.install
+source=(http://savannah.nongnu.org/download/man-db/$pkgname-$pkgver.tar.gz
+ 1361_1360.diff
+ #http://launchpad.net/man-db/main/${pkgver}/+download/${pkgname}-${pkgver}.tar.gz
+ convert-mans man-db.cron.daily)
+options=('!libtool')
+md5sums=('2b41c96efec032d2b74ccbf2e401f93e'
+ '08b76b1f924c5493a280b79fc0aebde4'
+ '2b7662a7d5b33fe91f9f3e034361a2f6'
+ 'd30c39ae47560304471b5461719e0f03')
+
+build() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ # https://bugs.archlinux.org/task/18722
+ patch -Np0 -i $srcdir/1361_1360.diff
+ ./configure --prefix=/usr --sysconfdir=/etc --libexecdir=/usr/lib \
+ --with-db=gdbm --disable-setuid --enable-mandirs=GNU \
+ --with-sections="1 n l 8 3 0 2 5 4 9 6 7"
+ make
+}
+
+check() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ make check
+}
+
+package() {
+ cd ${srcdir}/${pkgname}-${pkgver}
+ make DESTDIR=${pkgdir} install
+
+ # part of groff pkg
+ rm -f ${pkgdir}/usr/bin/zsoelim
+
+ # script from LFS to convert manpages, see
+ # http://www.linuxfromscratch.org/lfs/view/6.4/chapter06/man-db.html
+ install -D -m755 ${srcdir}/convert-mans ${pkgdir}/usr/bin/convert-mans
+
+ #install whatis cron script
+ install -D -m744 ${srcdir}/man-db.cron.daily ${pkgdir}/etc/cron.daily/man-db
+}
+
diff --git a/testing/man-db/convert-mans b/testing/man-db/convert-mans
new file mode 100644
index 000000000..58a0224b0
--- /dev/null
+++ b/testing/man-db/convert-mans
@@ -0,0 +1,11 @@
+#!/bin/sh -e
+FROM="$1"
+TO="$2"
+shift ; shift
+while [ $# -gt 0 ]
+do
+ FILE="$1"
+ shift
+ iconv -f "$FROM" -t "$TO" "$FILE" >.tmp.iconv
+ mv .tmp.iconv "$FILE"
+done
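convert-mans takes a source encoding, a target encoding and a list of files, and re-encodes each file in place through a temporary file; hypothetical usage (paths assumed):

    $ convert-mans ISO-8859-1 UTF-8 /usr/share/man/de/man1/*.1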
diff --git a/testing/man-db/man-db.cron.daily b/testing/man-db/man-db.cron.daily
new file mode 100755
index 000000000..53e66e1e9
--- /dev/null
+++ b/testing/man-db/man-db.cron.daily
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+# nicenesses range from -20 (most favorable scheduling) to 19 (least favorable)
+NICE=19
+
+# 0 for none, 1 for real time, 2 for best-effort, 3 for idle
+IONICE_CLASS=2
+
+# 0-7 (for IONICE_CLASS 1 and 2 only), 0=highest, 7=lowest
+IONICE_PRIORITY=7
+
+UPDATEMANDB="/usr/bin/mandb --quiet"
+
+# Update the "whatis" database
+#/usr/sbin/makewhatis -u -w
+
+# taken from Debian
+# man-db cron daily
+set -e
+
+if ! [ -d /var/cache/man ]; then
+ # Recover from deletion, per FHS.
+ mkdir -p /var/cache/man
+ chmod 755 /var/cache/man
+fi
+
+# regenerate man database
+
+if [ -x /usr/bin/nice ]; then
+ UPDATEMANDB="/usr/bin/nice -n ${NICE:-19} ${UPDATEMANDB}"
+fi
+
+if [ -x /usr/bin/ionice ]; then
+ UPDATEMANDB="/usr/bin/ionice -c ${IONICE_CLASS:-2} -n ${IONICE_PRIORITY:-7} ${UPDATEMANDB}"
+fi
+
+${UPDATEMANDB}
+
+exit 0
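With the defaults above, and both nice and ionice present, the command this cron job finally runs expands to:

    /usr/bin/ionice -c 2 -n 7 /usr/bin/nice -n 19 /usr/bin/mandb --quiet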
diff --git a/testing/man-db/man-db.install b/testing/man-db/man-db.install
new file mode 100644
index 000000000..f6f0f27a6
--- /dev/null
+++ b/testing/man-db/man-db.install
@@ -0,0 +1,22 @@
+post_install() {
+ echo "it's recommended to create an initial"
+ echo "database running as root:"
+ echo "\"/usr/bin/mandb --quiet\""
+}
+
+post_upgrade() {
+ if [ "`vercmp $2 2.5.3-2`" -lt 0 ]; then
+ echo "systemuser \"man\" is no more required"
+ echo "run \"userdel man\". please also"
+ echo "chown root:root /var/cache/man"
+ fi
+ # force database rebuild to get rid of badly imported pages
+ if [ "`vercmp $2 2.6.0.2`" -lt 0 ]; then
+ echo "(re)building database..."
+ mandb -c --quiet
+ fi
+}
+
+post_remove() {
+ rm -rf /var/cache/man
+}
diff --git a/testing/module-init-tools/PKGBUILD b/testing/module-init-tools/PKGBUILD
new file mode 100644
index 000000000..4ecfab9c9
--- /dev/null
+++ b/testing/module-init-tools/PKGBUILD
@@ -0,0 +1,39 @@
+# $Id: PKGBUILD 126053 2011-06-01 05:32:56Z andyrtr $
+# Maintainer: Aaron Griffin <aaron@archlinux.org>
+# Contributor: judd <jvinet@zeroflux.org>
+
+pkgname=module-init-tools
+pkgver=3.13
+pkgrel=1
+pkgdesc="utilities needed by Linux systems for managing loadable kernel modules"
+arch=('i686' 'x86_64')
+url="http://kerneltools.org"
+license=('GPL')
+depends=('glibc')
+backup=('etc/modprobe.d/modprobe.conf')
+source=(http://www.kernel.org/pub/linux/utils/kernel/module-init-tools/module-init-tools-$pkgver.tar.bz2
+ modprobe.conf)
+md5sums=('dc575e7df00d9f745bf23b32f927b7a6'
+ '316f1bda4c21af02b30252eb014a0a55')
+
+build() {
+ cd $srcdir/$pkgname-$pkgver
+
+ # do not regenerate man pages
+ touch *.{5,8}
+
+ ./configure --prefix=/usr --exec-prefix=/ --enable-zlib
+ make
+}
+
+package() {
+ cd $srcdir/$pkgname-$pkgver
+
+ make DESTDIR=$pkgdir install
+
+ # Install our custom (read: empty) modprobe.conf
+ install -Dm644 $srcdir/modprobe.conf $pkgdir/etc/modprobe.d/modprobe.conf
+
+ # fix man page (FS#17559)
+ sed -i "s#mod#man5/mod#" $pkgdir/usr/share/man/man5/modprobe.d.5
+}
diff --git a/testing/module-init-tools/modprobe.conf b/testing/module-init-tools/modprobe.conf
new file mode 100644
index 000000000..83865a3af
--- /dev/null
+++ b/testing/module-init-tools/modprobe.conf
@@ -0,0 +1,3 @@
+#
+# /etc/modprobe.d/modprobe.conf (for v2.6 kernels)
+#
diff --git a/testing/net-tools/PKGBUILD b/testing/net-tools/PKGBUILD
new file mode 100644
index 000000000..d9b2380bc
--- /dev/null
+++ b/testing/net-tools/PKGBUILD
@@ -0,0 +1,43 @@
+# $Id: PKGBUILD 126212 2011-06-02 16:50:35Z bisson $
+# Maintainer: judd <jvinet@zeroflux.org>
+pkgname=net-tools
+pkgver=1.60
+pkgrel=16
+pkgdesc="Configuration tools for Linux networking"
+arch=(i686 x86_64)
+license=('GPL')
+url="http://www.tazenda.demon.co.uk/phil/net-tools"
+depends=('glibc')
+source=(http://www.tazenda.demon.co.uk/phil/$pkgname/$pkgname-$pkgver.tar.bz2
+ net-tools.patch gcc340.patch net-tools-1.60-2.6-compilefix.patch
+ net-tools-1.60-miiioctl.patch
+ net-tools-1.60-nameif.patch
+ net-tools-1.60-nameif_strncpy.patch)
+options=(!makeflags)
+md5sums=('888774accab40217dde927e21979c165'
+ '7ef8d0c6818faa0fdeea94970a20e3fb'
+ 'b52d899cba9956bb0055150506f41ac1'
+ '51de6eabe2d6d6dc860f72c41cee636b'
+ 'c16109863bc63f3dad4ef35305a340bb'
+ '29a32617382fab1735acba4d920f1fcd'
+ 'e66466b9304dac85eb42b32f1ec3b284')
+
+build() {
+ cd $srcdir/$pkgname-$pkgver
+ patch -Np1 -i ../net-tools.patch
+ patch -Np1 -i ../net-tools-1.60-2.6-compilefix.patch
+ patch -Np1 -i ../net-tools-1.60-miiioctl.patch
+ patch -Np1 -i ../gcc340.patch
+ patch -Np1 -i ${srcdir}/net-tools-1.60-nameif.patch
+ patch -Np1 -i ${srcdir}/net-tools-1.60-nameif_strncpy.patch
+ yes "" | make
+}
+
+package() {
+ cd $srcdir/$pkgname-$pkgver
+ make BASEDIR=$pkgdir update
+
+ # the following is provided by yp-tools and coreutils
+ rm "${pkgdir}"/bin/{{,dns,nis,yp}domainname,hostname}
+ rm "${pkgdir}"/usr/share/man/man1/{{,dns,nis,yp}domainname,hostname}.1
+}
diff --git a/testing/net-tools/gcc340.patch b/testing/net-tools/gcc340.patch
new file mode 100644
index 000000000..8089bf217
--- /dev/null
+++ b/testing/net-tools/gcc340.patch
@@ -0,0 +1,46 @@
+diff -Naur net-tools-1.60-orig/hostname.c net-tools-1.60/hostname.c
+--- net-tools-1.60-orig/hostname.c 2001-04-08 10:04:23.000000000 -0700
++++ net-tools-1.60/hostname.c 2004-05-07 17:22:14.000000000 -0700
+@@ -78,6 +78,7 @@
+ fprintf(stderr, _("%s: name too long\n"), program_name);
+ break;
+ default:
++ ;
+ }
+ exit(1);
+ }
+@@ -98,6 +99,7 @@
+ fprintf(stderr, _("%s: name too long\n"), program_name);
+ break;
+ default:
++ ;
+ }
+ exit(1);
+ };
+@@ -117,6 +119,7 @@
+ fprintf(stderr, _("%s: name too long\n"), program_name);
+ break;
+ default:
++ ;
+ }
+ exit(1);
+ };
+@@ -174,6 +177,7 @@
+ printf("%s\n", hp->h_name);
+ break;
+ default:
++ ;
+ }
+ }
+
+diff -Naur net-tools-1.60-orig/lib/inet_sr.c net-tools-1.60/lib/inet_sr.c
+--- net-tools-1.60-orig/lib/inet_sr.c 2000-02-20 13:46:45.000000000 -0800
++++ net-tools-1.60/lib/inet_sr.c 2004-05-07 17:20:14.000000000 -0700
+@@ -105,6 +105,7 @@
+ case 2:
+ isnet = 0; break;
+ default:
++ ;
+ }
+
+ /* Fill in the other fields. */
diff --git a/testing/net-tools/net-tools-1.60-2.6-compilefix.patch b/testing/net-tools/net-tools-1.60-2.6-compilefix.patch
new file mode 100644
index 000000000..92ce90128
--- /dev/null
+++ b/testing/net-tools/net-tools-1.60-2.6-compilefix.patch
@@ -0,0 +1,23 @@
+diff -ruN net-tools-1.60.orig/lib/x25_sr.c net-tools-1.60/lib/x25_sr.c
+--- net-tools-1.60.orig/lib/x25_sr.c 2000-05-20 15:38:10.000000000 +0200
++++ net-tools-1.60/lib/x25_sr.c 2003-10-18 20:33:31.927574928 +0200
+@@ -22,6 +22,7 @@
+ #include <sys/socket.h>
+ #include <sys/ioctl.h>
+ #include <linux/x25.h>
++#include <linux/version.h>
+ #include <ctype.h>
+ #include <errno.h>
+ #include <netdb.h>
+@@ -77,7 +78,11 @@
+ rt.sigdigits=sigdigits;
+
+ /* x25_route_struct.address isn't type struct sockaddr_x25, Why? */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+ memcpy(&rt.address, &sx25.sx25_addr, sizeof(x25_address));
++#else
++ memcpy(&rt.address, &sx25.sx25_addr, sizeof(struct x25_address));
++#endif
+
+ while (*args) {
+ if (!strcmp(*args,"device") || !strcmp(*args,"dev")) {
diff --git a/testing/net-tools/net-tools-1.60-miiioctl.patch b/testing/net-tools/net-tools-1.60-miiioctl.patch
new file mode 100644
index 000000000..4859273ed
--- /dev/null
+++ b/testing/net-tools/net-tools-1.60-miiioctl.patch
@@ -0,0 +1,17 @@
+--- net-tools-1.60/include/mii.h.bar Tue Jul 31 11:49:39 2001
++++ net-tools-1.60/include/mii.h Tue Jul 31 11:49:33 2001
+@@ -11,11 +11,9 @@
+
+ /* network interface ioctl's for MII commands */
+ #ifndef SIOCGMIIPHY
+-#define SIOCGMIIPHY (SIOCDEVPRIVATE) /* Read from current PHY */
+-#define SIOCGMIIREG (SIOCDEVPRIVATE+1) /* Read any PHY register */
+-#define SIOCSMIIREG (SIOCDEVPRIVATE+2) /* Write any PHY register */
+-#define SIOCGPARAMS (SIOCDEVPRIVATE+3) /* Read operational parameters */
+-#define SIOCSPARAMS (SIOCDEVPRIVATE+4) /* Set operational parameters */
++#define SIOCGMIIPHY 0x8947 /* Read from current PHY */
++#define SIOCGMIIREG 0x8948 /* Read any PHY register */
++#define SIOCSMIIREG 0x8949 /* Write any PHY register */
+ #endif
+
+ #include <linux/types.h>
diff --git a/testing/net-tools/net-tools-1.60-nameif.patch b/testing/net-tools/net-tools-1.60-nameif.patch
new file mode 100644
index 000000000..54def00c2
--- /dev/null
+++ b/testing/net-tools/net-tools-1.60-nameif.patch
@@ -0,0 +1,58 @@
+--- net-tools-1.60/nameif.c.nameif 2000-10-18 19:26:29.000000000 +0200
++++ net-tools-1.60/nameif.c 2003-03-19 11:02:01.000000000 +0100
+@@ -117,7 +117,8 @@
+ }
+
+ struct change {
+- struct change *next,**pprev;
++ struct change *next;
++ int found;
+ char ifname[IFNAMSIZ+1];
+ unsigned char mac[6];
+ };
+@@ -139,10 +140,7 @@
+ ch->ifname, pos);
+ if (parsemac(p,ch->mac) < 0)
+ complain(_("cannot parse MAC `%s' at %s"), p, pos);
+- if (clist)
+- clist->pprev = &ch->next;
+ ch->next = clist;
+- ch->pprev = &clist;
+ clist = ch;
+ return 0;
+ }
+@@ -200,7 +198,7 @@
+
+ void usage(void)
+ {
+- fprintf(stderr, _("usage: nameif [-c configurationfile] [-s] {ifname macaddress}"));
++ fprintf(stderr, _("usage: nameif [-c configurationfile] [-s] {ifname macaddress}\n"));
+ exit(1);
+ }
+
+@@ -277,21 +275,21 @@
+ ch = lookupmac(mac);
+ if (!ch)
+ continue;
+-
+- *ch->pprev = ch->next;
++
++ ch->found = 1;
+ if (strcmp(p, ch->ifname)) {
+ if (setname(p, ch->ifname) < 0)
+ complain(_("cannot change name of %s to %s: %s"),
+ p, ch->ifname, strerror(errno));
+ }
+- free(ch);
+ }
+ fclose(ifh);
+
+ while (clist) {
+ struct change *ch = clist;
+ clist = clist->next;
+- warning(_("interface '%s' not found"), ch->ifname);
++ if (!ch->found)
++ warning(_("interface '%s' not found"), ch->ifname);
+ free(ch);
+ }
+
diff --git a/testing/net-tools/net-tools-1.60-nameif_strncpy.patch b/testing/net-tools/net-tools-1.60-nameif_strncpy.patch
new file mode 100644
index 000000000..7568e21a2
--- /dev/null
+++ b/testing/net-tools/net-tools-1.60-nameif_strncpy.patch
@@ -0,0 +1,13 @@
+--- net-tools-1.60/nameif.c.ncpy 2006-10-03 14:24:21.000000000 +0200
++++ net-tools-1.60/nameif.c 2006-10-03 14:22:43.000000000 +0200
+@@ -100,8 +100,8 @@
+ struct ifreq ifr;
+ opensock();
+ memset(&ifr,0,sizeof(struct ifreq));
+- strcpy(ifr.ifr_name, oldname);
+- strcpy(ifr.ifr_newname, newname);
++ strncpy(ifr.ifr_name, oldname, IF_NAMESIZE);
++ strncpy(ifr.ifr_newname, newname, IF_NAMESIZE);
+ return ioctl(ctl_sk, SIOCSIFNAME, &ifr);
+ }
+
diff --git a/testing/net-tools/net-tools.patch b/testing/net-tools/net-tools.patch
new file mode 100644
index 000000000..87e062eb2
--- /dev/null
+++ b/testing/net-tools/net-tools.patch
@@ -0,0 +1,52 @@
+diff -Naur net-tools-1.60-orig/config.in net-tools-1.60/config.in
+--- net-tools-1.60-orig/config.in 2000-05-21 07:32:12.000000000 -0700
++++ net-tools-1.60/config.in 2004-05-31 12:36:00.000000000 -0700
+@@ -49,7 +49,7 @@
+ *
+ bool 'UNIX protocol family' HAVE_AFUNIX y
+ bool 'INET (TCP/IP) protocol family' HAVE_AFINET y
+-bool 'INET6 (IPv6) protocol family' HAVE_AFINET6 n
++bool 'INET6 (IPv6) protocol family' HAVE_AFINET6 y
+ bool 'Novell IPX/SPX protocol family' HAVE_AFIPX y
+ bool 'Appletalk DDP protocol family' HAVE_AFATALK y
+ bool 'AX25 (packet radio) protocol family' HAVE_AFAX25 y
+@@ -86,6 +86,6 @@
+ *
+ * Other Features.
+ *
+-bool 'IP Masquerading support' HAVE_FW_MASQUERADE n
+-bool 'Build iptunnel and ipmaddr' HAVE_IP_TOOLS n
+-bool 'Build mii-tool' HAVE_MII n
++bool 'IP Masquerading support' HAVE_FW_MASQUERADE y
++bool 'Build iptunnel and ipmaddr' HAVE_IP_TOOLS y
++bool 'Build mii-tool' HAVE_MII y
+diff -Naur net-tools-1.60-orig/mii-tool.c net-tools-1.60/mii-tool.c
+--- net-tools-1.60-orig/mii-tool.c 2000-05-21 07:31:17.000000000 -0700
++++ net-tools-1.60/mii-tool.c 2004-05-31 12:35:21.000000000 -0700
+@@ -379,16 +379,16 @@
+ /*--------------------------------------------------------------------*/
+
+ const char *usage =
+-"usage: %s [-VvRrwl] [-A media,... | -F media] [interface ...]
+- -V, --version display version information
+- -v, --verbose more verbose output
+- -R, --reset reset MII to poweron state
+- -r, --restart restart autonegotiation
+- -w, --watch monitor for link status changes
+- -l, --log with -w, write events to syslog
+- -A, --advertise=media,... advertise only specified media
+- -F, --force=media force specified media technology
+-media: 100baseT4, 100baseTx-FD, 100baseTx-HD, 10baseT-FD, 10baseT-HD,
++"usage: %s [-VvRrwl] [-A media,... | -F media] [interface ...]\n\
++ -V, --version display version information\n\
++ -v, --verbose more verbose output\n\
++ -R, --reset reset MII to poweron state\n\
++ -r, --restart restart autonegotiation\n\
++ -w, --watch monitor for link status changes\n\
++ -l, --log with -w, write events to syslog\n\
++ -A, --advertise=media,... advertise only specified media\n\
++ -F, --force=media force specified media technology\n\
++media: 100baseT4, 100baseTx-FD, 100baseTx-HD, 10baseT-FD, 10baseT-HD,\n\
+ (to advertise both HD and FD) 100baseTx, 10baseT\n";
+
+ int main(int argc, char **argv)
diff --git a/testing/pidgin/PKGBUILD b/testing/pidgin/PKGBUILD
new file mode 100644
index 000000000..3ed646b74
--- /dev/null
+++ b/testing/pidgin/PKGBUILD
@@ -0,0 +1,114 @@
+# $Id: PKGBUILD 124935 2011-05-25 09:46:33Z foutrelis $
+# Maintainer: Evangelos Foutras <foutrelis@gmail.com>
+# Contributor: Ionut Biru <ibiru@archlinux.org>
+# Contributor: Andrea Scarpino <andrea@archlinux.org>
+# Contributor: Alexander Fehr <pizzapunk gmail com>
+# Contributor: Lucien Immink <l.immink@student.fnt.hvu.nl>
+
+pkgname=('pidgin' 'libpurple' 'finch')
+pkgver=2.7.11
+pkgrel=6
+arch=('i686' 'x86_64')
+url="http://pidgin.im/"
+license=('GPL')
+makedepends=('startup-notification' 'gtkspell' 'libxss' 'nss' 'libsasl' 'libsm'
+ 'python2' 'hicolor-icon-theme' 'silc-toolkit' 'gstreamer0.10'
+ 'farsight2' 'avahi' 'tk' 'ca-certificates' 'intltool'
+ 'networkmanager')
+options=('!libtool')
+source=(http://downloads.sourceforge.net/$pkgname/$pkgname-$pkgver.tar.bz2
+ nm09-pidgin.patch
+ nm09-more.patch)
+md5sums=('07c2a2535b4d7436b5ec7685fe063fec'
+ '744a21b4dbaf949dba7cd3b75b12b4fe'
+ 'a673659d86c7a65aa710f7c8c7feda82')
+
+build() {
+ cd "$srcdir/$pkgname-$pkgver"
+
+ # Update for NetworkManager 0.9 connection states
+ # (http://developer.pidgin.im/ticket/13505)
+ # (http://developer.pidgin.im/ticket/13859)
+ patch -Np1 -i "$srcdir/nm09-pidgin.patch"
+ patch -Np1 -i "$srcdir/nm09-more.patch"
+
+ # Use Python 2
+ sed -i 's/env python$/\02/' */plugins/*.py \
+ libpurple/purple-{remote,notifications-example,url-handler}
+
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --disable-schemas-install \
+ --disable-meanwhile \
+ --disable-gnutls \
+ --enable-cyrus-sasl \
+ --disable-doxygen \
+ --enable-nm \
+ --with-python=/usr/bin/python2 \
+ --with-system-ssl-certs=/etc/ssl/certs
+ make
+}
+
+package_pidgin(){
+ pkgdesc="Multi-protocol instant messaging client"
+ depends=("libpurple=$pkgver-$pkgrel" 'startup-notification' 'gtkspell'
+ 'libxss' 'libsm' 'gstreamer0.10' 'hicolor-icon-theme')
+ optdepends=('aspell: for spelling correction'
+ 'ca-certificates: SSL CA certificates'
+ 'gstreamer0.10-good-plugins: video and voice support'
+ 'tk: Tcl/Tk scripting support')
+ install=pidgin.install
+
+ cd "$srcdir/pidgin-$pkgver"
+
+ # For linking
+ make -C libpurple DESTDIR="$pkgdir" install-libLTLIBRARIES
+
+ make -C pidgin DESTDIR="$pkgdir" install
+ make -C doc DESTDIR="$pkgdir" install
+
+ # Remove files that are packaged in libpurple
+ make -C libpurple DESTDIR="$pkgdir" uninstall-libLTLIBRARIES
+
+ install -Dm644 pidgin.desktop "$pkgdir"/usr/share/applications/pidgin.desktop
+
+ rm "$pkgdir/usr/share/man/man1/finch.1"
+}
+
+package_libpurple(){
+ pkgdesc="IM library extracted from Pidgin"
+ depends=('farsight2' 'libsasl' 'dbus-glib' 'silc-toolkit' 'nss'
+ 'cyrus-sasl-plugins')
+ optdepends=('avahi: Bonjour protocol support'
+ 'dbus-python: for purple-remote and purple-url-handler')
+
+ cd "$srcdir/pidgin-$pkgver"
+
+ for _dir in libpurple share/sounds share/ca-certs m4macros po; do
+ make -C "$_dir" DESTDIR="$pkgdir" install
+ done
+}
+
+package_finch(){
+ pkgdesc="A ncurses-based messaging client"
+ depends=("libpurple=$pkgver-$pkgrel" 'python2' 'gstreamer0.10')
+ optdepends=('avahi: Bonjour protocol support'
+ 'ca-certificates: SSL CA certificates'
+ 'tk: Tcl/Tk scripting support')
+
+ cd "$srcdir/pidgin-$pkgver"
+
+ # For linking
+ make -C libpurple DESTDIR="$pkgdir" install-libLTLIBRARIES
+
+ make -C finch DESTDIR="$pkgdir" install
+ make -C doc DESTDIR="$pkgdir" install
+
+ # Remove files that are packaged in libpurple
+ make -C libpurple DESTDIR="$pkgdir" uninstall-libLTLIBRARIES
+
+ rm "$pkgdir"/usr/share/man/man1/pidgin.1
+}
+
+# vim:set ts=2 sw=2 et:
diff --git a/testing/pidgin/nm09-more.patch b/testing/pidgin/nm09-more.patch
new file mode 100644
index 000000000..8c708df9a
--- /dev/null
+++ b/testing/pidgin/nm09-more.patch
@@ -0,0 +1,49 @@
+diff -up pidgin-2.7.11/libpurple/network.c.nm09more pidgin-2.7.11/libpurple/network.c
+--- pidgin-2.7.11/libpurple/network.c.nm09more 2011-04-26 12:01:27.700085246 -0500
++++ pidgin-2.7.11/libpurple/network.c 2011-05-24 13:13:28.185165657 -0500
+@@ -833,8 +833,20 @@ purple_network_is_available(void)
+ purple_debug_warning("network", "NetworkManager not active. Assuming connection exists.\n");
+ }
+
+- if (nm_state == NM_STATE_UNKNOWN || nm_state == NM_STATE_CONNECTED)
+- return TRUE;
++ switch (nm_state)
++ {
++ case NM_STATE_UNKNOWN:
++#if NM_CHECK_VERSION(0,8,992)
++ case NM_STATE_CONNECTED_LOCAL:
++ case NM_STATE_CONNECTED_SITE:
++ case NM_STATE_CONNECTED_GLOBAL:
++#else
++ case NM_STATE_CONNECTED:
++#endif
++ return TRUE;
++ default:
++ break;
++ }
+
+ return FALSE;
+
+@@ -1170,9 +1182,14 @@ purple_network_init(void)
+ NM_DBUS_SERVICE,
+ NM_DBUS_PATH,
+ NM_DBUS_INTERFACE);
++ /* NM 0.6 signal */
+ dbus_g_proxy_add_signal(nm_proxy, "StateChange", G_TYPE_UINT, G_TYPE_INVALID);
+ dbus_g_proxy_connect_signal(nm_proxy, "StateChange",
+ G_CALLBACK(nm_state_change_cb), NULL, NULL);
++ /* NM 0.7 and later signal */
++ dbus_g_proxy_add_signal(nm_proxy, "StateChanged", G_TYPE_UINT, G_TYPE_INVALID);
++ dbus_g_proxy_connect_signal(nm_proxy, "StateChanged",
++ G_CALLBACK(nm_state_change_cb), NULL, NULL);
+
+ dbus_proxy = dbus_g_proxy_new_for_name(nm_conn,
+ DBUS_SERVICE_DBUS,
+@@ -1207,6 +1224,7 @@ purple_network_uninit(void)
+ #ifdef HAVE_NETWORKMANAGER
+ if (nm_proxy) {
+ dbus_g_proxy_disconnect_signal(nm_proxy, "StateChange", G_CALLBACK(nm_state_change_cb), NULL);
++ dbus_g_proxy_disconnect_signal(nm_proxy, "StateChanged", G_CALLBACK(nm_state_change_cb), NULL);
+ g_object_unref(G_OBJECT(nm_proxy));
+ }
+ if (dbus_proxy) {
diff --git a/testing/pidgin/nm09-pidgin.patch b/testing/pidgin/nm09-pidgin.patch
new file mode 100644
index 000000000..1c2471d1f
--- /dev/null
+++ b/testing/pidgin/nm09-pidgin.patch
@@ -0,0 +1,38 @@
+diff -up pidgin-2.7.10/libpurple/network.c.foo pidgin-2.7.10/libpurple/network.c
+--- pidgin-2.7.10/libpurple/network.c.foo 2011-03-10 02:21:43.920933267 -0600
++++ pidgin-2.7.10/libpurple/network.c 2011-03-10 02:23:11.466838793 -0600
+@@ -71,6 +71,10 @@
+ #include <dbus/dbus-glib.h>
+ #include <NetworkManager.h>
+
++#if !defined(NM_CHECK_VERSION)
++#define NM_CHECK_VERSION(x,y,z) 0
++#endif
++
+ static DBusGConnection *nm_conn = NULL;
+ static DBusGProxy *nm_proxy = NULL;
+ static DBusGProxy *dbus_proxy = NULL;
+@@ -863,7 +867,13 @@ nm_update_state(NMState state)
+
+ switch(state)
+ {
++#if NM_CHECK_VERSION(0,8,992)
++ case NM_STATE_CONNECTED_LOCAL:
++ case NM_STATE_CONNECTED_SITE:
++ case NM_STATE_CONNECTED_GLOBAL:
++#else
+ case NM_STATE_CONNECTED:
++#endif
+ /* Call res_init in case DNS servers have changed */
+ res_init();
+ /* update STUN IP in case we it changed (theoretically we could
+@@ -880,6 +890,9 @@ nm_update_state(NMState state)
+ case NM_STATE_ASLEEP:
+ case NM_STATE_CONNECTING:
+ case NM_STATE_DISCONNECTED:
++#if NM_CHECK_VERSION(0,8,992)
++ case NM_STATE_DISCONNECTING:
++#endif
+ if (prev != NM_STATE_CONNECTED && prev != NM_STATE_UNKNOWN)
+ break;
+ if (ui_ops != NULL && ui_ops->network_disconnected != NULL)
diff --git a/testing/pidgin/pidgin.install b/testing/pidgin/pidgin.install
new file mode 100644
index 000000000..1a05f573e
--- /dev/null
+++ b/testing/pidgin/pidgin.install
@@ -0,0 +1,11 @@
+post_install() {
+ gtk-update-icon-cache -q -t -f usr/share/icons/hicolor
+}
+
+post_upgrade() {
+ post_install
+}
+
+post_remove() {
+ post_install
+}
diff --git a/testing/subversion/PKGBUILD b/testing/subversion/PKGBUILD
new file mode 100644
index 000000000..40784dcb7
--- /dev/null
+++ b/testing/subversion/PKGBUILD
@@ -0,0 +1,98 @@
+# $Id: PKGBUILD 126240 2011-06-02 20:16:49Z stephane $
+# Maintainer: Paul Mattal <paul@archlinux.org>
+# Contributor: Jason Chu <jason@archlinux.org>
+
+pkgname=subversion
+pkgver=1.6.17
+pkgrel=3
+pkgdesc="Replacement for CVS, another versioning system (SVN)"
+arch=('i686' 'x86_64')
+license=('apache' 'bsd')
+depends=('neon' 'apr-util')
+makedepends=('krb5' 'apache' 'python2' 'perl' 'swig' 'ruby' 'java-runtime'
+ 'autoconf' 'sqlite3' 'db' 'e2fsprogs' 'libgnome-keyring' 'kdelibs')
+source=(http://subversion.tigris.org/downloads/$pkgname-$pkgver.tar.bz2
+ svnserve svn svnserve.conf svnmerge.py
+ subversion.rpath.fix.patch
+ subversion.suppress.deprecation.warnings.patch)
+
+backup=('etc/xinetd.d/svn' 'etc/conf.d/svnserve')
+url="http://subversion.apache.org/"
+provides=('svn')
+options=('!makeflags' '!libtool')
+optdepends=('libgnome-keyring' 'kdeutils-kwallet' 'bash-completion: for svn bash completion')
+
+build() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+
+ export PYTHON=/usr/bin/python2
+
+ # apply patches
+ patch -Np0 -i ../subversion.rpath.fix.patch
+ patch -Np1 -i ../subversion.suppress.deprecation.warnings.patch
+
+ # configure
+ autoreconf
+ ./configure --prefix=/usr --with-apr=/usr --with-apr-util=/usr \
+ --with-zlib=/usr --with-neon=/usr --with-apxs \
+ --with-sqlite=/usr --with-berkeley-db=:/usr/include/:/usr/lib:db-5.1 \
+ --enable-javahl --with-gnome-keyring --with-kwallet
+
+ # build
+ (make external-all && make LT_LDFLAGS="-L$Fdestdir/usr/lib" local-all )
+}
+
+#check() {
+# cd "${srcdir}/${pkgname}-${pkgver}"
+# export LANG=C LC_ALL=C
+# make check check-swig-pl check-swig-py CLEANUP=yes
+#}
+
+package() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+
+ # install
+ export LD_LIBRARY_PATH=${pkgdir}/usr/lib:$LD_LIBRARY_PATH
+ make DESTDIR=${pkgdir} install
+
+ make DESTDIR=${pkgdir} swig-py
+ make install-swig-py DESTDIR=${pkgdir}
+
+ install -d ${pkgdir}/usr/lib/python2.7
+ mv ${pkgdir}/usr/lib/svn-python/ ${pkgdir}/usr/lib/python2.7/site-packages
+
+ install -d ${pkgdir}/usr/share/subversion
+ install -d -m 755 tools/hook-scripts ${pkgdir}/usr/share/subversion/
+ rm -f ${pkgdir}/usr/share/subversion/hook-scripts/*.in
+
+ make DESTDIR=${pkgdir} swig-pl
+ make install-swig-pl DESTDIR=${pkgdir} INSTALLDIRS=vendor
+ rm -f ${pkgdir}/usr/lib/perl5/vendor_perl/auto/SVN/_Core/.packlist
+ rm -rf ${pkgdir}/usr/lib/perl5/core_perl
+
+ make DESTDIR=${pkgdir} swig-rb
+ make install-swig-rb DESTDIR=${pkgdir}
+
+ make DESTDIR=${pkgdir} javahl
+ make DESTDIR=${pkgdir} install-javahl
+
+ install -d ${pkgdir}/etc/{rc.d,xinetd.d,conf.d}
+
+ install -m 755 ${srcdir}/svnserve ${pkgdir}/etc/rc.d
+ install -m 644 ${srcdir}/svn ${pkgdir}/etc/xinetd.d
+ install -m 644 ${srcdir}/svnserve.conf ${pkgdir}/etc/conf.d/svnserve
+ install -m 755 ${srcdir}/svnmerge.py ${pkgdir}/usr/bin/svnmerge
+ install -D -m 644 ${srcdir}/subversion-$pkgver/COPYING \
+ ${pkgdir}/usr/share/licenses/$pkgname/LICENSE
+
+ # bash completion
+ install -Dm 644 ${srcdir}/${pkgname}-${pkgver}/tools/client-side/bash_completion \
+ ${pkgdir}/etc/bash_completion.d/subversion
+}
+md5sums=('81e5dc5beee4b3fc025ac70c0b6caa14'
+ 'a2b029e8385007ffb99b437b30521c90'
+ 'a0db6dd43af33952739b6ec089852630'
+ 'c459e299192552f61578f3438abf0664'
+ 'a6371baeda7e224504629ecdda2749b4'
+ '6b4340ba9d8845cd8497e013ae01be3f'
+ '1166f3b7413d7e7450299b3525680bbe')
diff --git a/testing/subversion/subversion.rpath.fix.patch b/testing/subversion/subversion.rpath.fix.patch
new file mode 100644
index 000000000..ba6ee9e4e
--- /dev/null
+++ b/testing/subversion/subversion.rpath.fix.patch
@@ -0,0 +1,10 @@
+--- Makefile.in.orig 2009-02-16 14:10:48.000000000 -0200
++++ Makefile.in 2009-06-04 00:56:29.000000000 -0300
+@@ -678,6 +678,7 @@
+
+ $(SWIG_PL_DIR)/native/Makefile: $(SWIG_PL_DIR)/native/Makefile.PL
+ cd $(SWIG_PL_DIR)/native; $(PERL) Makefile.PL
++ cd $(SWIG_PL_DIR)/native; sed -i 's|LD_RUN_PATH|DIE_RPATH_DIE|g' Makefile{,.{client,delta,fs,ra,repos,wc}}
+
+ swig-pl_DEPS = autogen-swig-pl libsvn_swig_perl \
+ $(SWIG_PL_DIR)/native/Makefile
diff --git a/testing/subversion/subversion.suppress.deprecation.warnings.patch b/testing/subversion/subversion.suppress.deprecation.warnings.patch
new file mode 100644
index 000000000..94ce89b18
--- /dev/null
+++ b/testing/subversion/subversion.suppress.deprecation.warnings.patch
@@ -0,0 +1,22 @@
+diff -urN subversion-1.6.9/subversion/bindings/swig/python/svn/core.py subversion-1.6.9-fixed/subversion/bindings/swig/python/svn/core.py
+--- subversion-1.6.9/subversion/bindings/swig/python/svn/core.py 2009-02-13 11:22:26.000000000 -0500
++++ subversion-1.6.9-fixed/subversion/bindings/swig/python/svn/core.py 2010-02-08 07:46:29.000000000 -0500
+@@ -19,6 +19,7 @@
+ from libsvn.core import *
+ import libsvn.core as _libsvncore
+ import atexit as _atexit
++import warnings
+
+ class SubversionException(Exception):
+ def __init__(self, message=None, apr_err=None, child=None,
+@@ -44,7 +45,9 @@
+ Exception.__init__(self, *args)
+
+ self.apr_err = apr_err
+- self.message = message
++ with warnings.catch_warnings():
++ warnings.simplefilter("ignore", DeprecationWarning)
++ self.message = message
+ self.child = child
+ self.file = file
+ self.line = line
diff --git a/testing/subversion/svn b/testing/subversion/svn
new file mode 100644
index 000000000..8988aaf63
--- /dev/null
+++ b/testing/subversion/svn
@@ -0,0 +1,11 @@
+service svn
+{
+ flags = REUSE
+ socket_type = stream
+ wait = no
+ user = root
+ server = /usr/bin/svnserve
+ server_args = -i
+ log_on_failure += USERID
+ disable = yes
+}
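+
+# Note: the service ships disabled; to serve repositories through xinetd,
+# set "disable = no" above and restart the xinetd daemon.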
diff --git a/testing/subversion/svnmerge.py b/testing/subversion/svnmerge.py
new file mode 100644
index 000000000..d8931648f
--- /dev/null
+++ b/testing/subversion/svnmerge.py
@@ -0,0 +1,2370 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+# Copyright (c) 2005, Giovanni Bajo
+# Copyright (c) 2004-2005, Awarix, Inc.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+#
+# Author: Archie Cobbs <archie at awarix dot com>
+# Rewritten in Python by: Giovanni Bajo <rasky at develer dot com>
+#
+# Acknowledgments:
+# John Belmonte <john at neggie dot net> - metadata and usability
+# improvements
+# Blair Zajac <blair at orcaware dot com> - random improvements
+# Raman Gupta <rocketraman at fastmail dot fm> - bidirectional and transitive
+# merging support
+# Dustin J. Mitchell <dustin at zmanda dot com> - support for multiple
+# location identifier formats
+#
+# $HeadURL$
+# $LastChangedDate$
+# $LastChangedBy$
+# $LastChangedRevision$
+#
+# Requisites:
+# svnmerge.py has been tested with all SVN major versions since 1.1 (both
+# client and server). It is unknown if it works with previous versions.
+#
+# Differences from svnmerge.sh:
+# - More portable: tested as working in FreeBSD and OS/2.
+# - Add double-verbose mode, which shows every svn command executed (-v -v).
+# - "svnmerge avail" now only shows commits in source, not also commits in
+# other parts of the repository.
+# - Add "svnmerge block" to flag some revisions as blocked, so that
+# they will not show up anymore in the available list. Added also
+# the complementary "svnmerge unblock".
+# - "svnmerge avail" has grown two new options:
+# -B to display a list of the blocked revisions
+# -A to display both the blocked and the available revisions.
+# - Improved generated commit message to make it machine parsable even when
+# merging commits which are themselves merges.
+# - Add --force option to skip working copy check
+# - Add --record-only option to "svnmerge merge" to avoid performing
+# an actual merge, yet record that a merge happened.
+# - Can use a variety of location-identifier formats
+#
+# TODO:
+# - Add "svnmerge avail -R": show logs in reverse order
+#
+# Information for Hackers:
+#
+# Identifiers for branches:
+# A branch is identified in three ways within this source:
+# - as a working copy (variable name usually includes 'dir')
+# - as a fully qualified URL
+# - as a path identifier (an opaque string indicating a particular path
+# in a particular repository; variable name includes 'pathid')
+# A "target" is generally user-specified, and may be a working copy or
+# a URL.
+
+import sys, os, getopt, re, types, tempfile, time, locale
+from bisect import bisect
+from xml.dom import pulldom
+
+NAME = "svnmerge"
+if not hasattr(sys, "version_info") or sys.version_info < (2, 0):
+    # error() is only defined further down, so report and bail out directly
+    sys.stderr.write("%s: requires Python 2.0 or newer\n" % NAME)
+    sys.exit(1)
+
+# Set up the separator used to separate individual log messages from
+# each revision merged into the target location. Also, create a
+# regular expression that will find this same separator in already
+# committed log messages, so that the separator used for this run of
+# svnmerge.py will have one more LOG_SEPARATOR appended to the longest
+# separator found in all the commits.
+LOG_SEPARATOR = 8 * '.'
+LOG_SEPARATOR_RE = re.compile('^((%s)+)' % re.escape(LOG_SEPARATOR),
+ re.MULTILINE)
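+# Illustration of the documented behaviour: if the longest separator already
+# embedded in the commit logs is 16 dots, this run emits a 24-dot separator
+# (see construct_merged_log_message below).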
+
+# Each line of the embedded log messages will be prefixed by LOG_LINE_PREFIX.
+LOG_LINE_PREFIX = 2 * ' '
+
+# Set python to the default locale as per environment settings, same as svn
+# TODO we should really parse config and if log-encoding is specified, set
+# the locale to match that encoding
+locale.setlocale(locale.LC_ALL, '')
+
+# We want the svn output (such as svn info) to be non-localized
+# Using LC_MESSAGES should not affect localized output of svn log, for example
+if os.environ.has_key("LC_ALL"):
+ del os.environ["LC_ALL"]
+os.environ["LC_MESSAGES"] = "C"
+
+###############################################################################
+# Support for older Python versions
+###############################################################################
+
+# True/False constants are Python 2.2+
+try:
+ True, False
+except NameError:
+ True, False = 1, 0
+
+def lstrip(s, ch):
+ """Replacement for str.lstrip (support for arbitrary chars to strip was
+ added in Python 2.2.2)."""
+ i = 0
+ try:
+ while s[i] == ch:
+ i = i+1
+ return s[i:]
+ except IndexError:
+ return ""
+
+def rstrip(s, ch):
+ """Replacement for str.rstrip (support for arbitrary chars to strip was
+ added in Python 2.2.2)."""
+ try:
+ if s[-1] != ch:
+ return s
+ i = -2
+ while s[i] == ch:
+ i = i-1
+ return s[:i+1]
+ except IndexError:
+ return ""
+
+def strip(s, ch):
+ """Replacement for str.strip (support for arbitrary chars to strip was
+ added in Python 2.2.2)."""
+ return lstrip(rstrip(s, ch), ch)
+
+def rsplit(s, sep, maxsplits=0):
+ """Like str.rsplit, which is Python 2.4+ only."""
+ L = s.split(sep)
+ if not 0 < maxsplits <= len(L):
+ return L
+ return [sep.join(L[0:-maxsplits])] + L[-maxsplits:]
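+# e.g. rsplit("uuid://abc:1-3", ":", 1) -> ["uuid://abc", "1-3"], keeping
+# the earlier colons intact (values are purely illustrative).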
+
+###############################################################################
+
+def kwextract(s):
+ """Extract info from a svn keyword string."""
+ try:
+ return strip(s, "$").strip().split(": ")[1]
+ except IndexError:
+ return "<unknown>"
+
+__revision__ = kwextract('$Rev$')
+__date__ = kwextract('$Date$')
+
+# Additional options, not (yet?) mapped to command line flags
+default_opts = {
+ "svn": "svn",
+ "prop": NAME + "-integrated",
+ "block-prop": NAME + "-blocked",
+ "commit-verbose": True,
+ "verbose": 0,
+}
+logs = {}
+
+def console_width():
+ """Get the width of the console screen (if any)."""
+ try:
+ return int(os.environ["COLUMNS"])
+ except (KeyError, ValueError):
+ pass
+
+ try:
+ # Call the Windows API (requires ctypes library)
+ from ctypes import windll, create_string_buffer
+ h = windll.kernel32.GetStdHandle(-11)
+ csbi = create_string_buffer(22)
+ res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
+ if res:
+ import struct
+ (bufx, bufy,
+ curx, cury, wattr,
+ left, top, right, bottom,
+ maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
+ return right - left + 1
+ except ImportError:
+ pass
+
+ # Parse the output of stty -a
+ if os.isatty(1):
+ out = os.popen("stty -a").read()
+ m = re.search(r"columns (\d+);", out)
+ if m:
+ return int(m.group(1))
+
+ # sensible default
+ return 80
+
+def error(s):
+ """Subroutine to output an error and bail."""
+ print >> sys.stderr, "%s: %s" % (NAME, s)
+ sys.exit(1)
+
+def report(s):
+ """Subroutine to output progress message, unless in quiet mode."""
+ if opts["verbose"]:
+ print "%s: %s" % (NAME, s)
+
+def prefix_lines(prefix, lines):
+ """Given a string representing one or more lines of text, insert the
+ specified prefix at the beginning of each line, and return the result.
+ The input must be terminated by a newline."""
+ assert lines[-1] == "\n"
+ return prefix + lines[:-1].replace("\n", "\n"+prefix) + "\n"
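+# e.g. prefix_lines("  ", "a\nb\n") -> "  a\n  b\n" (illustrative values).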
+
+def recode_stdout_to_file(s):
+ if locale.getdefaultlocale()[1] is None or not hasattr(sys.stdout, "encoding") \
+ or sys.stdout.encoding is None:
+ return s
+ u = s.decode(sys.stdout.encoding)
+ return u.encode(locale.getdefaultlocale()[1])
+
+class LaunchError(Exception):
+ """Signal a failure in execution of an external command. Parameters are the
+ exit code of the process, the original command line, and the output of the
+ command."""
+
+try:
+ """Launch a sub-process. Return its output (both stdout and stderr),
+ optionally split by lines (if split_lines is True). Raise a LaunchError
+ exception if the exit code of the process is non-zero (failure).
+
+ This function has two implementations, one based on subprocess (preferred),
+ and one based on popen (for compatibility).
+ """
+ import subprocess
+ import shlex
+
+ def launch(cmd, split_lines=True):
+        # Requires Python 2.4 or higher; on some platforms the subprocess
+        # module is much faster (python doesn't try to close an exorbitant
+        # number of file descriptors)
+ stdout = ""
+ stderr = ""
+ try:
+ if os.name == 'nt':
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, \
+ close_fds=False, stderr=subprocess.PIPE)
+ else:
+ # Use shlex to break up the parameters intelligently,
+ # respecting quotes. shlex can't handle unicode.
+ args = shlex.split(cmd.encode('ascii'))
+ p = subprocess.Popen(args, stdout=subprocess.PIPE, \
+ close_fds=False, stderr=subprocess.PIPE)
+ stdoutAndErr = p.communicate()
+ stdout = stdoutAndErr[0]
+ stderr = stdoutAndErr[1]
+ except OSError, inst:
+ # Using 1 as failure code; should get actual number somehow? For
+ # examples see svnmerge_test.py's TestCase_launch.test_failure and
+ # TestCase_launch.test_failurecode.
+ raise LaunchError(1, cmd, stdout + " " + stderr + ": " + str(inst))
+
+ if p.returncode == 0:
+ if split_lines:
+ # Setting keepends=True for compatibility with previous logic
+ # (where file.readlines() preserves newlines)
+ return stdout.splitlines(True)
+ else:
+ return stdout
+ else:
+ raise LaunchError(p.returncode, cmd, stdout + stderr)
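+
+    # Usage sketch: launch("svn --version --quiet", split_lines=False)
+    # returns the raw output string, or raises LaunchError on failure.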
+except ImportError:
+ # support versions of python before 2.4 (slower on some systems)
+ def launch(cmd, split_lines=True):
+ if os.name not in ['nt', 'os2']:
+ import popen2
+ p = popen2.Popen4(cmd)
+ p.tochild.close()
+ if split_lines:
+ out = p.fromchild.readlines()
+ else:
+ out = p.fromchild.read()
+ ret = p.wait()
+ if ret == 0:
+ ret = None
+ else:
+ ret >>= 8
+ else:
+ i,k = os.popen4(cmd)
+ i.close()
+ if split_lines:
+ out = k.readlines()
+ else:
+ out = k.read()
+ ret = k.close()
+
+ if ret is None:
+ return out
+ raise LaunchError(ret, cmd, out)
+
+def launchsvn(s, show=False, pretend=False, **kwargs):
+ """Launch SVN and grab its output."""
+ username = password = configdir = ""
+ if opts.get("username", None):
+ username = "--username=" + opts["username"]
+ if opts.get("password", None):
+ password = "--password=" + opts["password"]
+ if opts.get("config-dir", None):
+ configdir = "--config-dir=" + opts["config-dir"]
+ cmd = ' '.join(filter(None, [opts["svn"], "--non-interactive",
+ username, password, configdir, s]))
+ if show or opts["verbose"] >= 2:
+ print cmd
+ if pretend:
+ return None
+ return launch(cmd, **kwargs)
+
+def svn_command(s):
+ """Do (or pretend to do) an SVN command."""
+ out = launchsvn(s, show=opts["show-changes"] or opts["dry-run"],
+ pretend=opts["dry-run"],
+ split_lines=False)
+ if not opts["dry-run"]:
+ print out
+
+def check_dir_clean(dir):
+ """Check the current status of dir for local mods."""
+ if opts["force"]:
+ report('skipping status check because of --force')
+ return
+ report('checking status of "%s"' % dir)
+
+ # Checking with -q does not show unversioned files or external
+ # directories. Though it displays a debug message for external
+ # directories, after a blank line. So, practically, the first line
+ # matters: if it's non-empty there is a modification.
+ out = launchsvn("status -q %s" % dir)
+ if out and out[0].strip():
+ error('"%s" has local modifications; it must be clean' % dir)
+
+class PathIdentifier:
+ """Abstraction for a path identifier, so that we can start talking
+ about it before we know the form that it takes in the properties (its
+ external_form). Objects are referenced in the class variable 'locobjs',
+ keyed by all known forms."""
+
+ # a map of UUID (or None) to repository root URL.
+ repo_hints = {}
+
+ # a map from any known string form to the corresponding PathIdentifier
+ locobjs = {}
+
+ def __init__(self, repo_relative_path, uuid=None, url=None, external_form=None):
+ self.repo_relative_path = repo_relative_path
+ self.uuid = uuid
+ self.url = url
+ self.external_form = external_form
+
+ def __repr__(self):
+ return "<PathIdentifier " + ', '.join('%s=%r' % i for i in self.__dict__.items()) + '>'
+
+ def __str__(self):
+ """Return a printable string representation"""
+ if self.external_form:
+ return self.external_form
+ if self.url:
+ return self.format('url')
+ if self.uuid:
+ return self.format('uuid')
+ return self.format('path')
+
+ def from_pathid(pathid_str):
+ """convert pathid_str to a PathIdentifier"""
+ if not PathIdentifier.locobjs.has_key(pathid_str):
+ if is_url(pathid_str):
+ # we can determine every form; PathIdentifier.hint knows how to do that
+ PathIdentifier.hint(pathid_str)
+ elif pathid_str[:7] == 'uuid://':
+ mo = re.match('uuid://([^/]*)(.*)', pathid_str)
+ if not mo:
+ error("Invalid path identifier '%s'" % pathid_str)
+ uuid, repo_relative_path = mo.groups()
+ pathid = PathIdentifier(repo_relative_path, uuid=uuid)
+ # we can cache this by uuid:// pathid and by repo-relative path
+ PathIdentifier.locobjs[pathid_str] = PathIdentifier.locobjs[repo_relative_path] = pathid
+ elif pathid_str and pathid_str[0] == '/':
+ # strip any trailing slashes
+ pathid_str = pathid_str.rstrip('/')
+ pathid = PathIdentifier(repo_relative_path=pathid_str)
+ # we can only cache this by repo-relative path
+ PathIdentifier.locobjs[pathid_str] = pathid
+ else:
+ error("Invalid path identifier '%s'" % pathid_str)
+ return PathIdentifier.locobjs[pathid_str]
+ from_pathid = staticmethod(from_pathid)
+
+ def from_target(target):
+ """Convert a target (either a working copy path or an URL) into a
+ path identifier."""
+ # prime the cache first if we don't know about this target yet
+ if not PathIdentifier.locobjs.has_key(target):
+ PathIdentifier.hint(target)
+
+ try:
+ return PathIdentifier.locobjs[target]
+ except KeyError:
+ error("Could not recognize path identifier '%s'" % target)
+ from_target = staticmethod(from_target)
+
+ def hint(target):
+ """Cache some information about target, as it may be referenced by
+ repo-relative path in subversion properties; the cache can help to
+ expand such a relative path to a full path identifier."""
+ if PathIdentifier.locobjs.has_key(target): return
+ if not is_url(target) and not is_wc(target): return
+
+ url = target_to_url(target)
+
+ root = get_repo_root(url)
+ assert root[-1] != "/"
+ assert url[:len(root)] == root, "url=%r, root=%r" % (url, root)
+ repo_relative_path = url[len(root):]
+
+ try:
+ uuid = get_svninfo(target)['Repository UUID']
+ uuid_pathid = 'uuid://%s%s' % (uuid, repo_relative_path)
+ except KeyError:
+ uuid = None
+ uuid_pathid = None
+
+ locobj = PathIdentifier.locobjs.get(url) or \
+ (uuid_pathid and PathIdentifier.locobjs.get(uuid_pathid))
+ if not locobj:
+ locobj = PathIdentifier(repo_relative_path, uuid=uuid, url=url)
+
+ PathIdentifier.repo_hints[uuid] = root # (uuid may be None)
+
+ PathIdentifier.locobjs[target] = locobj
+ PathIdentifier.locobjs[url] = locobj
+ if uuid_pathid:
+ PathIdentifier.locobjs[uuid_pathid] = locobj
+ if not PathIdentifier.locobjs.has_key(repo_relative_path):
+ PathIdentifier.locobjs[repo_relative_path] = locobj
+ hint = staticmethod(hint)
+
+ def format(self, fmt):
+ if fmt == 'path':
+ return self.repo_relative_path
+ elif fmt == 'uuid':
+ return "uuid://%s%s" % (self.uuid, self.repo_relative_path)
+ elif fmt == 'url':
+ return self.url
+ else:
+            error("Unknown path type '%s'" % fmt)
+
+ def match_substring(self, str):
+ """Test whether str is a substring of any representation of this
+ PathIdentifier."""
+ if self.repo_relative_path.find(str) >= 0:
+ return True
+
+ if self.uuid:
+ if ("uuid://%s%s" % (self.uuid, self.repo_relative_path)).find(str) >= 0:
+ return True
+
+ if self.url:
+ if (self.url + self.repo_relative_path).find(str) >= 0:
+ return True
+
+ return False
+
+ def get_url(self):
+ """Convert a pathid into a URL. If this is not possible, error out."""
+ if self.url:
+ return self.url
+ # if we have a uuid and happen to know the URL for it, use that
+ elif self.uuid and PathIdentifier.repo_hints.has_key(self.uuid):
+ self.url = PathIdentifier.repo_hints[self.uuid] + self.repo_relative_path
+ PathIdentifier.locobjs[self.url] = self
+ return self.url
+ # if we've only seen one rep, use that (a guess, but an educated one)
+ elif not self.uuid and len(PathIdentifier.repo_hints) == 1:
+ uuid, root = PathIdentifier.repo_hints.items()[0]
+ if uuid:
+ self.uuid = uuid
+ PathIdentifier.locobjs['uuid://%s%s' % (uuid, self.repo_relative_path)] = self
+ self.url = root + self.repo_relative_path
+ PathIdentifier.locobjs[self.url] = self
+ report("Guessing that '%s' refers to '%s'" % (self, self.url))
+ return self.url
+ else:
+ error("Cannot determine URL for '%s'; " % self +
+ "Explicit source argument (-S/--source) required.\n")
+
+class RevisionLog:
+ """
+ A log of the revisions which affected a given URL between two
+ revisions.
+ """
+
+ def __init__(self, url, begin, end, find_propchanges=False):
+ """
+ Create a new RevisionLog object, which stores, in self.revs, a list
+ of the revisions which affected the specified URL between begin and
+ end. If find_propchanges is True, self.propchange_revs will contain a
+ list of the revisions which changed properties directly on the
+ specified URL. URL must be the URL for a directory in the repository.
+ """
+ self.url = url
+
+        # Set up the log options (--quiet, so we don't show log messages)
+ log_opts = '--xml --quiet -r%s:%s "%s"' % (begin, end, url)
+ if find_propchanges:
+ # The --verbose flag lets us grab merge tracking information
+ # by looking at propchanges
+ log_opts = "--verbose " + log_opts
+
+ # Read the log to look for revision numbers and merge-tracking info
+ self.revs = []
+ self.propchange_revs = []
+ repos_pathid = PathIdentifier.from_target(url)
+ for chg in SvnLogParser(launchsvn("log %s" % log_opts,
+ split_lines=False)):
+ self.revs.append(chg.revision())
+ for p in chg.paths():
+ if p.action() == 'M' and p.pathid() == repos_pathid.repo_relative_path:
+ self.propchange_revs.append(chg.revision())
+
+ # Save the range of the log
+ self.begin = int(begin)
+ if end == "HEAD":
+ # If end is not provided, we do not know which is the latest
+ # revision in the repository. So we set 'end' to the latest
+ # known revision.
+ self.end = self.revs[-1]
+ else:
+ self.end = int(end)
+
+ self._merges = None
+ self._blocks = None
+
+ def merge_metadata(self):
+ """
+ Return a VersionedProperty object, with a cached view of the merge
+ metadata in the range of this log.
+ """
+
+ # Load merge metadata if necessary
+ if not self._merges:
+ self._merges = VersionedProperty(self.url, opts["prop"])
+ self._merges.load(self)
+
+ return self._merges
+
+ def block_metadata(self):
+ if not self._blocks:
+ self._blocks = VersionedProperty(self.url, opts["block-prop"])
+ self._blocks.load(self)
+
+ return self._blocks
+
+
+class VersionedProperty:
+ """
+ A read-only, cached view of a versioned property.
+
+ self.revs contains a list of the revisions in which the property changes.
+ self.values stores the new values at each corresponding revision. If the
+ value of the property is unknown, it is set to None.
+
+ Initially, we set self.revs to [0] and self.values to [None]. This
+ indicates that, as of revision zero, we know nothing about the value of
+ the property.
+
+ Later, if you run self.load(log), we cache the value of this property over
+ the entire range of the log by noting each revision in which the property
+ was changed. At the end of the range of the log, we invalidate our cache
+ by adding the value "None" to our cache for any revisions which fall out
+ of the range of our log.
+
+ Once self.revs and self.values are filled, we can find the value of the
+ property at any arbitrary revision using a binary search on self.revs.
+ Once we find the last revision during which the property was changed,
+ we can lookup the associated value in self.values. (If the associated
+ value is None, the associated value was not cached and we have to do
+ a full propget.)
+
+ An example: We know that the 'svnmerge' property was added in r10, and
+ changed in r21. We gathered log info up until r40.
+
+ revs = [0, 10, 21, 40]
+ values = [None, "val1", "val2", None]
+
+ What these values say:
+ - From r0 to r9, we know nothing about the property.
+ - In r10, the property was set to "val1". This property stayed the same
+ until r21, when it was changed to "val2".
+ - We don't know what happened after r40.
+ """
+
+ def __init__(self, url, name):
+ """View the history of a versioned property at URL with name"""
+ self.url = url
+ self.name = name
+
+        # We know nothing about the value of the property yet. Set up revs
+        # and values to reflect that.
+ self.revs = [0]
+ self.values = [None]
+
+ # We don't have any revisions cached
+ self._initial_value = None
+ self._changed_revs = []
+ self._changed_values = []
+
+ def load(self, log):
+ """
+ Load the history of property changes from the specified
+ RevisionLog object.
+ """
+
+ # Get the property value before the range of the log
+ if log.begin > 1:
+ self.revs.append(log.begin-1)
+ try:
+ self._initial_value = self.raw_get(log.begin-1)
+ except LaunchError:
+ # The specified URL might not exist before the
+ # range of the log. If so, we can safely assume
+ # that the property was empty at that time.
+ self._initial_value = { }
+ self.values.append(self._initial_value)
+ else:
+ self._initial_value = { }
+ self.values[0] = self._initial_value
+
+ # Cache the property values in the log range
+ old_value = self._initial_value
+ for rev in log.propchange_revs:
+ new_value = self.raw_get(rev)
+ if new_value != old_value:
+ self._changed_revs.append(rev)
+ self._changed_values.append(new_value)
+ self.revs.append(rev)
+ self.values.append(new_value)
+ old_value = new_value
+
+ # Indicate that we know nothing about the value of the property
+ # after the range of the log.
+ if log.revs:
+ self.revs.append(log.end+1)
+ self.values.append(None)
+
+ def raw_get(self, rev=None):
+ """
+ Get the property at revision REV. If rev is not specified, get
+ the property at revision HEAD.
+ """
+ return get_revlist_prop(self.url, self.name, rev)
+
+ def get(self, rev=None):
+ """
+ Get the property at revision REV. If rev is not specified, get
+ the property at revision HEAD.
+ """
+
+ if rev is not None:
+
+ # Find the index using a binary search
+ i = bisect(self.revs, rev) - 1
+
+ # Return the value of the property, if it was cached
+ if self.values[i] is not None:
+ return self.values[i]
+
+ # Get the current value of the property
+ return self.raw_get(rev)
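+        # Continuing the docstring example above: get(15) bisects revs
+        # [0, 10, 21, 40] to index 1 and returns the cached "val1"
+        # without a remote propget.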
+
+ def changed_revs(self, key=None):
+ """
+ Get a list of the revisions in which the specified dictionary
+ key was changed in this property. If key is not specified,
+ return a list of revisions in which any key was changed.
+ """
+ if key is None:
+ return self._changed_revs
+ else:
+ changed_revs = []
+ old_val = self._initial_value
+ for rev, val in zip(self._changed_revs, self._changed_values):
+ if val.get(key) != old_val.get(key):
+ changed_revs.append(rev)
+ old_val = val
+ return changed_revs
+
+ def initialized_revs(self):
+ """
+ Get a list of the revisions in which keys were added or
+ removed in this property.
+ """
+ initialized_revs = []
+ old_len = len(self._initial_value)
+ for rev, val in zip(self._changed_revs, self._changed_values):
+ if len(val) != old_len:
+ initialized_revs.append(rev)
+ old_len = len(val)
+ return initialized_revs
+
+class RevisionSet:
+ """
+ A set of revisions, held in dictionary form for easy manipulation. If we
+ were to rewrite this script for Python 2.3+, we would subclass this from
+ set (or UserSet). As this class does not include branch
+ information, it's assumed that one instance will be used per
+ branch.
+ """
+ def __init__(self, parm):
+ """Constructs a RevisionSet from a string in property form, or from
+ a dictionary whose keys are the revisions. Raises ValueError if the
+ input string is invalid."""
+
+ self._revs = {}
+
+ revision_range_split_re = re.compile('[-:]')
+
+ if isinstance(parm, types.DictType):
+ self._revs = parm.copy()
+ elif isinstance(parm, types.ListType):
+ for R in parm:
+ self._revs[int(R)] = 1
+ else:
+ parm = parm.strip()
+ if parm:
+ for R in parm.split(","):
+ rev_or_revs = re.split(revision_range_split_re, R)
+ if len(rev_or_revs) == 1:
+ self._revs[int(rev_or_revs[0])] = 1
+ elif len(rev_or_revs) == 2:
+ for rev in range(int(rev_or_revs[0]),
+ int(rev_or_revs[1])+1):
+ self._revs[rev] = 1
+ else:
+                        raise ValueError, 'Ill-formatted revision range: ' + R
+
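+    # A small round-trip illustration (hypothetical values):
+    #   RevisionSet("3,5-7").sorted()  ->  [3, 5, 6, 7]
+    #   str(RevisionSet("1-3,5"))      ->  "1-3,5"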
+ def sorted(self):
+ revnums = self._revs.keys()
+ revnums.sort()
+ return revnums
+
+ def normalized(self):
+ """Returns a normalized version of the revision set, which is an
+ ordered list of couples (start,end), with the minimum number of
+ intervals."""
+ revnums = self.sorted()
+ revnums.reverse()
+ ret = []
+ while revnums:
+ s = e = revnums.pop()
+ while revnums and revnums[-1] in (e, e+1):
+ e = revnums.pop()
+ ret.append((s, e))
+ return ret
+
+ def __str__(self):
+ """Convert the revision set to a string, using its normalized form."""
+ L = []
+ for s,e in self.normalized():
+ if s == e:
+ L.append(str(s))
+ else:
+ L.append(str(s) + "-" + str(e))
+ return ",".join(L)
+
+ def __contains__(self, rev):
+ return self._revs.has_key(rev)
+
+ def __sub__(self, rs):
+ """Compute subtraction as in sets."""
+ revs = {}
+ for r in self._revs.keys():
+ if r not in rs:
+ revs[r] = 1
+ return RevisionSet(revs)
+
+ def __and__(self, rs):
+ """Compute intersections as in sets."""
+ revs = {}
+ for r in self._revs.keys():
+ if r in rs:
+ revs[r] = 1
+ return RevisionSet(revs)
+
+ def __nonzero__(self):
+ return len(self._revs) != 0
+
+ def __len__(self):
+ """Return the number of revisions in the set."""
+ return len(self._revs)
+
+ def __iter__(self):
+ return iter(self.sorted())
+
+ def __or__(self, rs):
+ """Compute set union."""
+ revs = self._revs.copy()
+ revs.update(rs._revs)
+ return RevisionSet(revs)
+
+def merge_props_to_revision_set(merge_props, pathid):
+    """Return a RevisionSet instance containing the revisions recorded
+    for PATHID in MERGE_PROPS, a dictionary of pathid -> revision set
+    branch integration information (as returned by get_merge_props())."""
+ if not merge_props.has_key(pathid):
+ error('no integration info available for path "%s"' % pathid)
+ return RevisionSet(merge_props[pathid])
+
+def dict_from_revlist_prop(propvalue):
+ """Given a property value as a string containing per-source revision
+ lists, return a dictionary whose key is a source path identifier
+ and whose value is the revisions for that source."""
+ prop = {}
+
+ # Multiple sources are separated by any whitespace.
+ for L in propvalue.split():
+ # We use rsplit to play safe and allow colons in pathids.
+ pathid_str, revs = rsplit(L.strip(), ":", 1)
+
+ pathid = PathIdentifier.from_pathid(pathid_str)
+
+ # cache the "external" form we saw
+ pathid.external_form = pathid_str
+
+ prop[pathid] = revs
+ return prop
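+# For instance (illustrative value): the property string
+# "/trunk:1-10,12 uuid://X/branches/a:5" maps the pathid for /trunk to
+# "1-10,12" and the uuid:// pathid to "5".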
+
+def get_revlist_prop(url_or_dir, propname, rev=None):
+ """Given a repository URL or working copy path and a property
+ name, extract the values of the property which store per-source
+ revision lists and return a dictionary whose key is a source path
+ identifier, and whose value is the revisions for that source."""
+
+ # Note that propget does not return an error if the property does
+ # not exist, it simply does not output anything. So we do not need
+ # to check for LaunchError here.
+ args = '--strict "%s" "%s"' % (propname, url_or_dir)
+ if rev:
+ args = '-r %s %s' % (rev, args)
+ out = launchsvn('propget %s' % args, split_lines=False)
+
+ return dict_from_revlist_prop(out)
+
+def get_merge_props(dir):
+ """Extract the merged revisions."""
+ return get_revlist_prop(dir, opts["prop"])
+
+def get_block_props(dir):
+ """Extract the blocked revisions."""
+ return get_revlist_prop(dir, opts["block-prop"])
+
+def get_blocked_revs(dir, source_pathid):
+ p = get_block_props(dir)
+ if p.has_key(source_pathid):
+ return RevisionSet(p[source_pathid])
+ return RevisionSet("")
+
+def format_merge_props(props, sep=" "):
+ """Formats the hash PROPS as a string suitable for use as a
+ Subversion property value."""
+    assert sep in ["\t", "\n", " "] # must be whitespace
+ props = props.items()
+ props.sort()
+ L = []
+ for h, r in props:
+ L.append("%s:%s" % (h, r))
+ return sep.join(L)
+
+def _run_propset(dir, prop, value):
+ """Set the property 'prop' of directory 'dir' to value 'value'. We go
+ through a temporary file to not run into command line length limits."""
+ try:
+ fd, fname = tempfile.mkstemp()
+ f = os.fdopen(fd, "wb")
+ except AttributeError:
+ # Fallback for Python <= 2.3 which does not have mkstemp (mktemp
+ # suffers from race conditions. Not that we care...)
+ fname = tempfile.mktemp()
+ f = open(fname, "wb")
+
+ try:
+ f.write(value)
+ f.close()
+ report("property data written to temp file: %s" % value)
+ svn_command('propset "%s" -F "%s" "%s"' % (prop, fname, dir))
+ finally:
+ os.remove(fname)
+
+def set_props(dir, name, props):
+ props = format_merge_props(props)
+ if props:
+ _run_propset(dir, name, props)
+ else:
+ # Check if NAME exists on DIR before trying to delete it.
+ # As of 1.6 propdel no longer supports deleting a
+ # non-existent property.
+ out = launchsvn('propget "%s" "%s"' % (name, dir))
+ if out:
+ svn_command('propdel "%s" "%s"' % (name, dir))
+
+def set_merge_props(dir, props):
+ set_props(dir, opts["prop"], props)
+
+def set_block_props(dir, props):
+ set_props(dir, opts["block-prop"], props)
+
+def set_blocked_revs(dir, source_pathid, revs):
+ props = get_block_props(dir)
+ if revs:
+ props[source_pathid] = str(revs)
+ elif props.has_key(source_pathid):
+ del props[source_pathid]
+ set_block_props(dir, props)
+
+def is_url(url):
+ """Check if url looks like a valid url."""
+ return re.search(r"^[a-zA-Z][-+\.\w]*://[^\s]+$", url) is not None and url[:4] != 'uuid'
+
+def check_url(url):
+ """Similar to is_url, but actually invoke get_svninfo to find out"""
+ return get_svninfo(url) != {}
+
+def is_pathid(pathid):
+ return isinstance(pathid, PathIdentifier)
+
+def is_wc(dir):
+ """Check if a directory is a working copy."""
+ return os.path.isdir(os.path.join(dir, ".svn")) or \
+ os.path.isdir(os.path.join(dir, "_svn"))
+
+_cache_svninfo = {}
+def get_svninfo(target):
+ """Extract the subversion information for a target (through 'svn info').
+ This function uses an internal cache to let clients query information
+ many times."""
+ if _cache_svninfo.has_key(target):
+ return _cache_svninfo[target]
+ info = {}
+ for L in launchsvn('info "%s"' % target):
+ L = L.strip()
+ if not L:
+ continue
+ key, value = L.split(": ", 1)
+ info[key] = value.strip()
+ _cache_svninfo[target] = info
+ return info
+
+def target_to_url(target):
+ """Convert working copy path or repos URL to a repos URL."""
+ if is_wc(target):
+ info = get_svninfo(target)
+ return info["URL"]
+ return target
+
+_cache_reporoot = {}
+def get_repo_root(target):
+ """Compute the root repos URL given a working-copy path, or a URL."""
+ # Try using "svn info WCDIR". This works only on SVN clients >= 1.3
+ if not is_url(target):
+ try:
+ info = get_svninfo(target)
+ root = info["Repository Root"]
+ _cache_reporoot[root] = None
+ return root
+ except KeyError:
+ pass
+ url = target_to_url(target)
+ assert url[-1] != '/'
+ else:
+ url = target
+
+ # Go through the cache of the repository roots. This avoids extra
+ # server round-trips if we are asking the root of different URLs
+ # in the same repository (the cache in get_svninfo() cannot detect
+ # that of course and would issue a remote command).
+ assert is_url(url)
+ for r in _cache_reporoot:
+ if url.startswith(r):
+ return r
+
+ # Try using "svn info URL". This works only on SVN clients >= 1.2
+ try:
+ info = get_svninfo(url)
+ # info may be {}, in which case we'll see KeyError here
+ root = info["Repository Root"]
+ _cache_reporoot[root] = None
+ return root
+ except (KeyError, LaunchError):
+ pass
+
+ # Constrained to older svn clients, we are stuck with this ugly
+ # trial-and-error implementation. It could be made faster with a
+ # binary search.
+ while url:
+ temp = os.path.dirname(url)
+ try:
+ launchsvn('proplist "%s"' % temp)
+ except LaunchError:
+ _cache_reporoot[url] = None
+ return rstrip(url, "/")
+ url = temp
+
+ error("svn repos root of %s not found" % target)
+
+class SvnLogParser:
+    """
+    Parse "svn log --xml" output with pulldom (which would even allow
+    streaming the command output).
+    """
+ def __init__(self, xml):
+ self._events = pulldom.parseString(xml)
+ def __getitem__(self, idx):
+ for event, node in self._events:
+ if event == pulldom.START_ELEMENT and node.tagName == "logentry":
+ self._events.expandNode(node)
+ return self.SvnLogRevision(node)
+ raise IndexError, "Could not find 'logentry' tag in xml"
+
+ class SvnLogRevision:
+ def __init__(self, xmlnode):
+ self.n = xmlnode
+ def revision(self):
+ return int(self.n.getAttribute("revision"))
+ def author(self):
+ return self.n.getElementsByTagName("author")[0].firstChild.data
+ def paths(self):
+ return [self.SvnLogPath(n)
+ for n in self.n.getElementsByTagName("path")]
+
+ class SvnLogPath:
+ def __init__(self, xmlnode):
+ self.n = xmlnode
+ def action(self):
+ return self.n.getAttribute("action")
+ def pathid(self):
+ return self.n.firstChild.data
+ def copyfrom_rev(self):
+ try: return self.n.getAttribute("copyfrom-rev")
+ except KeyError: return None
+ def copyfrom_pathid(self):
+ try: return self.n.getAttribute("copyfrom-path")
+ except KeyError: return None
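+
+# Typical use (sketch; 'xml_text' stands in for "svn log --xml" output);
+# iteration is driven by __getitem__:
+#   for entry in SvnLogParser(xml_text):
+#       print entry.revision()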
+
+def get_copyfrom(target):
+ """Get copyfrom info for a given target (it represents the
+ repository-relative path from where it was branched). NOTE:
+ repos root has no copyfrom info. In this case None is returned.
+
+ Returns the:
+ - source file or directory from which the copy was made
+ - revision from which that source was copied
+ - revision in which the copy was committed
+ """
+ repos_path = PathIdentifier.from_target(target).repo_relative_path
+ for chg in SvnLogParser(launchsvn('log -v --xml --stop-on-copy "%s"'
+ % target, split_lines=False)):
+ for p in chg.paths():
+ if p.action() == 'A' and p.pathid() == repos_path:
+ # These values will be None if the corresponding elements are
+ # not found in the log.
+ return p.copyfrom_pathid(), p.copyfrom_rev(), chg.revision()
+ return None,None,None
+
+def get_latest_rev(url):
+ """Get the latest revision of the repository of which URL is part."""
+ try:
+ info = get_svninfo(url)
+ if not info.has_key("Revision"):
+ error("Not a valid URL: %s" % url)
+ return info["Revision"]
+ except LaunchError:
+ # Alternative method for latest revision checking (for svn < 1.2)
+ report('checking latest revision of "%s"' % url)
+        L = launchsvn('proplist --revprop -r HEAD "%s"' % url)[0]
+ rev = re.search("revision (\d+)", L).group(1)
+ report('latest revision of "%s" is %s' % (url, rev))
+ return rev
+
+def get_created_rev(url):
+ """Lookup the revision at which the path identified by the
+ provided URL was first created."""
+ oldest_rev = -1
+ report('determining oldest revision for URL "%s"' % url)
+ ### TODO: Refactor this to use a modified RevisionLog class.
+ lines = None
+ cmd = "log -r1:HEAD --stop-on-copy -q " + url
+ try:
+ lines = launchsvn(cmd + " --limit=1")
+ except LaunchError:
+ # Assume that --limit isn't supported by the installed 'svn'.
+ lines = launchsvn(cmd)
+ if lines and len(lines) > 1:
+ i = lines[1].find(" ")
+ if i != -1:
+ oldest_rev = int(lines[1][1:i])
+ if oldest_rev == -1:
+ error('unable to determine oldest revision for URL "%s"' % url)
+ return oldest_rev
+
+def get_commit_log(url, revnum):
+ """Return the log message for a specific integer revision
+ number."""
+ out = launchsvn("log --incremental -r%d %s" % (revnum, url))
+ return recode_stdout_to_file("".join(out[1:]))
+
+def construct_merged_log_message(url, revnums):
+ """Return a commit log message containing all the commit messages
+ in the specified revisions at the given URL. The separator used
+ in this log message is determined by searching for the longest
+ svnmerge separator existing in the commit log messages and
+ extending it by one more separator. This results in a new commit
+ log message that is clearer in describing merges that contain
+ other merges. Trailing newlines are removed from the embedded
+ log messages."""
+ messages = ['']
+ longest_sep = ''
+ for r in revnums.sorted():
+ message = get_commit_log(url, r)
+ if message:
+ message = re.sub(r'(\r\n|\r|\n)', "\n", message)
+ message = rstrip(message, "\n") + "\n"
+ messages.append(prefix_lines(LOG_LINE_PREFIX, message))
+ for match in LOG_SEPARATOR_RE.findall(message):
+                sep = match[0]  # the full separator run (findall returns (group1, group2) tuples)
+ if len(sep) > len(longest_sep):
+ longest_sep = sep
+
+ longest_sep += LOG_SEPARATOR + "\n"
+ messages.append('')
+ return longest_sep.join(messages)
+
+def get_default_source(branch_target, branch_props):
+ """Return the default source for branch_target (given its branch_props).
+ Error out if there is ambiguity."""
+ if not branch_props:
+ error("no integration info available")
+
+ props = branch_props.copy()
+ pathid = PathIdentifier.from_target(branch_target)
+
+ # To make bidirectional merges easier, find the target's
+ # repository local path so it can be removed from the list of
+ # possible integration sources.
+ if props.has_key(pathid):
+ del props[pathid]
+
+ if len(props) > 1:
+ err_msg = "multiple sources found. "
+ err_msg += "Explicit source argument (-S/--source) required.\n"
+ err_msg += "The merge sources available are:"
+ for prop in props:
+ err_msg += "\n " + str(prop)
+ error(err_msg)
+
+ return props.keys()[0]
+
+def should_find_reflected(branch_dir):
+ should_find_reflected = opts["bidirectional"]
+
+ # If the source has integration info for the target, set find_reflected
+ # even if --bidirectional wasn't specified
+ if not should_find_reflected:
+ source_props = get_merge_props(opts["source-url"])
+ should_find_reflected = source_props.has_key(PathIdentifier.from_target(branch_dir))
+
+ return should_find_reflected
+
+def analyze_revs(target_pathid, url, begin=1, end=None,
+ find_reflected=False):
+ """For the source of the merges in the source URL being merged into
+ target_pathid, analyze the revisions in the interval begin-end (which
+ defaults to 1-HEAD), to find out which revisions are changes in
+ the url, which are changes elsewhere (so-called 'phantom'
+ revisions), optionally which are reflected changes (to avoid
+ conflicts that can occur when doing bidirectional merging between
+ branches), and which revisions initialize merge tracking against other
+ branches. Return a tuple of four RevisionSet's:
+ (real_revs, phantom_revs, reflected_revs, initialized_revs).
+
+ NOTE: To maximize speed, if "end" is not provided, the function is
+ not able to find phantom revisions following the last real
+ revision in the URL.
+ """
+
+ begin = str(begin)
+ if end is None:
+ end = "HEAD"
+ else:
+ end = str(end)
+ if long(begin) > long(end):
+ return RevisionSet(""), RevisionSet(""), \
+ RevisionSet(""), RevisionSet("")
+
+ logs[url] = RevisionLog(url, begin, end, find_reflected)
+ revs = RevisionSet(logs[url].revs)
+
+ if end == "HEAD":
+ # If end is not provided, we do not know which is the latest revision
+ # in the repository. So return the phantom revision set only up to
+ # the latest known revision.
+ end = str(list(revs)[-1])
+
+ phantom_revs = RevisionSet("%s-%s" % (begin, end)) - revs
+
+ if find_reflected:
+ reflected_revs = logs[url].merge_metadata().changed_revs(target_pathid)
+ reflected_revs += logs[url].block_metadata().changed_revs(target_pathid)
+ else:
+ reflected_revs = []
+
+ initialized_revs = RevisionSet(logs[url].merge_metadata().initialized_revs())
+ reflected_revs = RevisionSet(reflected_revs)
+
+ return revs, phantom_revs, reflected_revs, initialized_revs
+
+def analyze_source_revs(branch_target, source_url, **kwargs):
+ """For the given branch and source, extract the real and phantom
+ source revisions."""
+ branch_url = target_to_url(branch_target)
+ branch_pathid = PathIdentifier.from_target(branch_target)
+
+ # Extract the latest repository revision from the URL of the branch
+ # directory (which is already cached at this point).
+ end_rev = get_latest_rev(source_url)
+
+ # Calculate the base of analysis. If there is a "1-XX" interval in the
+ # merged_revs, we do not need to check those.
+ base = 1
+ r = opts["merged-revs"].normalized()
+ if r and r[0][0] == 1:
+ base = r[0][1] + 1
+
+ # See if the user filtered the revision set. If so, we are not
+ # interested in something outside that range.
+ if opts["revision"]:
+ revs = RevisionSet(opts["revision"]).sorted()
+ if base < revs[0]:
+ base = revs[0]
+ if end_rev > revs[-1]:
+ end_rev = revs[-1]
+
+ return analyze_revs(branch_pathid, source_url, base, end_rev, **kwargs)
+
+def minimal_merge_intervals(revs, phantom_revs):
+ """Produce the smallest number of intervals suitable for merging. revs
+ is the RevisionSet which we want to merge, and phantom_revs are phantom
+ revisions which can be used to concatenate intervals, thus minimizing the
+ number of operations."""
+ revnums = revs.normalized()
+ ret = []
+
+ cur = revnums.pop()
+ while revnums:
+ next = revnums.pop()
+ assert next[1] < cur[0] # otherwise it is not ordered
+ assert cur[0] - next[1] > 1 # otherwise it is not normalized
+ for i in range(next[1]+1, cur[0]):
+ if i not in phantom_revs:
+ ret.append(cur)
+ cur = next
+ break
+ else:
+ cur = (next[0], cur[1])
+
+ ret.append(cur)
+ ret.reverse()
+ return ret
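+# Illustration: revs covering {3-4, 7-9} with phantom_revs {5, 6} collapse
+# into the single interval (3, 9), since 5 and 6 change nothing in the
+# source and can safely pad the merge range.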
+
+def display_revisions(revs, display_style, revisions_msg, source_url):
+ """Show REVS as dictated by DISPLAY_STYLE, either numerically, in
+ log format, or as diffs. When displaying revisions numerically,
+ prefix output with REVISIONS_MSG when in verbose mode. Otherwise,
+ request logs or diffs using SOURCE_URL."""
+ if display_style == "revisions":
+ if revs:
+ report(revisions_msg)
+ print revs
+ elif display_style == "logs":
+ for start,end in revs.normalized():
+ svn_command('log --incremental -v -r %d:%d %s' % \
+ (start, end, source_url))
+ elif display_style in ("diffs", "summarize"):
+ if display_style == 'summarize':
+ summarize = '--summarize '
+ else:
+ summarize = ''
+
+ for start, end in revs.normalized():
+ print
+ if start == end:
+ print "%s: changes in revision %d follow" % (NAME, start)
+ else:
+ print "%s: changes in revisions %d-%d follow" % (NAME,
+ start, end)
+ print
+
+ # Note: the starting revision number to 'svn diff' is
+ # NOT inclusive so we have to subtract one from ${START}.
+ svn_command("diff -r %d:%d %s %s" % (start - 1, end, summarize,
+ source_url))
+ else:
+ assert False, "unhandled display style: %s" % display_style
+
+def action_init(target_dir, target_props):
+ """Initialize for merges."""
+ # Check that directory is ready for being modified
+ check_dir_clean(target_dir)
+
+ target_pathid = PathIdentifier.from_target(target_dir)
+ source_pathid = opts['source-pathid']
+ if source_pathid == target_pathid:
+ error("cannot init integration source path '%s'\nIts path identifier does not "
+ "differ from the path identifier of the current directory, '%s'."
+ % (source_pathid, target_pathid))
+
+ source_url = opts['source-url']
+
+ # If the user hasn't specified the revisions to use, see if the
+ # "source" is a copy from the current tree and if so, we can use
+ # the version data obtained from it.
+ revision_range = opts["revision"]
+ if not revision_range:
+ # If source was originally copied from target, and we are merging
+ # changes from source to target (the copy target is the merge source,
+ # and the copy source is the merge target), then we want to mark as
+ # integrated up to the rev in which the copy was committed which
+ # created the merge source:
+ cf_source, cf_rev, copy_committed_in_rev = get_copyfrom(source_url)
+
+ cf_pathid = None
+ if cf_source:
+ cf_url = get_repo_root(source_url) + cf_source
+ if is_url(cf_url) and check_url(cf_url):
+ cf_pathid = PathIdentifier.from_target(cf_url)
+
+ if target_pathid == cf_pathid:
+ report('the source "%s" was copied from "%s" in rev %s and committed in rev %s' %
+ (source_url, target_dir, cf_rev, copy_committed_in_rev))
+ revision_range = "1-" + str(copy_committed_in_rev)
+
+ if not revision_range:
+ # If the reverse is true: copy source is the merge source, and
+ # the copy target is the merge target, then we want to mark as
+ # integrated up to the specific rev of the merge target from
+ # which the merge source was copied. (Longer discussion at:
+ # http://subversion.tigris.org/issues/show_bug.cgi?id=2810 )
+ cf_source, cf_rev, copy_committed_in_rev = get_copyfrom(target_dir)
+
+ cf_pathid = None
+ if cf_source:
+ cf_url = get_repo_root(target_dir) + cf_source
+ if is_url(cf_url) and check_url(cf_url):
+ cf_pathid = PathIdentifier.from_target(cf_url)
+
+ source_pathid = PathIdentifier.from_target(source_url)
+ if source_pathid == cf_pathid:
+            report('the target "%s" was copied from the source "%s" in rev %s and committed in rev %s' %
+ (target_dir, source_url, cf_rev, copy_committed_in_rev))
+ revision_range = "1-" + cf_rev
+
+    # When neither the merge source nor target is a copy of the other, and
+    # the user did not specify a revision range, then default to the current
+    # revision; saying, in effect, "everything has been merged, so mark as
+    # integrated up to the latest rev on the source URL".
+ if not revision_range:
+ revision_range = "1-" + get_latest_rev(source_url)
+
+ revs = RevisionSet(revision_range)
+
+ report('marking "%s" as already containing revisions "%s" of "%s"' %
+ (target_dir, revs, source_url))
+
+ revs = str(revs)
+ # If the local svnmerge-integrated property already has an entry
+ # for the source-pathid, simply error out.
+ if not opts["force"] and target_props.has_key(source_pathid):
+ error('Repository-relative path %s has already been initialized at %s\n'
+ 'Use --force to re-initialize' % (source_pathid, target_dir))
+ # set the pathid's external_form based on the user's options
+ source_pathid.external_form = source_pathid.format(opts['location-type'])
+
+ target_props[source_pathid] = revs
+
+ # Set property
+ set_merge_props(target_dir, target_props)
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ print >>f, 'Initialized merge tracking via "%s" with revisions "%s" from ' \
+ % (NAME, revs)
+ print >>f, '%s' % source_url
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+def action_avail(branch_dir, branch_props):
+ """Show commits available for merges."""
+ source_revs, phantom_revs, reflected_revs, initialized_revs = \
+ analyze_source_revs(branch_dir, opts["source-url"],
+ find_reflected=
+ should_find_reflected(branch_dir))
+ report('skipping phantom revisions: %s' % phantom_revs)
+ if reflected_revs:
+ report('skipping reflected revisions: %s' % reflected_revs)
+ report('skipping initialized revisions: %s' % initialized_revs)
+
+ blocked_revs = get_blocked_revs(branch_dir, opts["source-pathid"])
+ avail_revs = source_revs - opts["merged-revs"] - blocked_revs - \
+ reflected_revs - initialized_revs
+
+ # Compose the set of revisions to show
+ revs = RevisionSet("")
+ report_msg = "revisions available to be merged are:"
+ if "avail" in opts["avail-showwhat"]:
+ revs |= avail_revs
+ if "blocked" in opts["avail-showwhat"]:
+ revs |= blocked_revs
+ report_msg = "revisions blocked are:"
+
+ # Limit to revisions specified by -r (if any)
+ if opts["revision"]:
+ revs = revs & RevisionSet(opts["revision"])
+
+ display_revisions(revs, opts["avail-display"],
+ report_msg,
+ opts["source-url"])
+
+def action_integrated(branch_dir, branch_props):
+ """Show change sets already merged. This set of revisions is
+ calculated from taking svnmerge-integrated property from the
+ branch, and subtracting any revision older than the branch
+ creation revision."""
+ # Extract the integration info for the branch_dir
+ branch_props = get_merge_props(branch_dir)
+ revs = merge_props_to_revision_set(branch_props, opts["source-pathid"])
+
+ # Lookup the oldest revision on the branch path.
+ oldest_src_rev = get_created_rev(opts["source-url"])
+
+ # Subtract any revisions which pre-date the branch.
+ report("subtracting revisions which pre-date the source URL (%d)" %
+ oldest_src_rev)
+ revs = revs - RevisionSet(range(1, oldest_src_rev))
+
+ # Limit to revisions specified by -r (if any)
+ if opts["revision"]:
+ revs = revs & RevisionSet(opts["revision"])
+
+ display_revisions(revs, opts["integrated-display"],
+ "revisions already integrated are:", opts["source-url"])
+
+def action_merge(branch_dir, branch_props):
+ """Record merge meta data, and do the actual merge (if not
+ requested otherwise via --record-only)."""
+ # Check branch directory is ready for being modified
+ check_dir_clean(branch_dir)
+
+ source_revs, phantom_revs, reflected_revs, initialized_revs = \
+ analyze_source_revs(branch_dir, opts["source-url"],
+ find_reflected=
+ should_find_reflected(branch_dir))
+
+ if opts["revision"]:
+ revs = RevisionSet(opts["revision"])
+ else:
+ revs = source_revs
+
+ blocked_revs = get_blocked_revs(branch_dir, opts["source-pathid"])
+ merged_revs = opts["merged-revs"]
+
+ # Show what we're doing
+ if opts["verbose"]: # just to avoid useless calculations
+ if merged_revs & revs:
+ report('"%s" already contains revisions %s' % (branch_dir,
+ merged_revs & revs))
+ if phantom_revs:
+ report('memorizing phantom revision(s): %s' % phantom_revs)
+ if reflected_revs:
+ report('memorizing reflected revision(s): %s' % reflected_revs)
+ if blocked_revs & revs:
+            report('skipping blocked revision(s): %s' % (blocked_revs & revs))
+ if initialized_revs:
+ report('skipping initialized revision(s): %s' % initialized_revs)
+
+ # Compute final merge set.
+ revs = revs - merged_revs - blocked_revs - reflected_revs - \
+ phantom_revs - initialized_revs
+ if not revs:
+ report('no revisions to merge, exiting')
+ return
+
+ # When manually marking revisions as merged, we only update the
+ # integration meta data, and don't perform an actual merge.
+ record_only = opts["record-only"]
+
+ if record_only:
+ report('recording merge of revision(s) %s from "%s"' %
+ (revs, opts["source-url"]))
+ else:
+ report('merging in revision(s) %s from "%s"' %
+ (revs, opts["source-url"]))
+
+ # Do the merge(s). Note: the starting revision number to 'svn merge'
+ # is NOT inclusive so we have to subtract one from start.
+ # We try to keep the number of merge operations as low as possible,
+ # because it is faster and reduces the number of conflicts.
+ old_block_props = get_block_props(branch_dir)
+ merge_metadata = logs[opts["source-url"]].merge_metadata()
+ block_metadata = logs[opts["source-url"]].block_metadata()
+ for start,end in minimal_merge_intervals(revs, phantom_revs):
+ if not record_only:
+ # Preset merge/blocked properties to the source value at
+ # the start rev to avoid spurious property conflicts
+ set_merge_props(branch_dir, merge_metadata.get(start - 1))
+ set_block_props(branch_dir, block_metadata.get(start - 1))
+ # Do the merge
+ svn_command("merge --force -r %d:%d %s %s" % \
+ (start - 1, end, opts["source-url"], branch_dir))
+ # TODO: to support graph merging, add logic to merge the property
+ # meta-data manually
+
+ # Update the set of merged revisions.
+ merged_revs = merged_revs | revs | reflected_revs | phantom_revs | initialized_revs
+ branch_props[opts["source-pathid"]] = str(merged_revs)
+ set_merge_props(branch_dir, branch_props)
+ # Reset the blocked revs
+ set_block_props(branch_dir, old_block_props)
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ if record_only:
+ print >>f, 'Recorded merge of revisions %s via %s from ' % \
+ (revs, NAME)
+ else:
+ print >>f, 'Merged revisions %s via %s from ' % \
+ (revs, NAME)
+ print >>f, '%s' % opts["source-url"]
+ if opts["commit-verbose"]:
+ print >>f
+ print >>f, construct_merged_log_message(opts["source-url"], revs),
+
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+def action_block(branch_dir, branch_props):
+ """Block revisions."""
+ # Check branch directory is ready for being modified
+ check_dir_clean(branch_dir)
+
+ source_revs, phantom_revs, reflected_revs, initialized_revs = \
+ analyze_source_revs(branch_dir, opts["source-url"])
+ revs_to_block = source_revs - opts["merged-revs"]
+
+ # Limit to revisions specified by -r (if any)
+ if opts["revision"]:
+ revs_to_block = RevisionSet(opts["revision"]) & revs_to_block
+
+ if not revs_to_block:
+ error('no available revisions to block')
+
+ # Change blocked information
+ blocked_revs = get_blocked_revs(branch_dir, opts["source-pathid"])
+ blocked_revs = blocked_revs | revs_to_block
+ set_blocked_revs(branch_dir, opts["source-pathid"], blocked_revs)
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ print >>f, 'Blocked revisions %s via %s' % (revs_to_block, NAME)
+ if opts["commit-verbose"]:
+ print >>f
+ print >>f, construct_merged_log_message(opts["source-url"],
+ revs_to_block),
+
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+def action_unblock(branch_dir, branch_props):
+ """Unblock revisions."""
+ # Check branch directory is ready for being modified
+ check_dir_clean(branch_dir)
+
+ blocked_revs = get_blocked_revs(branch_dir, opts["source-pathid"])
+ revs_to_unblock = blocked_revs
+
+ # Limit to revisions specified by -r (if any)
+ if opts["revision"]:
+ revs_to_unblock = revs_to_unblock & RevisionSet(opts["revision"])
+
+ if not revs_to_unblock:
+ error('no available revisions to unblock')
+
+ # Change blocked information
+ blocked_revs = blocked_revs - revs_to_unblock
+ set_blocked_revs(branch_dir, opts["source-pathid"], blocked_revs)
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ print >>f, 'Unblocked revisions %s via %s' % (revs_to_unblock, NAME)
+ if opts["commit-verbose"]:
+ print >>f
+ print >>f, construct_merged_log_message(opts["source-url"],
+ revs_to_unblock),
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+def action_rollback(branch_dir, branch_props):
+ """Rollback previously integrated revisions."""
+
+ # Make sure the revision arguments are present
+ if not opts["revision"]:
+ error("The '-r' option is mandatory for rollback")
+
+ # Check branch directory is ready for being modified
+ check_dir_clean(branch_dir)
+
+ # Extract the integration info for the branch_dir
+ branch_props = get_merge_props(branch_dir)
+ # Get the list of all revisions already merged into this source-pathid.
+ merged_revs = merge_props_to_revision_set(branch_props,
+ opts["source-pathid"])
+
+ # At which revision was the src created?
+ oldest_src_rev = get_created_rev(opts["source-url"])
+ src_pre_exist_range = RevisionSet("1-%d" % oldest_src_rev)
+
+ # Limit to revisions specified by -r (if any)
+ revs = merged_revs & RevisionSet(opts["revision"])
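+ # e.g. (hypothetical numbers) with merged revisions "1-10" and
+ # -r "8-12", revs becomes "8-10"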
+
+ # make sure there's some revision to roll back
+ if not revs:
+ report("Nothing to roll back in revision range r%s" % opts["revision"])
+ return
+
+ # If even one specified revision lies outside the lifetime of the
+ # merge source, error out.
+ if revs & src_pre_exist_range:
+ err_str = "Specified revision range falls out of the rollback range.\n"
+ err_str += "%s was created at r%d" % (opts["source-pathid"],
+ oldest_src_rev)
+ error(err_str)
+
+ record_only = opts["record-only"]
+
+ if record_only:
+ report('recording rollback of revision(s) %s from "%s"' %
+ (revs, opts["source-url"]))
+ else:
+ report('rollback of revision(s) %s from "%s"' %
+ (revs, opts["source-url"]))
+
+ # Do the reverse merge(s). Note: the starting revision number
+ # to 'svn merge' is NOT inclusive so we have to subtract one from start.
+ # We try to keep the number of merge operations as low as possible,
+ # because it is faster and reduces the number of conflicts.
+ rollback_intervals = minimal_merge_intervals(revs, [])
+ # rollback in the reverse order of merge
+ rollback_intervals.reverse()
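+ # For illustration (hypothetical numbers): rolling back "5-7,10"
+ # yields the intervals [(5, 7), (10, 10)], which the loop below
+ # reverse-merges as "-r 10:9" and then "-r 7:4".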
+ for start, end in rollback_intervals:
+ if not record_only:
+ # Do the merge
+ svn_command("merge --force -r %d:%d %s %s" % \
+ (end, start - 1, opts["source-url"], branch_dir))
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ if record_only:
+ print >>f, 'Recorded rollback of revisions %s via %s from ' % \
+ (revs, NAME)
+ else:
+ print >>f, 'Rolled back revisions %s via %s from ' % \
+ (revs, NAME)
+ print >>f, '%s' % opts["source-url"]
+
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+ # Update the set of merged revisions.
+ merged_revs = merged_revs - revs
+ branch_props[opts["source-pathid"]] = str(merged_revs)
+ set_merge_props(branch_dir, branch_props)
+
+def action_uninit(branch_dir, branch_props):
+ """Uninit SOURCE URL."""
+ # Check branch directory is ready for being modified
+ check_dir_clean(branch_dir)
+
+ # If the source-pathid does not have an entry in the svnmerge-integrated
+ # property, simply error out.
+ if not branch_props.has_key(opts["source-pathid"]):
+ error('Repository-relative path "%s" does not contain merge '
+ 'tracking information for "%s"' \
+ % (opts["source-pathid"], branch_dir))
+
+ del branch_props[opts["source-pathid"]]
+
+ # Set merge property with the selected source deleted
+ set_merge_props(branch_dir, branch_props)
+
+ # Set blocked revisions for the selected source to None
+ set_blocked_revs(branch_dir, opts["source-pathid"], None)
+
+ # Write out commit message if desired
+ if opts["commit-file"]:
+ f = open(opts["commit-file"], "w")
+ print >>f, 'Removed merge tracking via %s for ' % NAME
+ print >>f, '%s' % opts["source-url"]
+ f.close()
+ report('wrote commit message to "%s"' % opts["commit-file"])
+
+###############################################################################
+# Command line parsing -- options and commands management
+###############################################################################
+
+class OptBase:
+ def __init__(self, *args, **kwargs):
+ self.help = kwargs["help"]
+ del kwargs["help"]
+ self.lflags = []
+ self.sflags = []
+ for a in args:
+ if a.startswith("--"): self.lflags.append(a)
+ elif a.startswith("-"): self.sflags.append(a)
+ else:
+ raise TypeError, "invalid flag name: %s" % a
+ if kwargs.has_key("dest"):
+ self.dest = kwargs["dest"]
+ del kwargs["dest"]
+ else:
+ if not self.lflags:
+ raise TypeError, "cannot deduce dest name without long options"
+ self.dest = self.lflags[0][2:]
+ if kwargs:
+ raise TypeError, "invalid keyword arguments: %r" % kwargs.keys()
+ def repr_flags(self):
+ f = self.sflags + self.lflags
+ r = f[0]
+ for fl in f[1:]:
+ r += " [%s]" % fl
+ return r
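+ # e.g. (hypothetical option) Option("-r", "--revision", help="...")
+ # has repr_flags() == "-r [--revision]"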
+
+class Option(OptBase):
+ def __init__(self, *args, **kwargs):
+ self.default = kwargs.setdefault("default", 0)
+ del kwargs["default"]
+ self.value = kwargs.setdefault("value", None)
+ del kwargs["value"]
+ OptBase.__init__(self, *args, **kwargs)
+ def apply(self, state, value):
+ assert value == ""
+ if self.value is not None:
+ state[self.dest] = self.value
+ else:
+ state[self.dest] += 1
+
+class OptionArg(OptBase):
+ def __init__(self, *args, **kwargs):
+ self.default = kwargs["default"]
+ del kwargs["default"]
+ self.metavar = kwargs.setdefault("metavar", None)
+ del kwargs["metavar"]
+ OptBase.__init__(self, *args, **kwargs)
+
+ if self.metavar is None:
+ if self.dest is not None:
+ self.metavar = self.dest.upper()
+ else:
+ self.metavar = "arg"
+ if self.default:
+ self.help += " (default: %s)" % self.default
+ def apply(self, state, value):
+ assert value is not None
+ state[self.dest] = value
+ def repr_flags(self):
+ r = OptBase.repr_flags(self)
+ return r + " " + self.metavar
+
+class CommandOpts:
+ class Cmd:
+ def __init__(self, *args):
+ self.name, self.func, self.usage, self.help, self.opts = args
+ def short_help(self):
+ return self.help.split(".")[0]
+ def __str__(self):
+ return self.name
+ def __call__(self, *args, **kwargs):
+ return self.func(*args, **kwargs)
+
+ def __init__(self, global_opts, common_opts, command_table, version=None):
+ self.progname = NAME
+ self.version = version.replace("%prog", self.progname)
+ self.cwidth = console_width() - 2
+ self.ctable = command_table.copy()
+ self.gopts = global_opts[:]
+ self.copts = common_opts[:]
+ self._add_builtins()
+ for k in self.ctable.keys():
+ cmd = self.Cmd(k, *self.ctable[k])
+ opts = []
+ for o in cmd.opts:
+ if isinstance(o, types.StringType) or \
+ isinstance(o, types.UnicodeType):
+ o = self._find_common(o)
+ opts.append(o)
+ cmd.opts = opts
+ self.ctable[k] = cmd
+
+ def _add_builtins(self):
+ self.gopts.append(
+ Option("-h", "--help", help="show help for this command and exit"))
+ if self.version is not None:
+ self.gopts.append(
+ Option("-V", "--version", help="show version info and exit"))
+ self.ctable["help"] = (self._cmd_help,
+ "help [COMMAND]",
+ "Display help for a specific command. If COMMAND is omitted, "
+ "display brief command description.",
+ [])
+
+ def _cmd_help(self, cmd=None, *args):
+ if args:
+ self.error("wrong number of arguments", "help")
+ if cmd is not None:
+ cmd = self._command(cmd)
+ self.print_command_help(cmd)
+ else:
+ self.print_command_list()
+
+ def _paragraph(self, text, width=78):
+ chunks = re.split("\s+", text.strip())
+ chunks.reverse()
+ lines = []
+ while chunks:
+ L = chunks.pop()
+ while chunks and len(L) + len(chunks[-1]) + 1 <= width:
+ L += " " + chunks.pop()
+ lines.append(L)
+ return lines
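+ # e.g. _paragraph("a bb ccc", width=4) returns ["a bb", "ccc"]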
+
+ def _paragraphs(self, text, *args, **kwargs):
+ pars = text.split("\n\n")
+ lines = self._paragraph(pars[0], *args, **kwargs)
+ for p in pars[1:]:
+ lines.append("")
+ lines.extend(self._paragraph(p, *args, **kwargs))
+ return lines
+
+ def _print_wrapped(self, text, indent=0):
+ text = self._paragraphs(text, self.cwidth - indent)
+ print text.pop(0)
+ for t in text:
+ print " " * indent + t
+
+ def _find_common(self, fl):
+ for o in self.copts:
+ if fl in o.lflags+o.sflags:
+ return o
+ assert False, fl
+
+ def _compute_flags(self, opts, check_conflicts=True):
+ back = {}
+ sfl = ""
+ lfl = []
+ for o in opts:
+ sapp = lapp = ""
+ if isinstance(o, OptionArg):
+ sapp, lapp = ":", "="
+ for s in o.sflags:
+ if check_conflicts and back.has_key(s):
+ raise RuntimeError, "option conflict: %s" % s
+ back[s] = o
+ sfl += s[1:] + sapp
+ for l in o.lflags:
+ if check_conflicts and back.has_key(l):
+ raise RuntimeError, "option conflict: %s" % l
+ back[l] = o
+ lfl.append(l[2:] + lapp)
+ return sfl, lfl, back
+
+ def _extract_command(self, args):
+ """
+ Try to extract the command name from the argument list. This is
+ non-trivial because we want to allow command-specific options even
+ before the command itself.
+ """
+ opts = self.gopts[:]
+ for cmd in self.ctable.values():
+ opts.extend(cmd.opts)
+ sfl, lfl, _ = self._compute_flags(opts, check_conflicts=False)
+
+ lopts,largs = getopt.getopt(args, sfl, lfl)
+ if not largs:
+ return None
+ return self._command(largs[0])
+
+ def _fancy_getopt(self, args, opts, state=None):
+ if state is None:
+ state = {}
+ for o in opts:
+ if not state.has_key(o.dest):
+ state[o.dest] = o.default
+
+ sfl, lfl, back = self._compute_flags(opts)
+ try:
+ lopts,args = getopt.gnu_getopt(args, sfl, lfl)
+ except AttributeError:
+ # Before Python 2.3, there was no gnu_getopt support.
+ # So we can't parse intermixed positional arguments
+ # and options.
+ lopts,args = getopt.getopt(args, sfl, lfl)
+
+ for o,v in lopts:
+ back[o].apply(state, v)
+ return state, args
+
+ def _command(self, cmd):
+ if not self.ctable.has_key(cmd):
+ self.error("unknown command: '%s'" % cmd)
+ return self.ctable[cmd]
+
+ def parse(self, args):
+ if not args:
+ self.print_small_help()
+ sys.exit(0)
+
+ cmd = None
+ try:
+ cmd = self._extract_command(args)
+ opts = self.gopts[:]
+ if cmd:
+ opts.extend(cmd.opts)
+ args.remove(cmd.name)
+ state, args = self._fancy_getopt(args, opts)
+ except getopt.GetoptError, e:
+ self.error(e, cmd)
+
+ # Handle builtins
+ if self.version is not None and state["version"]:
+ self.print_version()
+ sys.exit(0)
+ if state["help"]: # special case for --help
+ if cmd:
+ self.print_command_help(cmd)
+ sys.exit(0)
+ cmd = self.ctable["help"]
+ else:
+ if cmd is None:
+ self.error("command argument required")
+ if str(cmd) == "help":
+ cmd(*args)
+ sys.exit(0)
+ return cmd, args, state
+
+ def error(self, s, cmd=None):
+ print >>sys.stderr, "%s: %s" % (self.progname, s)
+ if cmd is not None:
+ self.print_command_help(cmd)
+ else:
+ self.print_small_help()
+ sys.exit(1)
+ def print_small_help(self):
+ print "Type '%s help' for usage" % self.progname
+ def print_usage_line(self):
+ print "usage: %s <subcommand> [options...] [args...]\n" % self.progname
+ def print_command_list(self):
+ print "Available commands (use '%s help COMMAND' for more details):\n" \
+ % self.progname
+ cmds = self.ctable.keys()
+ cmds.sort()
+ indent = max(map(len, cmds))
+ for c in cmds:
+ h = self.ctable[c].short_help()
+ print " %-*s " % (indent, c),
+ self._print_wrapped(h, indent+6)
+ def print_command_help(self, cmd):
+ cmd = self.ctable[str(cmd)]
+ print 'usage: %s %s\n' % (self.progname, cmd.usage)
+ self._print_wrapped(cmd.help)
+ def print_opts(opts, self=self):
+ if not opts: return
+ flags = [o.repr_flags() for o in opts]
+ indent = max(map(len, flags))
+ for f,o in zip(flags, opts):
+ print " %-*s :" % (indent, f),
+ self._print_wrapped(o.help, indent+5)
+ print '\nCommand options:'
+ print_opts(cmd.opts)
+ print '\nGlobal options:'
+ print_opts(self.gopts)
+
+ def print_version(self):
+ print self.version
+
+###############################################################################
+# Options and Commands description
+###############################################################################
+
+global_opts = [
+ Option("-F", "--force",
+ help="force operation even if the working copy is not clean, or "
+ "there are pending updates"),
+ Option("-n", "--dry-run",
+ help="don't actually change anything, just pretend; "
+ "implies --show-changes"),
+ Option("-s", "--show-changes",
+ help="show subversion commands that make changes"),
+ Option("-v", "--verbose",
+ help="verbose mode: output more information about progress"),
+ OptionArg("-u", "--username",
+ default=None,
+ help="invoke subversion commands with the supplied username"),
+ OptionArg("-p", "--password",
+ default=None,
+ help="invoke subversion commands with the supplied password"),
+ OptionArg("-c", "--config-dir", metavar="DIR",
+ default=None,
+ help="cause subversion commands to consult runtime config directory DIR"),
+]
+
+common_opts = [
+ Option("-b", "--bidirectional",
+ value=True,
+ default=False,
+ help="remove reflected and initialized revisions from merge candidates. "
+ "Not required but may be specified to speed things up slightly"),
+ OptionArg("-f", "--commit-file", metavar="FILE",
+ default="svnmerge-commit-message.txt",
+ help="set the name of the file where the suggested log message "
+ "is written to"),
+ Option("-M", "--record-only",
+ value=True,
+ default=False,
+ help="do not perform an actual merge of the changes, yet record "
+ "that a merge happened"),
+ OptionArg("-r", "--revision",
+ metavar="REVLIST",
+ default="",
+ help="specify a revision list, consisting of revision numbers "
+ 'and ranges separated by commas, e.g., "534,537-539,540"'),
+ OptionArg("-S", "--source", "--head",
+ default=None,
+ help="specify a merge source for this branch. It can be either "
+ "a working directory path, a full URL, or an unambiguous "
+ "substring of one of the locations for which merge tracking was "
+ "already initialized. Needed only to disambiguate in case of "
+ "multiple merge sources"),
+]
+
+command_table = {
+ "init": (action_init,
+ "init [OPTION...] [SOURCE]",
+ """Initialize merge tracking from SOURCE on the current working
+ directory.
+
+ If SOURCE is specified, all the revisions in SOURCE are marked as already
+ merged; if this is not correct, you can use --revision to specify the
+ exact list of already-merged revisions.
+
+ If SOURCE is omitted, then it is computed from the "svn cp" history of the
+ current working directory (searching back for the branch point); in this
+ case, %s assumes that no revision has been integrated yet since
+ the branch point (unless you teach it with --revision).""" % NAME,
+ [
+ "-f", "-r", # import common opts
+ OptionArg("-L", "--location-type",
+ dest="location-type",
+ default="path",
+ help="Use this type of location identifier in the new " +
+ "Subversion properties; 'uuid', 'url', or 'path' " +
+ "(default)"),
+ ]),
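+ # Typical "init" usage (hypothetical paths): run "svnmerge.py init"
+ # inside the branch working copy, then commit the generated message:
+ #   svn commit -F svnmerge-commit-message.txt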
+
+ "avail": (action_avail,
+ "avail [OPTION...] [PATH]",
+ """Show unmerged revisions available for PATH as a revision list.
+ If --revision is given, the revisions shown will be limited to those
+ also specified in the option.
+
+ When svnmerge is used to bidirectionally merge changes between a
+ branch and its source, it is necessary not to merge the same changes
+ back and forth: e.g., if you committed a merge of a certain
+ revision of the branch into the source, you do not want that commit
+ to appear as available to be merged into the branch (as the code
+ originated in the branch itself!). svnmerge will automatically
+ exclude these so-called "reflected" revisions.""",
+ [
+ Option("-A", "--all",
+ dest="avail-showwhat",
+ value=["blocked", "avail"],
+ default=["avail"],
+ help="show both available and blocked revisions (aka ignore "
+ "blocked revisions)"),
+ "-b",
+ Option("-B", "--blocked",
+ dest="avail-showwhat",
+ value=["blocked"],
+ help="show the blocked revision list (see '%s block')" % NAME),
+ Option("-d", "--diff",
+ dest="avail-display",
+ value="diffs",
+ default="revisions",
+ help="show corresponding diff instead of revision list"),
+ Option("--summarize",
+ dest="avail-display",
+ value="summarize",
+ help="show summarized diff instead of revision list"),
+ Option("-l", "--log",
+ dest="avail-display",
+ value="logs",
+ help="show corresponding log history instead of revision list"),
+ "-r",
+ "-S",
+ ]),
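+ # For example, "svnmerge.py avail -l -r1234" (hypothetical revision)
+ # shows the log message of r1234 if it is still unmerged.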
+
+ "integrated": (action_integrated,
+ "integrated [OPTION...] [PATH]",
+ """Show merged revisions available for PATH as a revision list.
+ If --revision is given, the revisions shown will be limited to
+ those also specified in the option.""",
+ [
+ Option("-d", "--diff",
+ dest="integrated-display",
+ value="diffs",
+ default="revisions",
+ help="show corresponding diff instead of revision list"),
+ Option("-l", "--log",
+ dest="integrated-display",
+ value="logs",
+ help="show corresponding log history instead of revision list"),
+ "-r",
+ "-S",
+ ]),
+
+ "rollback": (action_rollback,
+ "rollback [OPTION...] [PATH]",
+ """Rollback previously merged in revisions from PATH. The
+ --revision option is mandatory, and specifies which revisions
+ will be rolled back. Only the previously integrated merges
+ will be rolled back.
+
+ When manually rolling back changes, --record-only can be used to
+ instruct %s that a manual rollback of a certain revision
+ already happened, so that it can record it and offer that
+ revision for merge henceforth.""" % (NAME),
+ [
+ "-f", "-r", "-S", "-M", # import common opts
+ ]),
+
+ "merge": (action_merge,
+ "merge [OPTION...] [PATH]",
+ """Merge in revisions into PATH from its source. If --revision is omitted,
+ all the available revisions will be merged. In any case, already merged-in
+ revisions will NOT be merged again.
+
+ When svnmerge is used to bidirectionally merge changes between a
+ branch and its source, it is necessary not to merge the same changes
+ back and forth: e.g., if you committed a merge of a certain
+ revision of the branch into the source, you do not want that commit
+ to appear as available to be merged into the branch (as the code
+ originated in the branch itself!). svnmerge will automatically
+ exclude these so-called "reflected" revisions.
+
+ When manually merging changes across branches, --record-only can
+ be used to instruct %s that a manual merge of a certain revision
+ already happened, so that it can record it and not offer that
+ revision for merge anymore. Conversely, when there are revisions
+ which should not be merged, use '%s block'.""" % (NAME, NAME),
+ [
+ "-b", "-f", "-r", "-S", "-M", # import common opts
+ ]),
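+ # For example, "svnmerge.py merge -r 1234,1237-1239" (hypothetical
+ # numbers) merges only those revisions; the suggested log message
+ # can then be used with "svn commit -F svnmerge-commit-message.txt".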
+
+ "block": (action_block,
+ "block [OPTION...] [PATH]",
+ """Block revisions within PATH so that they disappear from the available
+ list. This is useful to hide revisions which will not be integrated.
+ If --revision is omitted, it defaults to all the available revisions.
+
+ Do not use this command to hide revisions that were manually merged
+ into the branch. Instead, use '%s merge --record-only', which
+ records that a merge happened (as opposed to a merge which should
+ not happen).""" % NAME,
+ [
+ "-f", "-r", "-S", # import common opts
+ ]),
+
+ "unblock": (action_unblock,
+ "unblock [OPTION...] [PATH]",
+ """Revert the effect of '%s block'. If --revision is omitted, all the
+ blocked revisions are unblocked.""" % NAME,
+ [
+ "-f", "-r", "-S", # import common opts
+ ]),
+
+ "uninit": (action_uninit,
+ "uninit [OPTION...] [PATH]",
+ """Remove merge tracking information from PATH. It cleans any kind of merge
+ tracking information (including the list of blocked revisions). If there
+ are multiple sources, use --source to indicate which source you want to
+ forget about.""",
+ [
+ "-f", "-S", # import common opts
+ ]),
+}
+
+
+def main(args):
+ global opts
+
+ # Initialize default options
+ opts = default_opts.copy()
+ logs.clear()
+
+ optsparser = CommandOpts(global_opts, common_opts, command_table,
+ version="%%prog r%s\n modified: %s\n\n"
+ "Copyright (C) 2004,2005 Awarix Inc.\n"
+ "Copyright (C) 2005, Giovanni Bajo"
+ % (__revision__, __date__))
+
+ cmd, args, state = optsparser.parse(args)
+ opts.update(state)
+
+ source = opts.get("source", None)
+ branch_dir = "."
+
+ if str(cmd) == "init":
+ if len(args) == 1:
+ source = args[0]
+ elif len(args) > 1:
+ optsparser.error("wrong number of parameters", cmd)
+ elif str(cmd) in command_table.keys():
+ if len(args) == 1:
+ branch_dir = args[0]
+ elif len(args) > 1:
+ optsparser.error("wrong number of parameters", cmd)
+ else:
+ assert False, "command not handled: %s" % cmd
+
+ # Validate branch_dir
+ if not is_wc(branch_dir):
+ if str(cmd) == "avail":
+ info = None
+ # Note that "svn info" does not exit with an error when an
+ # invalid target is specified (as one might expect), so the
+ # try/except is not strictly necessary; it is kept to make
+ # the intended handling explicit.
+ try:
+ info = get_svninfo(branch_dir)
+ except LaunchError:
+ pass
+ # test that we definitely targeted a subversion directory,
+ # mirroring the purpose of the earlier is_wc() call
+ if info is None or not info.has_key("Node Kind") or info["Node Kind"] != "directory":
+ error('"%s" is neither a valid URL, nor a working directory' % branch_dir)
+ else:
+ error('"%s" is not a subversion working directory' % branch_dir)
+
+ # give out some hints as to potential pathids
+ PathIdentifier.hint(branch_dir)
+ if source: PathIdentifier.hint(source)
+
+ # Extract the integration info for the branch_dir
+ branch_props = get_merge_props(branch_dir)
+
+ # Calculate source_url and source_path
+ report("calculate source path for the branch")
+ if not source:
+ if str(cmd) == "init":
+ cf_source, cf_rev, copy_committed_in_rev = get_copyfrom(branch_dir)
+ if not cf_source:
+ error('no copyfrom info available. '
+ 'Explicit source argument (-S/--source) required.')
+ opts["source-url"] = get_repo_root(branch_dir) + cf_source
+ opts["source-pathid"] = PathIdentifier.from_target(opts["source-url"])
+
+ if not opts["revision"]:
+ opts["revision"] = "1-" + cf_rev
+ else:
+ opts["source-pathid"] = get_default_source(branch_dir, branch_props)
+ opts["source-url"] = opts["source-pathid"].get_url()
+
+ assert is_pathid(opts["source-pathid"])
+ assert is_url(opts["source-url"])
+ else:
+ # The source was given as a command line argument and is stored in
+ # SOURCE. Ensure that the specified source does not end in a /,
+ # otherwise it's easy to have the same source path listed more
+ # than once in the integrated version properties, with and without
+ # trailing /'s.
+ source = rstrip(source, "/")
+ if not is_wc(source) and not is_url(source):
+ # Check if it is a substring of a pathid recorded
+ # within the branch properties.
+ found = []
+ for pathid in branch_props.keys():
+ if pathid.match_substring(source):
+ found.append(pathid)
+ if len(found) == 1:
+ # (assumes pathid is a repository-relative-path)
+ source_pathid = found[0]
+ source = source_pathid.get_url()
+ else:
+ error('"%s" is neither a valid URL, nor an unambiguous '
+ 'substring of a repository path, nor a working directory'
+ % source)
+ else:
+ source_pathid = PathIdentifier.from_target(source)
+
+ source_pathid = PathIdentifier.from_target(source)
+ if str(cmd) == "init" and \
+ source_pathid == PathIdentifier.from_target("."):
+ error("cannot init integration source path '%s'\n"
+ "Its repository-relative path must differ from the "
+ "repository-relative path of the current directory."
+ % source_pathid)
+ opts["source-pathid"] = source_pathid
+ opts["source-url"] = target_to_url(source)
+
+ # Sanity check source_url
+ assert is_url(opts["source-url"])
+ # SVN does not support non-normalized URL (and we should not
+ # have created them)
+ assert opts["source-url"].find("/..") < 0
+
+ report('source is "%s"' % opts["source-url"])
+
+ # Get previously merged revisions (except when command is init)
+ if str(cmd) != "init":
+ opts["merged-revs"] = merge_props_to_revision_set(branch_props,
+ opts["source-pathid"])
+
+ # Perform the action
+ cmd(branch_dir, branch_props)
+
+
+if __name__ == "__main__":
+ try:
+ main(sys.argv[1:])
+ except LaunchError, (ret, cmd, out):
+ err_msg = "command execution failed (exit code: %d)\n" % ret
+ err_msg += cmd + "\n"
+ err_msg += "".join(out)
+ error(err_msg)
+ except KeyboardInterrupt:
+ # Avoid traceback on CTRL+C
+ print "aborted by user"
+ sys.exit(1)
diff --git a/testing/subversion/svnserve b/testing/subversion/svnserve
new file mode 100755
index 000000000..670fee742
--- /dev/null
+++ b/testing/subversion/svnserve
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. /etc/rc.conf
+. /etc/rc.d/functions
+. /etc/conf.d/svnserve
+
+PID=`pidof -o %PPID /usr/bin/svnserve`
+case "$1" in
+ start)
+ stat_busy "Starting svnserve"
+ if [ -z "$PID" ]; then
+ if [ -n "$SVNSERVE_USER" ]; then
+ su -s '/bin/sh' $SVNSERVE_USER -c "/usr/bin/svnserve -d $SVNSERVE_ARGS" &
+ else
+ /usr/bin/svnserve -d $SVNSERVE_ARGS &
+ fi
+ fi
+ if [ ! -z "$PID" -o $? -gt 0 ]; then
+ stat_fail
+ else
+ add_daemon svnserve
+ stat_done
+ fi
+ ;;
+ stop)
+ stat_busy "Stopping svnserve"
+ [ ! -z "$PID" ] && kill $PID &> /dev/null
+ if [ $? -gt 0 ]; then
+ stat_fail
+ else
+ rm_daemon svnserve
+ stat_done
+ fi
+ ;;
+ restart)
+ $0 stop
+ sleep 1
+ $0 start
+ ;;
+ *)
+ echo "usage: $0 {start|stop|restart}"
+esac
diff --git a/testing/subversion/svnserve.conf b/testing/subversion/svnserve.conf
new file mode 100644
index 000000000..37fb7ea10
--- /dev/null
+++ b/testing/subversion/svnserve.conf
@@ -0,0 +1,7 @@
+#
+# Parameters to be passed to svnserve
+#
+#SVNSERVE_ARGS="-r /path/to/some/repos"
+SVNSERVE_ARGS=""
+
+#SVNSERVE_USER="svn"
diff --git a/testing/udev/81-arch.rules b/testing/udev/81-arch.rules
new file mode 100644
index 000000000..cd4e3e9b4
--- /dev/null
+++ b/testing/udev/81-arch.rules
@@ -0,0 +1,83 @@
+# Udev rules for Archlinux by Tobias Powalowski <tpowa@archlinux.org>
+# do not edit this file, it will be overwritten on update
+#
+# There are a number of modifiers that are allowed to be used in some
+# of the different fields. They provide the following substitutions:
+#
+# %n the "kernel number" of the device.
+# For example, 'sda3' has a "kernel number" of '3'
+# %k the kernel name for the device.
+# %M the kernel major number for the device
+# %m the kernel minor number for the device
+# %b the bus id for the device
+# %c the string returned by the PROGRAM
+# %s{filename} the content of a sysfs attribute.
+# %% the '%' char itself.
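+#
+# For example (hypothetical rule), "%n" could be used to add an extra
+# symlink for every partition of the first disk:
+#   KERNEL=="sda[0-9]", SYMLINK+="firstdisk%n"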
+#
+# See the udev man page for a full description of them.
+# global stuff
+#
+
+# permission for sg devices
+KERNEL=="sg[0-9]*", ATTRS{type}!="3|6", GROUP="disk", MODE="0660"
+
+# permissions for IDE CD devices
+SUBSYSTEMS=="ide", KERNEL=="hd[a-z]", ATTR{removable}=="1", ATTRS{media}=="cdrom*", GROUP="optical"
+
+# permissions for SCSI CD devices
+SUBSYSTEMS=="scsi", KERNEL=="sr[0-9]*", ATTRS{type}=="5", SYMLINK+="scd%n", GROUP="optical"
+SUBSYSTEMS=="scsi", KERNEL=="sg[0-9]*", ATTRS{type}=="5", GROUP="optical"
+
+# permissions for removable devices like cardreaders or sticks
+KERNEL=="sd*", ATTRS{scsi_level}=="3", ATTRS{type}=="0", GROUP="storage"
+
+# permissions for firewire external drives
+KERNEL=="sd*", ATTRS{scsi_level}=="5", GROUP="storage"
+
+# permissions for usb to scsi external adapters
+KERNEL=="sd*", ATTRS{scsi_level}=="3", ATTRS{type}=="7", GROUP="storage"
+
+# permissions for ide storage like pcmcia card readers
+ACTION!="add", GOTO="pcmcia_end"
+SUBSYSTEM!="block", GOTO="pcmcia_end"
+KERNEL=="hd*[!0-9]", IMPORT{program}="ata_id --export $tempnode"
+KERNEL=="hd*", IMPORT{parent}=="ID_*"
+KERNEL=="hd*", ENV{ID_TYPE}=="generic", GROUP="storage"
+LABEL="pcmcia_end"
+
+# permissions for SCSI scanners
+SUBSYSTEMS=="scsi", KERNEL=="sg[0-9]*", ATTRS{type}=="6", GROUP="scanner"
+
+# mem
+KERNEL=="ram0", SYMLINK+="ramdisk"
+KERNEL=="ram1", SYMLINK+="ram"
+
+# video4linux
+
+KERNEL=="vbi0", SYMLINK+="vbi"
+KERNEL=="radio0", SYMLINK+="radio"
+KERNEL=="radio[0-9]*", GROUP="video"
+KERNEL=="video0", SYMLINK+="video"
+KERNEL=="vtx0", SYMLINK+="vtx"
+
+# video devices
+### xorg resets those permissions, adjust your xorg.conf!
+KERNEL=="3dfx*", GROUP="video"
+KERNEL=="fb[0-9]*", GROUP="video"
+
+# misc
+KERNEL=="sgi_fetchop", MODE="0666"
+KERNEL=="sonypi", MODE="0666"
+
+# USB devices
+KERNEL=="legousbtower*", MODE="0666"
+
+# kbd devices
+KERNEL=="kbd", MODE="0664"
+
+# miscellaneous
+KERNEL=="rtc|rtc0", GROUP="audio", MODE="0664"
+#######################################
+# Permissions and Symlinks - end
+#######################################
diff --git a/testing/udev/PKGBUILD b/testing/udev/PKGBUILD
new file mode 100644
index 000000000..b80fd7707
--- /dev/null
+++ b/testing/udev/PKGBUILD
@@ -0,0 +1,101 @@
+# $Id: PKGBUILD 126202 2011-06-02 14:49:12Z tomegun $
+# Maintainer: Aaron Griffin <aaron@archlinux.org>
+# Maintainer: Tobias Powalowski <tpowa@archlinux.org>
+# Maintainer: Thomas Bächler <thomas@archlinux.org>
+# Maintainer: Tom Gundersen <teg@jklm.no>
+
+pkgbase="udev"
+pkgname=('udev' 'udev-compat')
+pkgver=171
+pkgrel=2
+arch=(i686 x86_64)
+url="http://www.kernel.org/pub/linux/utils/kernel/hotplug/udev.html"
+license=('GPL')
+groups=('base')
+# older initscripts versions required start_udev
+options=(!makeflags !libtool)
+makedepends=('glibc' 'coreutils' 'util-linux' 'pciutils' 'libusb-compat' 'glib2' 'kernel26' 'gperf' 'libxslt' 'gobject-introspection')
+source=(http://www.kernel.org/pub/linux/utils/kernel/hotplug/$pkgbase-$pkgver.tar.bz2
+ 81-arch.rules
+ static-audio-nodes-group.patch
+ static-nodes-permissions.patch)
+
+build() {
+ cd $srcdir/$pkgbase-$pkgver
+ # fix https://bugs.archlinux.org/task/24362 (will be in udev-172)
+ patch -Np1 -i ../static-audio-nodes-group.patch
+ patch -Np1 -i ../static-nodes-permissions.patch
+ ./configure --sysconfdir=/etc --with-rootlibdir=/lib --libexecdir=/lib/udev\
+ --sbindir=/sbin --with-systemdsystemunitdir=/lib/systemd/system\
+ --disable-rule-generator
+ make
+}
+
+package_udev() {
+ pkgdesc="The userspace dev tools (udev)"
+ depends=('glibc' 'coreutils' 'util-linux' 'libusb-compat' 'glib2'
+ 'module-init-tools>=3.11' 'pciutils')
+ install=udev.install
+ backup=(etc/udev/udev.conf
+ etc/modprobe.d/framebuffer_blacklist.conf)
+ conflicts=('pcmcia-cs' 'hotplug' 'initscripts<2009.07')
+ replaces=('devfsd')
+
+ cd $srcdir/$pkgbase-$pkgver
+ make DESTDIR=${pkgdir} install
+ # Install our rule for permissions and symlinks
+ install -D -m644 $srcdir/81-arch.rules $pkgdir/lib/udev/rules.d/81-arch.rules
+
+ # create framebuffer blacklist
+ mkdir -p $pkgdir/etc/modprobe.d/
+ for mod in $(find /lib/modules/*/kernel/drivers/video -name '*fb.ko.gz' -exec basename {} .ko.gz \;); do
+ echo "blacklist $mod"
+ done | sort -u > $pkgdir/etc/modprobe.d/framebuffer_blacklist.conf
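+ # the generated file contains one "blacklist" line per framebuffer
+ # driver found, e.g. "blacklist nvidiafb" (exact modules depend on
+ # the installed kernel)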
+
+ # create static devices in /lib/udev/devices/
+ mkdir ${pkgdir}/lib/udev/devices/pts
+ mkdir ${pkgdir}/lib/udev/devices/shm
+
+ mknod -m 0600 ${pkgdir}/lib/udev/devices/console c 5 1
+ mknod -m 0666 ${pkgdir}/lib/udev/devices/null c 1 3
+ mknod -m 0660 ${pkgdir}/lib/udev/devices/zero c 1 5
+ mknod -m 0666 ${pkgdir}/lib/udev/devices/kmsg c 1 11
+
+ ln -snf /proc/self/fd ${pkgdir}/lib/udev/devices/fd
+ ln -snf /proc/self/fd/0 ${pkgdir}/lib/udev/devices/stdin
+ ln -snf /proc/self/fd/1 ${pkgdir}/lib/udev/devices/stdout
+ ln -snf /proc/self/fd/2 ${pkgdir}/lib/udev/devices/stderr
+ ln -snf /proc/kcore ${pkgdir}/lib/udev/devices/core
+
+ # these static devices are created for convenience, to autoload the modules if necessary
+ # /dev/loop0
+ mknod -m 0660 ${pkgdir}/lib/udev/devices/loop0 b 7 0
+ chgrp disk ${pkgdir}/lib/udev/devices/loop0
+ # /dev/net/tun
+ mkdir ${pkgdir}/lib/udev/devices/net
+ mknod -m 0666 ${pkgdir}/lib/udev/devices/net/tun c 10 200
+ # /dev/fuse
+ mknod -m 0666 ${pkgdir}/lib/udev/devices/fuse c 10 229
+ # /dev/ppp
+ mknod -m 0600 ${pkgdir}/lib/udev/devices/ppp c 108 0
+
+ # Replace dialout/tape/cdrom group in rules with uucp/storage/optical group
+ for i in $pkgdir/lib/udev/rules.d/*.rules; do
+ sed -i -e 's#GROUP="dialout"#GROUP="uucp"#g;
+ s#GROUP="tape"#GROUP="storage"#g;
+ s#GROUP="cdrom"#GROUP="optical"#g' $i
+ done
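+ # e.g. a stock rule with GROUP="cdrom" ends up as GROUP="optical",
+ # matching the Arch group scheme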
+}
+
+package_udev-compat() {
+ pkgdesc="The userspace dev tools (udev) - additional rules for older kernels"
+ depends=('udev')
+ groups=('')
+ cd $srcdir/$pkgbase-$pkgver
+ install -d -m755 ${pkgdir}/lib/${pkgbase}/rules.d
+ install -D -m644 ${srcdir}/${pkgbase}-${pkgver}/rules/misc/30-kernel-compat.rules ${pkgdir}/lib/udev/rules.d/30-kernel-compat.rules
+}
+md5sums=('bdf4617284be2ecac11767437417e209'
+ '6ee44e3feb8e0f037947e7d4ca273f12'
+ '4f625aea95a5597afd8cdf189421f193'
+ 'f9e50b8dfcd2215f5423ff9bc04ecf68')
diff --git a/testing/udev/static-audio-nodes-group.patch b/testing/udev/static-audio-nodes-group.patch
new file mode 100644
index 000000000..b1fc4f935
--- /dev/null
+++ b/testing/udev/static-audio-nodes-group.patch
@@ -0,0 +1,27 @@
+From 3e227830ad6494700e18ae03297e8fb833ff26bf Mon Sep 17 00:00:00 2001
+From: Kay Sievers <kay.sievers@vrfy.org>
+Date: Fri, 27 May 2011 02:50:29 +0200
+Subject: [PATCH] rules: apply 'audio' group of the static snd/{seq,timer}
+ nodes
+
+---
+ rules/rules.d/50-udev-default.rules | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/rules/rules.d/50-udev-default.rules b/rules/rules.d/50-udev-default.rules
+index cd745ef..cacb533 100644
+--- a/rules/rules.d/50-udev-default.rules
++++ b/rules/rules.d/50-udev-default.rules
+@@ -38,7 +38,8 @@ SUBSYSTEM=="graphics", GROUP="video"
+ SUBSYSTEM=="drm", GROUP="video"
+
+ # sound
+-SUBSYSTEM=="sound", GROUP="audio"
++SUBSYSTEM=="sound", GROUP="audio", \
++ OPTIONS+="static_node=snd/seq", OPTIONS+="static_node=snd/timer"
+
+ # DVB (video)
+ SUBSYSTEM=="dvb", GROUP="video"
+--
+1.7.5.3
+
diff --git a/testing/udev/static-nodes-permissions.patch b/testing/udev/static-nodes-permissions.patch
new file mode 100644
index 000000000..51e6ad6bc
--- /dev/null
+++ b/testing/udev/static-nodes-permissions.patch
@@ -0,0 +1,57 @@
+From c112873b5bc9ebbae39c32f502bc6211f33546cc Mon Sep 17 00:00:00 2001
+From: Kay Sievers <kay.sievers@vrfy.org>
+Date: Mon, 30 May 2011 02:12:02 +0200
+Subject: [PATCH 1/2] rules: static_node - use 0660 if group is given to get
+ the cigar
+
+>> On Tue, May 24, 2011 at 15:33, Tom Gundersen <teg@jklm.no> wrote:
+>
+> Close, but no cigar. Looks like the static nodes are not assigned
+> permissions 0660 even if a gid is set (the nodes have perms 0600).
+>
+> Cheers,
+>
+> Tom
+---
+ udev/udev-rules.c | 10 ++++++++--
+ 1 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/udev/udev-rules.c b/udev/udev-rules.c
+index 48395e7..56a258d 100644
+--- a/udev/udev-rules.c
++++ b/udev/udev-rules.c
+@@ -2709,8 +2709,9 @@ void udev_rules_apply_static_dev_perms(struct udev_rules *rules)
+ case TK_A_STATIC_NODE: {
+ char filename[UTIL_PATH_SIZE];
+ struct stat stats;
++
+ /* we assure, that the permissions tokens are sorted before the static token */
+- if (mode == 0 && uid == 0 && gid == 0)
++ if (uid == 0 && gid == 0)
+ goto next;
+ util_strscpyl(filename, sizeof(filename), udev_get_dev_path(rules->udev), "/",
+ &rules->buf[cur->key.value_off], NULL);
+@@ -2718,14 +2719,19 @@ void udev_rules_apply_static_dev_perms(struct udev_rules *rules)
+ goto next;
+ if (!S_ISBLK(stats.st_mode) && !S_ISCHR(stats.st_mode))
+ goto next;
+- if (mode != 0 && mode != (stats.st_mode & 0777)) {
++
++ if (mode == 0 && gid > 0)
++ mode = 0660;
++ if (mode != (stats.st_mode & 0777)) {
+ chmod(filename, mode);
+ info(rules->udev, "chmod '%s' %#o\n", filename, mode);
+ }
++
+ if ((uid != 0 && uid != stats.st_uid) || (gid != 0 && gid != stats.st_gid)) {
+ chown(filename, uid, gid);
+ info(rules->udev, "chown '%s' %u %u\n", filename, uid, gid);
+ }
++
+ utimensat(AT_FDCWD, filename, NULL, 0);
+ break;
+ }
+--
+1.7.5.2
+
diff --git a/testing/udev/udev.install b/testing/udev/udev.install
new file mode 100644
index 000000000..5c02dd15b
--- /dev/null
+++ b/testing/udev/udev.install
@@ -0,0 +1,65 @@
+# arg 1: the new package version
+# arg 2: the old package version
+
+post_upgrade() {
+ if [ "$(vercmp $2 100)" -lt 0 ]; then
+ echo "ATTENTION UDEV:"
+ echo "----------"
+ echo "udev >=098 rules syntax has changed, please update your own rules."
+ echo "udev >=099 Added persistent network and CD/DVD Symlink generator rules."
+ echo "Please read the instructions carefully before reboot."
+ echo "They are located in /etc/udev/readme-udev-arch.txt"
+ echo "----------"
+ fi
+ if [ "$(vercmp $2 169)" -lt 0 ]; then
+ echo "ATTENTION UDEV:"
+ echo "---------------"
+ echo "Kernel 2.6.32 or newer is now required."
+ echo "OSS emulation modules are not loaded by default, add to rc.conf if needed."
+ echo "Arch specific cd symlinks are now no longer created."
+ echo "cd and net persistent rules will no longer be autogenerated,"
+ echo "see <https://wiki.archlinux.org/index.php/Udev> for details."
+ echo "Errors are now logged (possibly to the console) by default."
+ echo "---------------"
+ fi
+ if [ "$(vercmp $2 172)" -lt 0 ]; then
+ echo "ATTENTION UDEV:"
+ echo "---------------"
+ echo "Arch's custom blacklisting logic has been removed. MOD_AUTOLOAD and"
+ echo "blacklisting in MODULES no longer works."
+ echo "See 'man modprobe.conf' for a replacement to blacklisting."
+ echo "To disable a module mod1 on the kernel command line, use"
+ echo "mod1.disable=1"
+ echo "or"
+ echo "modprobe.blacklist=mod1"
+ echo " --"
+ echo "The following modules are no longer unconditionally loaded:"
+ echo " pcspkr irtty-sir analog lp ppdev ide-generic"
+ echo "Add them to MODULES in rc.conf if you need them."
+ echo "---------------"
+ fi
+}
+
+post_install() {
+ # If a ramfs is mounted, we still need to make sure that /dev/{console,null,zero} exist
+ # The Archlinux installer bind-mounts /dev to /mnt/dev, thus making the real /dev invisible
+ ROOTDIR=""
+ [ "$(stat -c %D /)" != "$(stat -c %D /dev)" ] && ROOTDIR=$(mktemp -d /tmp/udevinstall.XXXXXX)
+ [ -n "${ROOTDIR}" ] && mount --bind / ${ROOTDIR}
+ if [ ! -c ${ROOTDIR}/dev/console ]; then
+ rm -f ${ROOTDIR}/dev/console
+ mknod -m600 ${ROOTDIR}/dev/console c 5 1
+ fi
+ if [ ! -c ${ROOTDIR}/dev/null ]; then
+ rm -f ${ROOTDIR}/dev/null
+ mknod -m644 ${ROOTDIR}/dev/null c 1 3
+ fi
+ if [ ! -c ${ROOTDIR}/dev/zero ]; then
+ rm -f ${ROOTDIR}/dev/zero
+ mknod -m644 ${ROOTDIR}/dev/zero c 1 5
+ fi
+ if [ -n "${ROOTDIR}" ]; then
+ umount ${ROOTDIR}
+ rmdir ${ROOTDIR}
+ fi
+}
diff --git a/testing/yp-tools/PKGBUILD b/testing/yp-tools/PKGBUILD
new file mode 100644
index 000000000..8cf4b6960
--- /dev/null
+++ b/testing/yp-tools/PKGBUILD
@@ -0,0 +1,26 @@
+# $Id: PKGBUILD 126193 2011-06-02 14:34:36Z bisson $
+# Maintainer: Gaetan Bisson <bisson@archlinux.org>
+# Contributor: dorphell <dorphell@archlinux.org>
+# Contributor: Tom Newsom <Jeepster@gmx.co.uk>
+
+pkgname=yp-tools
+pkgver=2.12
+pkgrel=2
+pkgdesc='Linux NIS Tools'
+arch=('i686' 'x86_64')
+url='http://www.linux-nis.org/nis/yp-tools/'
+license=('GPL2')
+depends=('ypbind-mt')
+source=("ftp://ftp.kernel.org/pub/linux/utils/net/NIS/$pkgname-$pkgver.tar.gz")
+sha1sums=('10b0ef5d4c5723e0716d7a1431a900c0ba6ef703')
+
+build() {
+ cd "$srcdir/$pkgname-$pkgver"
+ ./configure --prefix=/usr
+ make
+}
+
+package() {
+ cd "$srcdir/$pkgname-$pkgver"
+ make DESTDIR="$pkgdir" install
+}
diff --git a/testing/ypbind-mt/PKGBUILD b/testing/ypbind-mt/PKGBUILD
new file mode 100644
index 000000000..45cf58258
--- /dev/null
+++ b/testing/ypbind-mt/PKGBUILD
@@ -0,0 +1,39 @@
+# $Id: PKGBUILD 126308 2011-06-04 12:37:21Z bisson $
+# Maintainer: Gaetan Bisson <bisson@archlinux.org>
+# Contributor: judd <jvinet@zeroflux.org>
+# Contributor: Tom Newsom <Jeepster@gmx.co.uk>
+
+pkgname=ypbind-mt
+pkgver=1.33
+pkgrel=2
+pkgdesc='Linux NIS daemon'
+arch=('i686' 'x86_64')
+url='http://www.linux-nis.org/nis/ypbind-mt/'
+license=('GPL2')
+depends=('rpcbind' 'openslp')
+optdepends=('yp-tools: to set a domain name')
+backup=('etc/yp.conf' 'etc/conf.d/ypbind' 'etc/conf.d/nisdomainname')
+source=("ftp://ftp.kernel.org/pub/linux/utils/net/NIS/$pkgname-$pkgver.tar.gz"
+ 'nisdomainname.conf'
+ 'ypbind.conf'
+ 'ypbind')
+sha1sums=('49f578d15aa5d4f4130a2e96cd9c0e519263fc88'
+ 'eb00aecc0679e25a36b007e797f4468b40cb3e8e'
+ '07dee386d001fb9e9e6b76dda8af5b2092e5a4a2'
+ '112fc2aedfe3f761325b69647b7938bc1be5bfcd')
+
+build() {
+ cd "$srcdir/$pkgname-$pkgver"
+ ./configure --prefix=/usr --disable-dbus-nm
+ make
+}
+
+package() {
+ cd "$srcdir/$pkgname-$pkgver"
+ make DESTDIR="$pkgdir" install
+ install -D -m644 etc/yp.conf "$pkgdir"/etc/yp.conf
+ install -D -m755 ../ypbind "$pkgdir"/etc/rc.d/ypbind
+ install -D -m644 ../ypbind.conf "$pkgdir"/etc/conf.d/ypbind
+ install -D -m644 ../nisdomainname.conf "$pkgdir"/etc/conf.d/nisdomainname
+ install -d -m755 "$pkgdir"/var/yp/binding
+}
diff --git a/testing/ypbind-mt/nisdomainname.conf b/testing/ypbind-mt/nisdomainname.conf
new file mode 100644
index 000000000..bbbbf8fc7
--- /dev/null
+++ b/testing/ypbind-mt/nisdomainname.conf
@@ -0,0 +1,4 @@
+#
+# NIS domain to be set in /etc/rc.d/ypbind
+#
+NISDOMAINNAME=""
diff --git a/testing/ypbind-mt/ypbind b/testing/ypbind-mt/ypbind
new file mode 100755
index 000000000..6a5ef11e3
--- /dev/null
+++ b/testing/ypbind-mt/ypbind
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. /etc/rc.conf
+. /etc/rc.d/functions
+
+. /etc/conf.d/ypbind
+. /etc/conf.d/nisdomainname
+
+name=ypbind
+PID=$(pidof -o %PPID /usr/sbin/ypbind)
+
+case "$1" in
+start)
+ stat_busy "Starting $name daemon"
+ [[ -n $NISDOMAINNAME ]] && /usr/bin/domainname "$NISDOMAINNAME" &>/dev/null
+ [[ -z "$PID" ]] && /usr/sbin/ypbind $YPBIND_ARGS &>/dev/null \
+ && { add_daemon $name; stat_done; } \
+ || { stat_fail; exit 1; }
+ ;;
+stop)
+ stat_busy "Stopping $name daemon"
+ [[ -n "$PID" ]] && kill $PID &>/dev/null \
+ && { rm_daemon $name; stat_done; } \
+ || { stat_fail; exit 1; }
+ ;;
+restart)
+ $0 stop
+ sleep 1
+ $0 start
+ ;;
+*)
+ echo "usage: $0 {start|stop|restart}"
+ exit 1
+ ;;
+esac
diff --git a/testing/ypbind-mt/ypbind.conf b/testing/ypbind-mt/ypbind.conf
new file mode 100644
index 000000000..fd0ebd491
--- /dev/null
+++ b/testing/ypbind-mt/ypbind.conf
@@ -0,0 +1,4 @@
+#
+# Parameters to be passed to ypbind
+#
+YPBIND_ARGS=""