author     André Fabian Silva Delgado <andre@pc-01.localdomain>  2012-02-21 11:39:38 -0200
committer  André Fabian Silva Delgado <andre@pc-01.localdomain>  2012-02-21 11:39:38 -0200
commit     415e6b0a686989d0000a82ba8404d4ab9cd1e6b7 (patch)
tree       4c78f30103549386aa7f645c7f2623c181ce0572 /kernels/xen
parent     8446919c3950deb73699302e1c33cdc05b7d4add (diff)
kernels/xe-guest-utilities
Diffstat (limited to 'kernels/xen')
-rwxr-xr-x  kernels/xen/09_xen                          123
-rw-r--r--  kernels/xen/24341.patch                      11
-rw-r--r--  kernels/xen/24344.patch                      33
-rw-r--r--  kernels/xen/24345.patch                      31
-rw-r--r--  kernels/xen/PKGBUILD                         99
-rw-r--r--  kernels/xen/dom0_xz_decompression.patch    3528
-rw-r--r--  kernels/xen/parabolainit.patch              423
-rw-r--r--  kernels/xen/xen.patch                        21
8 files changed, 4269 insertions, 0 deletions
diff --git a/kernels/xen/09_xen b/kernels/xen/09_xen
new file mode 100755
index 000000000..094b7f427
--- /dev/null
+++ b/kernels/xen/09_xen
@@ -0,0 +1,123 @@
+#! /bin/sh -e
+
+if [ -f /usr/lib/grub/grub-mkconfig_lib ]; then
+ . /usr/lib/grub/grub-mkconfig_lib
+else
+ # grub-mkconfig helper library is missing, so notify and exit gracefully
+ echo "Cannot find the grub-mkconfig library, exiting." >&2
+ exit 0
+fi
+
+XEN_HYPERVISOR_CMDLINE=
+XEN_LINUX_CMDLINE="console=tty0"
+[ -r /etc/xen/grub.conf ] && . /etc/xen/grub.conf
+
+CLASS="--class gnu-linux --class gnu --class os"
+
+if [ "x${GRUB_DISTRIBUTOR}" = "x" ] ; then
+ OS=GNU/Linux
+else
+ OS="${GRUB_DISTRIBUTOR}"
+ CLASS="--class $(echo ${GRUB_DISTRIBUTOR} | tr '[A-Z]' '[a-z]' | cut -d' ' -f1) ${CLASS}"
+fi
+
+# loop-AES arranges things so that /dev/loop/X can be our root device, but
+# the initrds that Linux uses don't like that.
+case ${GRUB_DEVICE} in
+ /dev/loop/*|/dev/loop[0-9])
+ GRUB_DEVICE=`losetup ${GRUB_DEVICE} | sed -e "s/^[^(]*(\([^)]\+\)).*/\1/"`
+ ;;
+esac
+
+if [ "x${GRUB_DEVICE_UUID}" = "x" ] || [ "x${GRUB_DISABLE_LINUX_UUID}" = "xtrue" ] \
+ || ! test -e "/dev/disk/by-uuid/${GRUB_DEVICE_UUID}" \
+ || [ "`grub-probe -t abstraction --device ${GRUB_DEVICE} | sed -e 's,.*\(lvm\).*,\1,'`" = "lvm" ] ; then
+ LINUX_ROOT_DEVICE=${GRUB_DEVICE}
+else
+ LINUX_ROOT_DEVICE=UUID=${GRUB_DEVICE_UUID}
+fi
+
+xen_entry ()
+{
+ os="$1"
+ xen_version="$2"
+ version="$3"
+ xen_args="$4"
+ args="$5"
+ printf "menuentry 'Xen %s / %s, with Linux %s' --class xen ${CLASS} {\n" "${xen_version}" "${os}" "${version}"
+ save_default_entry | sed -e "s/^/\t/"
+
+ if [ -z "${prepare_boot_cache}" ]; then
+ prepare_boot_cache="$(prepare_grub_to_access_device ${GRUB_DEVICE_BOOT} | sed -e "s/^/\t/")"
+ fi
+ printf '%s\n' "${prepare_boot_cache}"
+ cat << EOF
+ echo '$(printf "Loading Xen %s ..." ${xen_version})'
+ multiboot ${rel_dirname}/${xen_basename} ${rel_dirname}/${xen_basename} ${xen_args}
+ echo $(printf "$(gettext "Loading Linux %s ...")" ${version})
+ module ${rel_dirname}/${basename} ${rel_dirname}/${basename} root=${linux_root_device_thisversion} ro ${args}
+EOF
+ if test -n "${initrd}" ; then
+ cat << EOF
+ echo "Loading initial ramdisk ..."
+ module ${rel_dirname}/${initrd}
+EOF
+ fi
+ cat << EOF
+}
+EOF
+}
+
+xen_list=`for i in /boot/xen-*.gz /xen-*.gz ; do
+ if grub_file_is_not_garbage "$i" ; then echo -n "$i "; fi
+done`
+prepare_boot_cache=
+
+while [ "x$xen_list" != "x" ] ; do
+ xen=`version_find_latest $xen_list`
+ echo "Found Xen hypervisor image: $xen" >&2
+ xen_basename=`basename $xen`
+ xen_dirname=`dirname $xen`
+ rel_xen_dirname=`make_system_path_relative_to_its_root $xen_dirname`
+ xen_version=`echo $xen_basename | sed -e "s,^[^0-9]*-,,g" | sed -e "s,.gz,,g"`
+ alt_xen_version=`echo $xen_version | sed -e "s,\.old$,,g"`
+
+ xen_configfiles=`grep -l 'CONFIG_XEN_PRIVILEGED_GUEST=y' /boot/config-*`
+
+ list="/boot/vmlinuz-linux-libre";
+
+ while [ "x$list" != "x" ] ; do
+ linux=`version_find_latest $list`
+ echo -e "\tFound linux image: $linux" >&2
+ basename=`basename $linux`
+ dirname=`dirname $linux`
+ rel_dirname=`make_system_path_relative_to_its_root $dirname`
+ version=`echo $basename | sed -e "s,^[^0-9]*-,,g"`
+ base_init=`echo $basename | sed -e "s,vmlinuz,initramfs,g"`
+ alt_version="${base_init}-fallback"
+ linux_root_device_thisversion="${LINUX_ROOT_DEVICE}"
+ initrd=
+
+ for i in "${base_init}.img"; do
+ if test -e "${dirname}/${i}" ; then
+ initrd="$i"
+ break
+ fi
+ done
+ if test -n "${initrd}" ; then
+ echo -e "\tFound initrd image: ${dirname}/${initrd}" >&2
+ else
+ # "UUID=" magic is parsed by initrds. Since there's no initrd, it can't work here.
+ linux_root_device_thisversion=${GRUB_DEVICE}
+ fi
+
+ xen_entry "${OS}" "${xen_version}" "${version}" \
+ "${XEN_HYPERVISOR_CMDLINE}" \
+ "${XEN_LINUX_CMDLINE}"
+
+ list=`echo $list | tr ' ' '\n' | grep -vx $linux | tr '\n' ' '`
+ done
+
+ xen_list=`echo $xen_list | tr ' ' '\n' | grep -vx $xen | tr '\n' ' '`
+done
+
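
To make the heredocs above concrete: on a hypothetical system with GRUB_DISTRIBUTOR unset, /boot on the root filesystem, the default XEN_LINUX_CMDLINE, and /boot/xen-4.1.2.gz, /boot/vmlinuz-linux-libre, /boot/initramfs-linux-libre.img present, the script would emit roughly the entry below (the save_default_entry and prepare_grub_to_access_device output varies per machine and is elided; the root UUID is a placeholder). Note that the greedy version sed strips through the final '-' when the kernel name contains no digits, so "vmlinuz-linux-libre" shows up as just "libre":

    menuentry 'Xen 4.1.2 / GNU/Linux, with Linux libre' --class xen --class gnu-linux --class gnu --class os {
            echo 'Loading Xen 4.1.2 ...'
            multiboot /boot/xen-4.1.2.gz /boot/xen-4.1.2.gz
            echo Loading Linux libre ...
            module /boot/vmlinuz-linux-libre /boot/vmlinuz-linux-libre root=UUID=<root-uuid> ro console=tty0
            echo "Loading initial ramdisk ..."
            module /boot/initramfs-linux-libre.img
    }
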
diff --git a/kernels/xen/24341.patch b/kernels/xen/24341.patch
new file mode 100644
index 000000000..5554004d3
--- /dev/null
+++ b/kernels/xen/24341.patch
@@ -0,0 +1,11 @@
+--- a/xen/arch/x86/x86_64/mmconfig_64.c 2011-10-20 15:05:49.000000000 -0200
++++ b/xen/arch/x86/x86_64/mmconfig_64.c 2012-02-14 23:45:47.481729733 -0200
+@@ -23,7 +23,7 @@
+ char __iomem *virt;
+ };
+ static struct mmcfg_virt *pci_mmcfg_virt;
+-static int __initdata mmcfg_pci_segment_shift;
++static unsigned int mmcfg_pci_segment_shift;
+
+ static char __iomem *get_virt(unsigned int seg, unsigned bus)
+ {
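
For context on the one-line fix above: __initdata places a variable in the .init.data section, which Xen discards once boot completes, so it is only safe for data that is never referenced afterwards; this variable is used after boot (the runtime helper get_virt() follows immediately below it), and a signed type made little sense for a shift count. A toy, compilable illustration of the annotation pattern; the macro and all names below are ours, simplified, not Xen code:

    #include <stdio.h>

    /* Simplified stand-in for the Xen/Linux annotation: put the variable
     * in a dedicated section that the boot code frees after init. */
    #define __initdata __attribute__((__section__(".init.data")))

    static int __initdata boot_only_shift;  /* safe during init only */
    static unsigned int runtime_shift;      /* ordinary data: safe anytime */

    static unsigned int get_virt_index(unsigned int seg)
    {
        /* A runtime path like get_virt() must use the runtime variable;
         * reading boot_only_shift here would be a use-after-free in Xen. */
        return seg >> runtime_shift;
    }

    int main(void)
    {
        boot_only_shift = 1;   /* pretend this is boot-time setup */
        runtime_shift = 3;
        printf("index = %u\n", get_virt_index(64));  /* prints 8 */
        return 0;
    }
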
diff --git a/kernels/xen/24344.patch b/kernels/xen/24344.patch
new file mode 100644
index 000000000..642b90867
--- /dev/null
+++ b/kernels/xen/24344.patch
@@ -0,0 +1,33 @@
+--- a/tools/libxc/xc_cpuid_x86.c 2011-10-20 15:05:42.000000000 -0200
++++ b/tools/libxc/xc_cpuid_x86.c 2012-02-15 00:01:46.307514813 -0200
+@@ -42,23 +42,23 @@
+ static void cpuid(const unsigned int *input, unsigned int *regs)
+ {
+ unsigned int count = (input[1] == XEN_CPUID_INPUT_UNUSED) ? 0 : input[1];
+- asm (
+ #ifdef __i386__
++/* Use the stack to avoid reg constraint failures with some gcc flags */
++ asm (
+ "push %%ebx; push %%edx\n\t"
+-#else
+- "push %%rbx; push %%rdx\n\t"
+-#endif
+ "cpuid\n\t"
+ "mov %%ebx,4(%4)\n\t"
+ "mov %%edx,12(%4)\n\t"
+-#ifdef __i386__
+ "pop %%edx; pop %%ebx\n\t"
++ : "=a" (regs[0]), "=c" (regs[2])
++ : "0" (input[0]), "1" (count), "S" (regs)
++ : "memory" );
+ #else
+- "pop %%rdx; pop %%rbx\n\t"
++ asm (
++ "cpuid"
++ : "=a" (regs[0]), "=b" (regs[1]), "=c" (regs[2]), "=d" (regs[3])
++ : "0" (input[0]), "2" (count) );
+ #endif
+- : "=a" (regs[0]), "=c" (regs[2])
+- : "0" (input[0]), "1" (count), "S" (regs)
+- : "memory" );
+ }
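
What the hunks above do: the original single asm statement, with manual push/pop of rbx/rdx and a memory operand, could fail to compile under some gcc flag combinations ("can't find a register in class ..."), so the patch splits it per architecture. On i386, where PIC reserves %ebx, results are stored through the regs pointer with %ebx/%edx saved on the stack; on x86-64 gcc is simply allowed to allocate all four registers. A standalone sketch of the post-patch shape (x86 only; the function name and the leaf-0 demo are ours):

    #include <stdio.h>

    static void cpuid_sketch(unsigned int leaf, unsigned int count,
                             unsigned int *regs)
    {
    #ifdef __i386__
        /* PIC may reserve %ebx: keep it out of the constraint list and
         * store %ebx/%edx through the regs pointer (operand %4). */
        asm ("push %%ebx; push %%edx\n\t"
             "cpuid\n\t"
             "mov %%ebx,4(%4)\n\t"
             "mov %%edx,12(%4)\n\t"
             "pop %%edx; pop %%ebx\n\t"
             : "=a" (regs[0]), "=c" (regs[2])
             : "0" (leaf), "1" (count), "S" (regs)
             : "memory");
    #else
        /* No %ebx restriction on x86-64: let gcc pick the registers. */
        asm ("cpuid"
             : "=a" (regs[0]), "=b" (regs[1]), "=c" (regs[2]), "=d" (regs[3])
             : "0" (leaf), "2" (count));
    #endif
    }

    int main(void)
    {
        unsigned int regs[4] = { 0, 0, 0, 0 };
        cpuid_sketch(0, 0, regs);  /* leaf 0: max leaf + vendor string */
        printf("max leaf %u, vendor %.4s%.4s%.4s\n", regs[0],
               (const char *)&regs[1], (const char *)&regs[3],
               (const char *)&regs[2]);
        return 0;
    }
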
diff --git a/kernels/xen/24345.patch b/kernels/xen/24345.patch
new file mode 100644
index 000000000..e75a1b38c
--- /dev/null
+++ b/kernels/xen/24345.patch
@@ -0,0 +1,31 @@
+--- a/tools/misc/xen-detect.c 2011-10-20 15:05:43.000000000 -0200
++++ b/tools/misc/xen-detect.c 2012-02-15 00:05:55.524455578 -0200
+@@ -35,18 +35,21 @@
+
+ static void cpuid(uint32_t idx, uint32_t *regs, int pv_context)
+ {
+- asm volatile (
+ #ifdef __i386__
+-#define R(x) "%%e"#x"x"
+-#else
+-#define R(x) "%%r"#x"x"
+-#endif
+- "push "R(a)"; push "R(b)"; push "R(c)"; push "R(d)"\n\t"
++/* Use the stack to avoid reg constraint failures with some gcc flags */
++ asm volatile (
++ "push %%eax; push %%ebx; push %%ecx; push %%edx\n\t"
+ "test %1,%1 ; jz 1f ; ud2a ; .ascii \"xen\" ; 1: cpuid\n\t"
+ "mov %%eax,(%2); mov %%ebx,4(%2)\n\t"
+ "mov %%ecx,8(%2); mov %%edx,12(%2)\n\t"
+- "pop "R(d)"; pop "R(c)"; pop "R(b)"; pop "R(a)"\n\t"
++ "pop %%edx; pop %%ecx; pop %%ebx; pop %%eax\n\t"
+ : : "a" (idx), "c" (pv_context), "S" (regs) : "memory" );
++#else
++ asm volatile (
++ "test %5,%5 ; jz 1f ; ud2a ; .ascii \"xen\" ; 1: cpuid\n\t"
++ : "=a" (regs[0]), "=b" (regs[1]), "=c" (regs[2]), "=d" (regs[3])
++ : "0" (idx), "1" (pv_context), "2" (0) );
++#endif
+ }
+
+ static int check_for_xen(int pv_context)
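
Same theme as 24344.patch: the i386 branch keeps all four GPRs on the stack and writes results through a pointer, while the x86-64 branch hands register allocation to gcc. The "ud2a ; .ascii \"xen\"" prefix is the magic sequence a paravirtualized kernel uses to have Xen emulate a cpuid that would otherwise trap. For HVM guests (or plain probing) the hypervisor cpuid leaves suffice; a hypothetical x86-64-only sketch of that path, ours rather than from the patch (and note its "=b" constraint is exactly what the i386 variant above must avoid under PIC):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static void cpuid_raw(uint32_t leaf, uint32_t *regs)
    {
        asm ("cpuid"
             : "=a" (regs[0]), "=b" (regs[1]), "=c" (regs[2]), "=d" (regs[3])
             : "0" (leaf));
    }

    int main(void)
    {
        uint32_t regs[4];
        char sig[13];

        /* Hypervisors expose a signature at leaf 0x40000000;
         * Xen's is "XenVMMXenVMM" in ebx/ecx/edx. On bare metal
         * this leaf returns unrelated data and the compare fails. */
        cpuid_raw(0x40000000, regs);
        memcpy(sig + 0, &regs[1], 4);
        memcpy(sig + 4, &regs[2], 4);
        memcpy(sig + 8, &regs[3], 4);
        sig[12] = '\0';

        printf("hypervisor signature: \"%s\"\n", sig);
        return strcmp(sig, "XenVMMXenVMM") != 0;  /* 0 if running on Xen */
    }
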
diff --git a/kernels/xen/PKGBUILD b/kernels/xen/PKGBUILD
new file mode 100644
index 000000000..e1796106a
--- /dev/null
+++ b/kernels/xen/PKGBUILD
@@ -0,0 +1,99 @@
+# Maintainer: M0Rf30
+# Contributor: WaxyMouthfeel
+# Contributor (Parabola): André Silva <andre.paulista@adinet.com.uy>
+pkgname=xen
+pkgver=4.1.2
+pkgrel=2
+pkgdesc="Xen 4 (hypervisor and tools)"
+arch=(i686 x86_64)
+url="http://xen.org/"
+license=('GPL')
+depends=('xz-utils' 'bzip2' 'iproute' 'bridge-utils' 'python2' 'sdl' 'zlib' 'e2fsprogs' 'pkgconfig' 'gnutls' 'lzo2' 'glibc')
+[ "$CARCH" == "x86_64" ] && depends=("${depends[@]}" 'lib32-glibc')
+optdepends=('xen-docs: Xen Official Documentation')
+makedepends=('dev86' 'bin86' 'ocaml-findlib' 'iasl')
+conflicts=('xen4' 'xen3' 'xen-hv-tools' 'libxen4')
+provides=('xen')
+backup=('etc/xen/xend-config.sxp' 'etc/xen/xend-pci-permissive.sxp' 'etc/xen/xend-pci-quirks.sxp')
+options=(!strip)
+optional=(xen-docs)
+source=(http://bits.xensource.com/oss-xen/release/${pkgver}/xen-${pkgver}.tar.gz
+ 09_xen
+ xen.patch
+ parabolainit.patch
+ dom0_xz_decompression.patch
+ 24341.patch
+ 24344.patch
+ 24345.patch)
+
+build() {
+
+
+ cd $srcdir/xen-${pkgver}
+
+ patch -p1 -i ../xen.patch
+ patch -p1 -i ../parabolainit.patch
+ patch -p1 -i ../dom0_xz_decompression.patch
+ patch -p1 -i ../24341.patch
+ patch -p1 -i ../24344.patch
+ patch -p1 -i ../24345.patch
+
+ unset CFLAGS LDFLAGS
+
+ make PYTHON=python2 DESTDIR=$pkgdir install-xen
+ make PYTHON=python2 DESTDIR=$pkgdir install-tools
+ #make PYTHON=python2 DESTDIR=$pkgdir install-stubdom
+
+ sed -i 's#XENDOM_CONFIG=/etc/sysconfig/xendomains#XENDOM_CONFIG=/etc/conf.d/xendomains#' $pkgdir/etc/init.d/xendomains
+ sed -i "s#touch /var/lock/subsys/xend#mkdir -p /var/lock/subsys\n touch /var/lock/subsys/xend#" $pkgdir/etc/init.d/xend
+
+ [ -d $pkgdir/usr/lib64 ] && ( cd $pkgdir/usr && cp -R lib64/* lib/ && rm -R lib64 )
+ ( cd $pkgdir/etc && mv init.d rc.d ) || return 1
+ rm -f $pkgdir/usr/share/man/man1/qemu-img.1* \
+ $pkgdir/usr/share/man/man1/qemu.1*
+ # First experiment to generate grub2.cfg entry
+ mkdir -p $pkgdir/etc/grub.d
+ chmod +x $srcdir/09_xen
+ cp $srcdir/09_xen $pkgdir/etc/grub.d
+
+ ############ kill unwanted stuff ############
+
+ # stubdom: newlib
+ rm -rf $pkgdir/usr/*-xen-elf
+
+ # hypervisor symlinks
+ rm -rf $pkgdir/boot/xen-4.1.gz
+ rm -rf $pkgdir/boot/xen-4.gz
+ rm -rf $pkgdir/boot/xen.gz
+
+ # silly doc dir fun
+ rm -rf $pkgdir/usr/share/doc/xen
+ rm -rf $pkgdir/usr/share/doc/qemu
+
+ # Pointless helper
+ rm -f $pkgdir/usr/sbin/xen-python-path
+
+ # qemu stuff (unused or available from upstream)
+ rm -rf $pkgdir/usr/share/xen/man
+ rm -rf $pkgdir/usr/bin/qemu-*-xen
+ for file in bios.bin openbios-sparc32 openbios-sparc64 ppc_rom.bin \
+     pxe-e1000.bin pxe-ne2k_pci.bin pxe-pcnet.bin pxe-rtl8139.bin \
+     vgabios.bin vgabios-cirrus.bin video.x openbios-ppc bamboo.dtb
+ do
+     rm -f $pkgdir/usr/share/xen/qemu/$file
+ done
+
+ # adhere to Static Library Packaging Guidelines
+ rm -rf $pkgdir/usr/lib/*.a
+
+
+}
+
+md5sums=('73561faf3c1b5e36ec5c089b5db848ad'
+ '8d50beba46ffd89a3b959176245b676e'
+ 'f149bae1a6b420e49c51b9f3a74338a4'
+ '7a1ed81ecc828037724bb3280058c9fc'
+ '4aebccf16b578ed97aa8bab945011f35'
+ '1b0c05a555bc99fc8416dd52b6c6ae95'
+ 'b6225be5bec3fe462f9166f9fde9c347'
+ 'd282946a2873a78b2b2c3944571eb2fe')
diff --git a/kernels/xen/dom0_xz_decompression.patch b/kernels/xen/dom0_xz_decompression.patch
new file mode 100644
index 000000000..277ebcfd2
--- /dev/null
+++ b/kernels/xen/dom0_xz_decompression.patch
@@ -0,0 +1,3528 @@
+diff --git a/xen/common/Makefile b/xen/common/Makefile
+--- a/xen/common/Makefile
++++ b/xen/common/Makefile
+@@ -43,7 +43,7 @@
+ obj-y += rbtree.o
+ obj-y += lzo.o
+
+-obj-$(CONFIG_X86) += decompress.o bunzip2.o unlzma.o unlzo.o
++obj-$(CONFIG_X86) += decompress.o bunzip2.o unxz.o unlzma.o unlzo.o
+
+ obj-$(perfc) += perfc.o
+ obj-$(crash_debug) += gdbstub.o
+diff --git a/xen/common/decompress.c b/xen/common/decompress.c
+--- a/xen/common/decompress.c
++++ b/xen/common/decompress.c
+@@ -20,6 +20,9 @@
+ if ( len >= 3 && !memcmp(inbuf, "\x42\x5a\x68", 3) )
+ return bunzip2(inbuf, len, NULL, NULL, outbuf, NULL, error);
+
++ if ( len >= 6 && !memcmp(inbuf, "\3757zXZ", 6) )
++ return unxz(inbuf, len, NULL, NULL, outbuf, NULL, error);
++
+ if ( len >= 2 && !memcmp(inbuf, "\135\000", 2) )
+ return unlzma(inbuf, len, NULL, NULL, outbuf, NULL, error);
+
+diff --git a/xen/common/decompress.h b/xen/common/decompress.h
+--- a/xen/common/decompress.h
++++ b/xen/common/decompress.h
+@@ -8,6 +8,7 @@
+
+ #define STATIC
+ #define INIT __init
++#define INITDATA __initdata
+
+ static void(*__initdata error)(const char *);
+ #define set_error_fn(x) error = x;
+diff --git a/xen/common/unxz.c b/xen/common/unxz.c
+new file mode 100644
+--- /dev/null
++++ b/xen/common/unxz.c
+@@ -0,0 +1,306 @@
++/*
++ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
++ *
++ * Author: Lasse Collin <lasse.collin@tukaani.org>
++ *
++ * This file has been put into the public domain.
++ * You can do whatever you want with this file.
++ */
++
++/*
++ * Important notes about in-place decompression
++ *
++ * At least on x86, the kernel is decompressed in place: the compressed data
++ * is placed to the end of the output buffer, and the decompressor overwrites
++ * most of the compressed data. There must be enough safety margin to
++ * guarantee that the write position is always behind the read position.
++ *
++ * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below.
++ * Note that the margin with XZ is bigger than with Deflate (gzip)!
++ *
++ * The worst case for in-place decompression is that the beginning of
++ * the file is compressed extremely well, and the rest of the file is
++ * uncompressible. Thus, we must look for worst-case expansion when the
++ * compressor is encoding uncompressible data.
++ *
++ * The structure of the .xz file in case of a compressed kernel is as follows.
++ * Sizes (as bytes) of the fields are in parentheses.
++ *
++ * Stream Header (12)
++ * Block Header:
++ * Block Header (8-12)
++ * Compressed Data (N)
++ * Block Padding (0-3)
++ * CRC32 (4)
++ * Index (8-20)
++ * Stream Footer (12)
++ *
++ * Normally there is exactly one Block, but let's assume that there are
++ * 2-4 Blocks just in case. Because Stream Header and also Block Header
++ * of the first Block don't make the decompressor produce any uncompressed
++ * data, we can ignore them from our calculations. Block Headers of possible
++ * additional Blocks have to be taken into account still. With these
++ * assumptions, it is safe to assume that the total header overhead is
++ * less than 128 bytes.
++ *
++ * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ
++ * doesn't change the size of the data, it is enough to calculate the
++ * safety margin for LZMA2.
++ *
++ * LZMA2 stores the data in chunks. Each chunk has a header whose size is
++ * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that
++ * the maximum chunk header size is 8 bytes. After the chunk header, there
++ * may be up to 64 KiB of actual payload in the chunk. Often the payload is
++ * quite a bit smaller though; to be safe, let's assume that an average
++ * chunk has only 32 KiB of payload.
++ *
++ * The maximum uncompressed size of the payload is 2 MiB. The minimum
++ * uncompressed size of the payload is in practice never less than the
++ * payload size itself. The LZMA2 format would allow uncompressed size
++ * to be less than the payload size, but no sane compressor creates such
++ * files. LZMA2 supports storing uncompressible data in uncompressed form,
++ * so there's never a need to create payloads whose uncompressed size is
++ * smaller than the compressed size.
++ *
++ * The assumption that the uncompressed size of the payload is never
++ * smaller than the payload itself is valid only when talking about
++ * the payload as a whole. It is possible that the payload has parts where
++ * the decompressor consumes more input than it produces output. Calculating
++ * the worst case for this would be tricky. Instead of trying to do that,
++ * let's simply make sure that the decompressor never overwrites any bytes
++ * of the payload which it is currently reading.
++ *
++ * Now we have enough information to calculate the safety margin. We need
++ * - 128 bytes for the .xz file format headers;
++ * - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header
++ * per chunk, each chunk having average payload size of 32 KiB); and
++ * - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that
++ * the decompressor never overwrites anything from the LZMA2 chunk
++ * payload it is currently reading.
++ *
++ * We get the following formula:
++ *
++ * safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536
++ * = 128 + (uncompressed_size >> 12) + 65536
++ *
++ * For comparison, according to arch/x86/boot/compressed/misc.c, the
++ * equivalent formula for Deflate is this:
++ *
++ * safety_margin = 18 + (uncompressed_size >> 12) + 32768
++ *
++ * Thus, when updating Deflate-only in-place kernel decompressor to
++ * support XZ, the fixed overhead has to be increased from 18+32768 bytes
++ * to 128+65536 bytes.
++ */
++
++#include "decompress.h"
++
++#define XZ_EXTERN STATIC
++
++/*
++ * For boot time use, we enable only the BCJ filter of the current
++ * architecture or none if no BCJ filter is available for the architecture.
++ */
++#ifdef CONFIG_X86
++# define XZ_DEC_X86
++#endif
++#ifdef CONFIG_PPC
++# define XZ_DEC_POWERPC
++#endif
++#ifdef CONFIG_ARM
++# define XZ_DEC_ARM
++#endif
++#ifdef CONFIG_IA64
++# define XZ_DEC_IA64
++#endif
++#ifdef CONFIG_SPARC
++# define XZ_DEC_SPARC
++#endif
++
++/*
++ * This will get the basic headers so that memeq() and others
++ * can be defined.
++ */
++#include "xz/private.h"
++
++/*
++ * memeq and memzero are not used much and any remotely sane implementation
++ * is fast enough. memcpy/memmove speed matters in multi-call mode, but
++ * the kernel image is decompressed in single-call mode, in which only
++ * memcpy speed can matter and only if there is a lot of uncompressible data
++ * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the
++ * functions below should just be kept small; it's probably not worth
++ * optimizing for speed.
++ */
++
++#ifndef memeq
++#define memeq(p1, p2, sz) (memcmp(p1, p2, sz) == 0)
++#endif
++
++#ifndef memzero
++#define memzero(p, sz) memset(p, 0, sz)
++#endif
++
++#include "xz/crc32.c"
++#include "xz/dec_stream.c"
++#include "xz/dec_lzma2.c"
++#include "xz/dec_bcj.c"
++
++/* Size of the input and output buffers in multi-call mode */
++#define XZ_IOBUF_SIZE 4096
++
++/*
++ * This function implements the API defined in <linux/decompress/generic.h>.
++ *
++ * This wrapper will automatically choose single-call or multi-call mode
++ * of the native XZ decoder API. The single-call mode can be used only when
++ * both input and output buffers are available as a single chunk, i.e. when
++ * fill() and flush() won't be used.
++ */
++STATIC int INIT unxz(unsigned char *in, unsigned int in_size,
++ int (*fill)(void *dest, unsigned int size),
++ int (*flush)(void *src, unsigned int size),
++ unsigned char *out, unsigned int *in_used,
++ void (*error_fn)(const char *x))
++{
++ struct xz_buf b;
++ struct xz_dec *s;
++ enum xz_ret ret;
++ bool_t must_free_in = false;
++
++ set_error_fn(error_fn);
++
++ xz_crc32_init();
++
++ if (in_used != NULL)
++ *in_used = 0;
++
++ if (fill == NULL && flush == NULL)
++ s = xz_dec_init(XZ_SINGLE, 0);
++ else
++ s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1);
++
++ if (s == NULL)
++ goto error_alloc_state;
++
++ if (flush == NULL) {
++ b.out = out;
++ b.out_size = (size_t)-1;
++ } else {
++ b.out_size = XZ_IOBUF_SIZE;
++ b.out = malloc(XZ_IOBUF_SIZE);
++ if (b.out == NULL)
++ goto error_alloc_out;
++ }
++
++ if (in == NULL) {
++ must_free_in = true;
++ in = malloc(XZ_IOBUF_SIZE);
++ if (in == NULL)
++ goto error_alloc_in;
++ }
++
++ b.in = in;
++ b.in_pos = 0;
++ b.in_size = in_size;
++ b.out_pos = 0;
++
++ if (fill == NULL && flush == NULL) {
++ ret = xz_dec_run(s, &b);
++ } else {
++ do {
++ if (b.in_pos == b.in_size && fill != NULL) {
++ if (in_used != NULL)
++ *in_used += b.in_pos;
++
++ b.in_pos = 0;
++
++ in_size = fill(in, XZ_IOBUF_SIZE);
++ if (in_size < 0) {
++ /*
++ * This isn't an optimal error code
++ * but it probably isn't worth making
++ * a new one either.
++ */
++ ret = XZ_BUF_ERROR;
++ break;
++ }
++
++ b.in_size = in_size;
++ }
++
++ ret = xz_dec_run(s, &b);
++
++ if (flush != NULL && (b.out_pos == b.out_size
++ || (ret != XZ_OK && b.out_pos > 0))) {
++ /*
++ * Setting ret here may hide an error
++ * returned by xz_dec_run(), but probably
++ * it's not too bad.
++ */
++ if (flush(b.out, b.out_pos) != (int)b.out_pos)
++ ret = XZ_BUF_ERROR;
++
++ b.out_pos = 0;
++ }
++ } while (ret == XZ_OK);
++
++ if (must_free_in)
++ free(in);
++
++ if (flush != NULL)
++ free(b.out);
++ }
++
++ if (in_used != NULL)
++ *in_used += b.in_pos;
++
++ xz_dec_end(s);
++
++ switch (ret) {
++ case XZ_STREAM_END:
++ return 0;
++
++ case XZ_MEM_ERROR:
++ /* This can occur only in multi-call mode. */
++ error("XZ decompressor ran out of memory");
++ break;
++
++ case XZ_FORMAT_ERROR:
++ error("Input is not in the XZ format (wrong magic bytes)");
++ break;
++
++ case XZ_OPTIONS_ERROR:
++ error("Input was encoded with settings that are not "
++ "supported by this XZ decoder");
++ break;
++
++ case XZ_DATA_ERROR:
++ case XZ_BUF_ERROR:
++ error("XZ-compressed data is corrupt");
++ break;
++
++ default:
++ error("Bug in the XZ decompressor");
++ break;
++ }
++
++ return -1;
++
++error_alloc_in:
++ if (flush != NULL)
++ free(b.out);
++
++error_alloc_out:
++ xz_dec_end(s);
++
++error_alloc_state:
++ error("XZ decompressor ran out of memory");
++ return -1;
++}
++
++/*
++ * This macro is used by architecture-specific files to decompress
++ * the kernel image.
++ */
++#define decompress unxz
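
The long header comment above derives safety_margin = 128 + (uncompressed_size >> 12) + 65536 for in-place XZ decompression. A small hypothetical helper (ours, not part of the patch) makes the numbers concrete:

    #include <stdio.h>
    #include <stdint.h>

    /* safety_margin = 128 + (uncompressed_size >> 12) + 65536, i.e.
     * .xz file-format headers, plus one 8-byte LZMA2 chunk header per
     * average 32 KiB chunk (8/32768 = 1/4096 = >>12), plus one maximal
     * 64 KiB chunk payload. */
    static uint64_t xz_safety_margin(uint64_t uncompressed_size)
    {
        return 128 + (uncompressed_size >> 12) + 65536;
    }

    int main(void)
    {
        const uint64_t mib = 1024 * 1024;
        const uint64_t sizes[] = { 4 * mib, 16 * mib, 64 * mib };
        for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i)
            printf("%2llu MiB image -> %llu byte margin\n",
                   (unsigned long long)(sizes[i] / mib),
                   (unsigned long long)xz_safety_margin(sizes[i]));
        return 0;  /* e.g. 16 MiB: 128 + 4096 + 65536 = 69760 */
    }
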
+diff --git a/xen/common/xz/crc32.c b/xen/common/xz/crc32.c
+new file mode 100644
+--- /dev/null
++++ b/xen/common/xz/crc32.c
+@@ -0,0 +1,51 @@
++/*
++ * CRC32 using the polynomial from IEEE-802.3
++ *
++ * Authors: Lasse Collin <lasse.collin@tukaani.org>
++ * Igor Pavlov <http://7-zip.org/>
++ *
++ * This file has been put into the public domain.
++ * You can do whatever you want with this file.
++ */
++
++/*
++ * This is not the fastest implementation, but it is pretty compact.
++ * The fastest versions of xz_crc32() on modern CPUs without hardware
++ * accelerated CRC instruction are 3-5 times as fast as this version,
++ * but they are bigger and use more memory for the lookup table.
++ */
++
++#include "private.h"
++
++XZ_EXTERN uint32_t INITDATA xz_crc32_table[256];
++
++XZ_EXTERN void INIT xz_crc32_init(void)
++{
++ const uint32_t poly = 0xEDB88320;
++
++ uint32_t i;
++ uint32_t j;
++ uint32_t r;
++
++ for (i = 0; i < 256; ++i) {
++ r = i;
++ for (j = 0; j < 8; ++j)
++ r = (r >> 1) ^ (poly & ~((r & 1) - 1));
++
++ xz_crc32_table[i] = r;
++ }
++
++ return;
++}
++
++XZ_EXTERN uint32_t INIT xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
++{
++ crc = ~crc;
++
++ while (size != 0) {
++ crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
++ --size;
++ }
++
++ return ~crc;
++}
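
This is the standard reflected CRC32 (IEEE 802.3, the same polynomial zlib uses), which the .xz format employs for its integrity checks; on the classic "123456789" test vector it must produce 0xCBF43926. A self-contained sketch of the same table-build/update pair (names are ours):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t crc32_table[256];

    static void crc32_init(void)
    {
        const uint32_t poly = 0xEDB88320;  /* reflected 802.3 polynomial */
        for (uint32_t i = 0; i < 256; ++i) {
            uint32_t r = i;
            for (int j = 0; j < 8; ++j)
                r = (r >> 1) ^ (poly & ~((r & 1) - 1));
            crc32_table[i] = r;
        }
    }

    static uint32_t crc32_update(const uint8_t *buf, size_t size, uint32_t crc)
    {
        crc = ~crc;  /* same pre/post inversion as xz_crc32() above */
        while (size--)
            crc = crc32_table[(uint8_t)(*buf++ ^ crc)] ^ (crc >> 8);
        return ~crc;
    }

    int main(void)
    {
        crc32_init();
        const char *msg = "123456789";
        printf("crc32(\"%s\") = 0x%08X\n", msg,
               crc32_update((const uint8_t *)msg, strlen(msg), 0));
        return 0;  /* expected: 0xCBF43926 */
    }
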
+diff --git a/xen/common/xz/dec_bcj.c b/xen/common/xz/dec_bcj.c
+new file mode 100644
+--- /dev/null
++++ b/xen/common/xz/dec_bcj.c
+@@ -0,0 +1,562 @@
++/*
++ * Branch/Call/Jump (BCJ) filter decoders
++ *
++ * Authors: Lasse Collin <lasse.collin@tukaani.org>
++ * Igor Pavlov <http://7-zip.org/>
++ *
++ * This file has been put into the public domain.
++ * You can do whatever you want with this file.
++ */
++
++#include "private.h"
++
++/*
++ * The rest of the file is inside this ifdef. It makes things a little more
++ * convenient when building without support for any BCJ filters.
++ */
++#ifdef XZ_DEC_BCJ
++
++struct xz_dec_bcj {
++ /* Type of the BCJ filter being used */
++ enum {
++ BCJ_X86 = 4, /* x86 or x86-64 */
++ BCJ_POWERPC = 5, /* Big endian only */
++ BCJ_IA64 = 6, /* Big or little endian */
++ BCJ_ARM = 7, /* Little endian only */
++ BCJ_ARMTHUMB = 8, /* Little endian only */
++ BCJ_SPARC = 9 /* Big or little endian */
++ } type;
++
++ /*
++ * Return value of the next filter in the chain. We need to preserve
++ * this information across calls, because we must not call the next
++ * filter anymore once it has returned XZ_STREAM_END.
++ */
++ enum xz_ret ret;
++
++ /* True if we are operating in single-call mode. */
++ bool_t single_call;
++
++ /*
++ * Absolute position relative to the beginning of the uncompressed
++ * data (in a single .xz Block). We care only about the lowest 32
++ * bits so this doesn't need to be uint64_t even with big files.
++ */
++ uint32_t pos;
++
++ /* x86 filter state */
++ uint32_t x86_prev_mask;
++
++ /* Temporary space to hold the variables from struct xz_buf */
++ uint8_t *out;
++ size_t out_pos;
++ size_t out_size;
++
++ struct {
++ /* Amount of already filtered data in the beginning of buf */
++ size_t filtered;
++
++ /* Total amount of data currently stored in buf */
++ size_t size;
++
++ /*
++ * Buffer to hold a mix of filtered and unfiltered data. This
++ * needs to be big enough to hold Alignment + 2 * Look-ahead:
++ *
++ * Type Alignment Look-ahead
++ * x86 1 4
++ * PowerPC 4 0
++ * IA-64 16 0
++ * ARM 4 0
++ * ARM-Thumb 2 2
++ * SPARC 4 0
++ */
++ uint8_t buf[16];
++ } temp;
++};
++
++#ifdef XZ_DEC_X86
++/*
++ * This is used to test the most significant byte of a memory address
++ * in an x86 instruction.
++ */
++static inline int INIT bcj_x86_test_msbyte(uint8_t b)
++{
++ return b == 0x00 || b == 0xFF;
++}
++
++static size_t INIT bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
++{
++ static /*const*/ bool_t INITDATA mask_to_allowed_status[8]
++ = { true, true, true, false, true, false, false, false };
++
++ static /*const*/ uint8_t INITDATA mask_to_bit_num[8]
++ = { 0, 1, 2, 2, 3, 3, 3, 3 };
++
++ size_t i;
++ size_t prev_pos = (size_t)-1;
++ uint32_t prev_mask = s->x86_prev_mask;
++ uint32_t src;
++ uint32_t dest;
++ uint32_t j;
++ uint8_t b;
++
++ if (size <= 4)
++ return 0;
++
++ size -= 4;
++ for (i = 0; i < size; ++i) {
++ if ((buf[i] & 0xFE) != 0xE8)
++ continue;
++
++ prev_pos = i - prev_pos;
++ if (prev_pos > 3) {
++ prev_mask = 0;
++ } else {
++ prev_mask = (prev_mask << (prev_pos - 1)) & 7;
++ if (prev_mask != 0) {
++ b = buf[i + 4 - mask_to_bit_num[prev_mask]];
++ if (!mask_to_allowed_status[prev_mask]
++ || bcj_x86_test_msbyte(b)) {
++ prev_pos = i;
++ prev_mask = (prev_mask << 1) | 1;
++ continue;
++ }
++ }
++ }
++
++ prev_pos = i;
++
++ if (bcj_x86_test_msbyte(buf[i + 4])) {
++ src = get_unaligned_le32(buf + i + 1);
++ while (true) {
++ dest = src - (s->pos + (uint32_t)i + 5);
++ if (prev_mask == 0)
++ break;
++
++ j = mask_to_bit_num[prev_mask] * 8;
++ b = (uint8_t)(dest >> (24 - j));
++ if (!bcj_x86_test_msbyte(b))
++ break;
++
++ src = dest ^ (((uint32_t)1 << (32 - j)) - 1);
++ }
++
++ dest &= 0x01FFFFFF;
++ dest |= (uint32_t)0 - (dest & 0x01000000);
++ put_unaligned_le32(dest, buf + i + 1);
++ i += 4;
++ } else {
++ prev_mask = (prev_mask << 1) | 1;
++ }
++ }
++
++ prev_pos = i - prev_pos;
++ s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1);
++ return i;
++}
++#endif
++
++#ifdef XZ_DEC_POWERPC
++static size_t INIT bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
++{
++ size_t i;
++ uint32_t instr;
++
++ for (i = 0; i + 4 <= size; i += 4) {
++ instr = get_unaligned_be32(buf + i);
++ if ((instr & 0xFC000003) == 0x48000001) {
++ instr &= 0x03FFFFFC;
++ instr -= s->pos + (uint32_t)i;
++ instr &= 0x03FFFFFC;
++ instr |= 0x48000001;
++ put_unaligned_be32(instr, buf + i);
++ }
++ }
++
++ return i;
++}
++#endif
++
++#ifdef XZ_DEC_IA64
++static size_t INIT bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
++{
++ static const uint8_t branch_table[32] = {
++ 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0,
++ 4, 4, 6, 6, 0, 0, 7, 7,
++ 4, 4, 0, 0, 4, 4, 0, 0
++ };
++
++ /*
++ * The local variables take a little bit of stack space, but it's less
++ * than what the LZMA2 decoder takes, so it doesn't make sense to reduce
++ * stack usage here without doing that for the LZMA2 decoder too.
++ */
++
++ /* Loop counters */
++ size_t i;
++ size_t j;
++
++ /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */
++ uint32_t slot;
++
++ /* Bitwise offset of the instruction indicated by slot */
++ uint32_t bit_pos;
++
++ /* bit_pos split into byte and bit parts */
++ uint32_t byte_pos;
++ uint32_t bit_res;
++
++ /* Address part of an instruction */
++ uint32_t addr;
++
++ /* Mask used to detect which instructions to convert */
++ uint32_t mask;
++
++ /* 41-bit instruction stored somewhere in the lowest 48 bits */
++ uint64_t instr;
++
++ /* Instruction normalized with bit_res for easier manipulation */
++ uint64_t norm;
++
++ for (i = 0; i + 16 <= size; i += 16) {
++ mask = branch_table[buf[i] & 0x1F];
++ for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
++ if (((mask >> slot) & 1) == 0)
++ continue;
++
++ byte_pos = bit_pos >> 3;
++ bit_res = bit_pos & 7;
++ instr = 0;
++ for (j = 0; j < 6; ++j)
++ instr |= (uint64_t)(buf[i + j + byte_pos])
++ << (8 * j);
++
++ norm = instr >> bit_res;
++
++ if (((norm >> 37) & 0x0F) == 0x05
++ && ((norm >> 9) & 0x07) == 0) {
++ addr = (norm >> 13) & 0x0FFFFF;
++ addr |= ((uint32_t)(norm >> 36) & 1) << 20;
++ addr <<= 4;
++ addr -= s->pos + (uint32_t)i;
++ addr >>= 4;
++
++ norm &= ~((uint64_t)0x8FFFFF << 13);
++ norm |= (uint64_t)(addr & 0x0FFFFF) << 13;
++ norm |= (uint64_t)(addr & 0x100000)
++ << (36 - 20);
++
++ instr &= (1 << bit_res) - 1;
++ instr |= norm << bit_res;
++
++ for (j = 0; j < 6; j++)
++ buf[i + j + byte_pos]
++ = (uint8_t)(instr >> (8 * j));
++ }
++ }
++ }
++
++ return i;
++}
++#endif
++
++#ifdef XZ_DEC_ARM
++static size_t INIT bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
++{
++ size_t i;
++ uint32_t addr;
++
++ for (i = 0; i + 4 <= size; i += 4) {
++ if (buf[i + 3] == 0xEB) {
++ addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
++ | ((uint32_t)buf[i + 2] << 16);
++ addr <<= 2;
++ addr -= s->pos + (uint32_t)i + 8;
++ addr >>= 2;
++ buf[i] = (uint8_t)addr;
++ buf[i + 1] = (uint8_t)(addr >> 8);
++ buf[i + 2] = (uint8_t)(addr >> 16);
++ }
++ }
++
++ return i;
++}
++#endif
++
++#ifdef XZ_DEC_ARMTHUMB
++static size_t INIT bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
++{
++ size_t i;
++ uint32_t addr;
++
++ for (i = 0; i + 4 <= size; i += 2) {
++ if ((buf[i + 1] & 0xF8) == 0xF0
++ && (buf[i + 3] & 0xF8) == 0xF8) {
++ addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
++ | ((uint32_t)buf[i] << 11)
++ | (((uint32_t)buf[i + 3] & 0x07) << 8)
++ | (uint32_t)buf[i + 2];
++ addr <<= 1;
++ addr -= s->pos + (uint32_t)i + 4;
++ addr >>= 1;
++ buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07));
++ buf[i] = (uint8_t)(addr >> 11);
++ buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07));
++ buf[i + 2] = (uint8_t)addr;
++ i += 2;
++ }
++ }
++
++ return i;
++}
++#endif
++
++#ifdef XZ_DEC_SPARC
++static size_t INIT bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
++{
++ size_t i;
++ uint32_t instr;
++
++ for (i = 0; i + 4 <= size; i += 4) {
++ instr = get_unaligned_be32(buf + i);
++ if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
++ instr <<= 2;
++ instr -= s->pos + (uint32_t)i;
++ instr >>= 2;
++ instr = ((uint32_t)0x40000000 - (instr & 0x400000))
++ | 0x40000000 | (instr & 0x3FFFFF);
++ put_unaligned_be32(instr, buf + i);
++ }
++ }
++
++ return i;
++}
++#endif
++
++/*
++ * Apply the selected BCJ filter. Update *pos and s->pos to match the amount
++ * of data that got filtered.
++ *
++ * NOTE: This is implemented as a switch statement to avoid using function
++ * pointers, which could be problematic in the kernel boot code, which must
++ * avoid pointers to static data (at least on x86).
++ */
++static void INIT bcj_apply(struct xz_dec_bcj *s,
++ uint8_t *buf, size_t *pos, size_t size)
++{
++ size_t filtered;
++
++ buf += *pos;
++ size -= *pos;
++
++ switch (s->type) {
++#ifdef XZ_DEC_X86
++ case BCJ_X86:
++ filtered = bcj_x86(s, buf, size);
++ break;
++#endif
++#ifdef XZ_DEC_POWERPC
++ case BCJ_POWERPC:
++ filtered = bcj_powerpc(s, buf, size);
++ break;
++#endif
++#ifdef XZ_DEC_IA64
++ case BCJ_IA64:
++ filtered = bcj_ia64(s, buf, size);
++ break;
++#endif
++#ifdef XZ_DEC_ARM
++ case BCJ_ARM:
++ filtered = bcj_arm(s, buf, size);
++ break;
++#endif
++#ifdef XZ_DEC_ARMTHUMB
++ case BCJ_ARMTHUMB:
++ filtered = bcj_armthumb(s, buf, size);
++ break;
++#endif
++#ifdef XZ_DEC_SPARC
++ case BCJ_SPARC:
++ filtered = bcj_sparc(s, buf, size);
++ break;
++#endif
++ default:
++ /* Never reached but silence compiler warnings. */
++ filtered = 0;
++ break;
++ }
++
++ *pos += filtered;
++ s->pos += filtered;
++}
++
++/*
++ * Flush pending filtered data from temp to the output buffer.
++ * Move the remaining mixture of possibly filtered and unfiltered
++ * data to the beginning of temp.
++ */
++static void INIT bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
++{
++ size_t copy_size;
++
++ copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos);
++ memcpy(b->out + b->out_pos, s->temp.buf, copy_size);
++ b->out_pos += copy_size;
++
++ s->temp.filtered -= copy_size;
++ s->temp.size -= copy_size;
++ memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size);
++}
++
++/*
++ * The BCJ filter functions are primitive in the sense that they process
++ * the data in chunks of 1-16 bytes. To hide this issue, this function does
++ * some buffering.
++ */
++XZ_EXTERN enum xz_ret INIT xz_dec_bcj_run(struct xz_dec_bcj *s,
++ struct xz_dec_lzma2 *lzma2,
++ struct xz_buf *b)
++{
++ size_t out_start;
++
++ /*
++ * Flush pending already filtered data to the output buffer. Return
++ * immediately if we couldn't flush everything, or if the next
++ * filter in the chain had already returned XZ_STREAM_END.
++ */
++ if (s->temp.filtered > 0) {
++ bcj_flush(s, b);
++ if (s->temp.filtered > 0)
++ return XZ_OK;
++
++ if (s->ret == XZ_STREAM_END)
++ return XZ_STREAM_END;
++ }
++
++ /*
++ * If we have more output space than what is currently pending in
++ * temp, copy the unfiltered data from temp to the output buffer
++ * and try to fill the output buffer by decoding more data from the
++ * next filter in the chain. Apply the BCJ filter on the new data
++ * in the output buffer. If everything cannot be filtered, copy it
++ * to temp and rewind the output buffer position accordingly.
++ */
++ if (s->temp.size < b->out_size - b->out_pos) {
++ out_start = b->out_pos;
++ memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
++ b->out_pos += s->temp.size;
++
++ s->ret = xz_dec_lzma2_run(lzma2, b);
++ if (s->ret != XZ_STREAM_END
++ && (s->ret != XZ_OK || s->single_call))
++ return s->ret;
++
++ bcj_apply(s, b->out, &out_start, b->out_pos);
++
++ /*
++ * As an exception, if the next filter returned XZ_STREAM_END,
++ * we can do that too, since the last few bytes that remain
++ * unfiltered are meant to remain unfiltered.
++ */
++ if (s->ret == XZ_STREAM_END)
++ return XZ_STREAM_END;
++
++ s->temp.size = b->out_pos - out_start;
++ b->out_pos -= s->temp.size;
++ memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
++ }
++
++ /*
++ * If we have unfiltered data in temp, try to fill by decoding more
++ * data from the next filter. Apply the BCJ filter on temp. Then we
++ * hopefully can fill the actual output buffer by copying filtered
++ * data from temp. A mix of filtered and unfiltered data may be left
++ * in temp; it will be taken care of on the next call to this function.
++ */
++ if (s->temp.size > 0) {
++ /* Make b->out{,_pos,_size} temporarily point to s->temp. */
++ s->out = b->out;
++ s->out_pos = b->out_pos;
++ s->out_size = b->out_size;
++ b->out = s->temp.buf;
++ b->out_pos = s->temp.size;
++ b->out_size = sizeof(s->temp.buf);
++
++ s->ret = xz_dec_lzma2_run(lzma2, b);
++
++ s->temp.size = b->out_pos;
++ b->out = s->out;
++ b->out_pos = s->out_pos;
++ b->out_size = s->out_size;
++
++ if (s->ret != XZ_OK && s->ret != XZ_STREAM_END)
++ return s->ret;
++
++ bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size);
++
++ /*
++ * If the next filter returned XZ_STREAM_END, we mark that
++ * everything is filtered, since the last unfiltered bytes
++ * of the stream are meant to be left as is.
++ */
++ if (s->ret == XZ_STREAM_END)
++ s->temp.filtered = s->temp.size;
++
++ bcj_flush(s, b);
++ if (s->temp.filtered > 0)
++ return XZ_OK;
++ }
++
++ return s->ret;
++}
++
++XZ_EXTERN struct xz_dec_bcj *INIT xz_dec_bcj_create(bool_t single_call)
++{
++ struct xz_dec_bcj *s = malloc(sizeof(*s));
++ if (s != NULL)
++ s->single_call = single_call;
++
++ return s;
++}
++
++XZ_EXTERN enum xz_ret INIT xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
++{
++ switch (id) {
++#ifdef XZ_DEC_X86
++ case BCJ_X86:
++#endif
++#ifdef XZ_DEC_POWERPC
++ case BCJ_POWERPC:
++#endif
++#ifdef XZ_DEC_IA64
++ case BCJ_IA64:
++#endif
++#ifdef XZ_DEC_ARM
++ case BCJ_ARM:
++#endif
++#ifdef XZ_DEC_ARMTHUMB
++ case BCJ_ARMTHUMB:
++#endif
++#ifdef XZ_DEC_SPARC
++ case BCJ_SPARC:
++#endif
++ break;
++
++ default:
++ /* Unsupported Filter ID */
++ return XZ_OPTIONS_ERROR;
++ }
++
++ s->type = id;
++ s->ret = XZ_OK;
++ s->pos = 0;
++ s->x86_prev_mask = 0;
++ s->temp.filtered = 0;
++ s->temp.size = 0;
++
++ return XZ_OK;
++}
++
++#endif
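
Of the filters above, bcj_powerpc() shows the core BCJ idea most compactly: the encoder rewrote the 24-bit displacement field of every "bl" instruction (the 0x48000001 pattern) into an absolute address, so that repeated calls to one target become identical byte sequences and compress well, and the decoder subtracts each instruction's stream position to restore the relative form. A hypothetical one-instruction version with a tiny check (names and values are ours):

    #include <stdio.h>
    #include <stdint.h>

    /* Decoder direction of the PowerPC BCJ transform for one instruction
     * word located at byte offset `pos` in the uncompressed stream. */
    static uint32_t bcj_powerpc_one(uint32_t instr, uint32_t pos)
    {
        if ((instr & 0xFC000003) == 0x48000001) {   /* "bl" pattern */
            uint32_t disp = instr & 0x03FFFFFC;     /* displacement field */
            disp = (disp - pos) & 0x03FFFFFC;       /* absolute -> relative */
            instr = 0x48000001 | disp;
        }
        return instr;
    }

    int main(void)
    {
        /* A "bl" at offset 0x100 whose absolutized target is 0x2000
         * must decode back to a relative displacement of 0x1F00. */
        uint32_t out = bcj_powerpc_one(0x48000001 | 0x2000, 0x100);
        printf("relative displacement = 0x%X\n", out & 0x03FFFFFC);
        return out != (0x48000001 | 0x1F00);
    }
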
+diff --git a/xen/common/xz/dec_lzma2.c b/xen/common/xz/dec_lzma2.c
+new file mode 100644
+--- /dev/null
++++ b/xen/common/xz/dec_lzma2.c
+@@ -0,0 +1,1171 @@
++/*
++ * LZMA2 decoder
++ *
++ * Authors: Lasse Collin <lasse.collin@tukaani.org>
++ * Igor Pavlov <http://7-zip.org/>
++ *
++ * This file has been put into the public domain.
++ * You can do whatever you want with this file.
++ */
++
++#include "private.h"
++#include "lzma2.h"
++
++/*
++ * Range decoder initialization eats the first five bytes of each LZMA chunk.
++ */
++#define RC_INIT_BYTES 5
++
++/*
++ * Minimum number of usable input bytes to safely decode one LZMA symbol.
++ * The worst case is that we decode 22 bits using probabilities and 26
++ * direct bits. This may consume at most 20 bytes of input. However,
++ * lzma_main() does an extra normalization before returning, thus we
++ * need to put 21 here.
++ */
++#define LZMA_IN_REQUIRED 21
++
++/*
++ * Dictionary (history buffer)
++ *
++ * These are always true:
++ * start <= pos <= full <= end
++ * pos <= limit <= end
++ *
++ * In multi-call mode, also these are true:
++ * end == size
++ * size <= size_max
++ * allocated <= size
++ *
++ * Most of these variables are size_t to support single-call mode,
++ * in which the dictionary variables address the actual output
++ * buffer directly.
++ */
++struct dictionary {
++ /* Beginning of the history buffer */
++ uint8_t *buf;
++
++ /* Old position in buf (before decoding more data) */
++ size_t start;
++
++ /* Position in buf */
++ size_t pos;
++
++ /*
++ * How full the dictionary is. This is used to detect corrupt input that
++ * would read beyond the beginning of the uncompressed stream.
++ */
++ size_t full;
++
++ /* Write limit; we don't write to buf[limit] or later bytes. */
++ size_t limit;
++
++ /*
++ * End of the dictionary buffer. In multi-call mode, this is
++ * the same as the dictionary size. In single-call mode, this
++ * indicates the size of the output buffer.
++ */
++ size_t end;
++
++ /*
++ * Size of the dictionary as specified in Block Header. This is used
++ * together with "full" to detect corrupt input that would make us
++ * read beyond the beginning of the uncompressed stream.
++ */
++ uint32_t size;
++
++ /*
++ * Maximum allowed dictionary size in multi-call mode.
++ * This is ignored in single-call mode.
++ */
++ uint32_t size_max;
++
++ /*
++ * Amount of memory currently allocated for the dictionary.
++ * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
++ * size_max is always the same as the allocated size.)
++ */
++ uint32_t allocated;
++
++ /* Operation mode */
++ enum xz_mode mode;
++};
++
++/* Range decoder */
++struct rc_dec {
++ uint32_t range;
++ uint32_t code;
++
++ /*
++ * Number of initializing bytes remaining to be read
++ * by rc_read_init().
++ */
++ uint32_t init_bytes_left;
++
++ /*
++ * Buffer from which we read our input. It can be either
++ * temp.buf or the caller-provided input buffer.
++ */
++ const uint8_t *in;
++ size_t in_pos;
++ size_t in_limit;
++};
++
++/* Probabilities for a length decoder. */
++struct lzma_len_dec {
++ /* Probability of match length being at least 10 */
++ uint16_t choice;
++
++ /* Probability of match length being at least 18 */
++ uint16_t choice2;
++
++ /* Probabilities for match lengths 2-9 */
++ uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];
++
++ /* Probabilities for match lengths 10-17 */
++ uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];
++
++ /* Probabilities for match lengths 18-273 */
++ uint16_t high[LEN_HIGH_SYMBOLS];
++};
++
++struct lzma_dec {
++ /* Distances of latest four matches */
++ uint32_t rep0;
++ uint32_t rep1;
++ uint32_t rep2;
++ uint32_t rep3;
++
++ /* Types of the most recently seen LZMA symbols */
++ enum lzma_state state;
++
++ /*
++ * Length of a match. This is updated so that dict_repeat can
++ * be called again to finish repeating the whole match.
++ */
++ uint32_t len;
++
++ /*
++ * LZMA properties or related bit masks (number of literal
++ * context bits, a mask derived from the number of literal
++ * position bits, and a mask derived from the number of
++ * position bits)
++ */
++ uint32_t lc;
++ uint32_t literal_pos_mask; /* (1 << lp) - 1 */
++ uint32_t pos_mask; /* (1 << pb) - 1 */
++
++ /* If 1, it's a match. Otherwise it's a single 8-bit literal. */
++ uint16_t is_match[STATES][POS_STATES_MAX];
++
++ /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
++ uint16_t is_rep[STATES];
++
++ /*
++ * If 0, distance of a repeated match is rep0.
++ * Otherwise check is_rep1.
++ */
++ uint16_t is_rep0[STATES];
++
++ /*
++ * If 0, distance of a repeated match is rep1.
++ * Otherwise check is_rep2.
++ */
++ uint16_t is_rep1[STATES];
++
++ /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
++ uint16_t is_rep2[STATES];
++
++ /*
++ * If 1, the repeated match has length of one byte. Otherwise
++ * the length is decoded from rep_len_decoder.
++ */
++ uint16_t is_rep0_long[STATES][POS_STATES_MAX];
++
++ /*
++ * Probability tree for the highest two bits of the match
++ * distance. There is a separate probability tree for match
++ * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
++ */
++ uint16_t dist_slot[DIST_STATES][DIST_SLOTS];
++
++ /*
++ * Probability trees for additional bits for match distance
++ * when the distance is in the range [4, 127].
++ */
++ uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];
++
++ /*
++ * Probability tree for the lowest four bits of a match
++ * distance that is equal to or greater than 128.
++ */
++ uint16_t dist_align[ALIGN_SIZE];
++
++ /* Length of a normal match */
++ struct lzma_len_dec match_len_dec;
++
++ /* Length of a repeated match */
++ struct lzma_len_dec rep_len_dec;
++
++ /* Probabilities of literals */
++ uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
++};
++
++struct lzma2_dec {
++ /* Position in xz_dec_lzma2_run(). */
++ enum lzma2_seq {
++ SEQ_CONTROL,
++ SEQ_UNCOMPRESSED_1,
++ SEQ_UNCOMPRESSED_2,
++ SEQ_COMPRESSED_0,
++ SEQ_COMPRESSED_1,
++ SEQ_PROPERTIES,
++ SEQ_LZMA_PREPARE,
++ SEQ_LZMA_RUN,
++ SEQ_COPY
++ } sequence;
++
++ /* Next position after decoding the compressed size of the chunk. */
++ enum lzma2_seq next_sequence;
++
++ /* Uncompressed size of LZMA chunk (2 MiB at maximum) */
++ uint32_t uncompressed;
++
++ /*
++ * Compressed size of LZMA chunk or compressed/uncompressed
++ * size of uncompressed chunk (64 KiB at maximum)
++ */
++ uint32_t compressed;
++
++ /*
++ * True if dictionary reset is needed. This is false before
++ * the first chunk (LZMA or uncompressed).
++ */
++ bool_t need_dict_reset;
++
++ /*
++ * True if new LZMA properties are needed. This is false
++ * before the first LZMA chunk.
++ */
++ bool_t need_props;
++};
++
++struct xz_dec_lzma2 {
++ /*
++ * The order below is important on x86 to reduce code size and
++ * it shouldn't hurt on other platforms. Everything up to and
++ * including lzma.pos_mask are in the first 128 bytes on x86-32,
++ * which allows using smaller instructions to access those
++ * variables. On x86-64, fewer variables fit into the first 128
++ * bytes, but this is still the best order without sacrificing
++ * the readability by splitting the structures.
++ */
++ struct rc_dec rc;
++ struct dictionary dict;
++ struct lzma2_dec lzma2;
++ struct lzma_dec lzma;
++
++ /*
++ * Temporary buffer which holds small number of input bytes between
++ * decoder calls. See lzma2_lzma() for details.
++ */
++ struct {
++ uint32_t size;
++ uint8_t buf[3 * LZMA_IN_REQUIRED];
++ } temp;
++};
++
++/**************
++ * Dictionary *
++ **************/
++
++/*
++ * Reset the dictionary state. When in single-call mode, set up the beginning
++ * of the dictionary to point to the actual output buffer.
++ */
++static void INIT dict_reset(struct dictionary *dict, struct xz_buf *b)
++{
++ if (DEC_IS_SINGLE(dict->mode)) {
++ dict->buf = b->out + b->out_pos;
++ dict->end = b->out_size - b->out_pos;
++ }
++
++ dict->start = 0;
++ dict->pos = 0;
++ dict->limit = 0;
++ dict->full = 0;
++}
++
++/* Set dictionary write limit */
++static void INIT dict_limit(struct dictionary *dict, size_t out_max)
++{
++ if (dict->end - dict->pos <= out_max)
++ dict->limit = dict->end;
++ else
++ dict->limit = dict->pos + out_max;
++}
++
++/* Return true if at least one byte can be written into the dictionary. */
++static inline bool_t INIT dict_has_space(const struct dictionary *dict)
++{
++ return dict->pos < dict->limit;
++}
++
++/*
++ * Get a byte from the dictionary at the given distance. The distance is
++ * assumed to be valid, or as a special case, zero when the dictionary is
++ * still empty. This special case is needed for single-call decoding to
++ * avoid writing a '\0' to the end of the destination buffer.
++ */
++static inline uint32_t INIT dict_get(const struct dictionary *dict, uint32_t dist)
++{
++ size_t offset = dict->pos - dist - 1;
++
++ if (dist >= dict->pos)
++ offset += dict->end;
++
++ return dict->full > 0 ? dict->buf[offset] : 0;
++}
++
++/*
++ * Put one byte into the dictionary. It is assumed that there is space for it.
++ */
++static inline void INIT dict_put(struct dictionary *dict, uint8_t byte)
++{
++ dict->buf[dict->pos++] = byte;
++
++ if (dict->full < dict->pos)
++ dict->full = dict->pos;
++}
++
++/*
++ * Repeat given number of bytes from the given distance. If the distance is
++ * invalid, false is returned. On success, true is returned and *len is
++ * updated to indicate how many bytes were left to be repeated.
++ */
++static bool_t INIT dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
++{
++ size_t back;
++ uint32_t left;
++
++ if (dist >= dict->full || dist >= dict->size)
++ return false;
++
++ left = min_t(size_t, dict->limit - dict->pos, *len);
++ *len -= left;
++
++ back = dict->pos - dist - 1;
++ if (dist >= dict->pos)
++ back += dict->end;
++
++ do {
++ dict->buf[dict->pos++] = dict->buf[back++];
++ if (back == dict->end)
++ back = 0;
++ } while (--left > 0);
++
++ if (dict->full < dict->pos)
++ dict->full = dict->pos;
++
++ return true;
++}
++
++/* Copy uncompressed data as is from input to dictionary and output buffers. */
++static void INIT dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
++ uint32_t *left)
++{
++ size_t copy_size;
++
++ while (*left > 0 && b->in_pos < b->in_size
++ && b->out_pos < b->out_size) {
++ copy_size = min(b->in_size - b->in_pos,
++ b->out_size - b->out_pos);
++ if (copy_size > dict->end - dict->pos)
++ copy_size = dict->end - dict->pos;
++ if (copy_size > *left)
++ copy_size = *left;
++
++ *left -= copy_size;
++
++ memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
++ dict->pos += copy_size;
++
++ if (dict->full < dict->pos)
++ dict->full = dict->pos;
++
++ if (DEC_IS_MULTI(dict->mode)) {
++ if (dict->pos == dict->end)
++ dict->pos = 0;
++
++ memcpy(b->out + b->out_pos, b->in + b->in_pos,
++ copy_size);
++ }
++
++ dict->start = dict->pos;
++
++ b->out_pos += copy_size;
++ b->in_pos += copy_size;
++ }
++}
++
++/*
++ * Flush pending data from dictionary to b->out. It is assumed that there is
++ * enough space in b->out. This is guaranteed because caller uses dict_limit()
++ * before decoding data into the dictionary.
++ */
++static uint32_t INIT dict_flush(struct dictionary *dict, struct xz_buf *b)
++{
++ size_t copy_size = dict->pos - dict->start;
++
++ if (DEC_IS_MULTI(dict->mode)) {
++ if (dict->pos == dict->end)
++ dict->pos = 0;
++
++ memcpy(b->out + b->out_pos, dict->buf + dict->start,
++ copy_size);
++ }
++
++ dict->start = dict->pos;
++ b->out_pos += copy_size;
++ return copy_size;
++}
++
++/*****************
++ * Range decoder *
++ *****************/
++
++/* Reset the range decoder. */
++static void INIT rc_reset(struct rc_dec *rc)
++{
++ rc->range = (uint32_t)-1;
++ rc->code = 0;
++ rc->init_bytes_left = RC_INIT_BYTES;
++}
++
++/*
++ * Read the first five initial bytes into rc->code if they haven't been
++ * read already. (Yes, the first byte gets completely ignored.)
++ */
++static bool_t INIT rc_read_init(struct rc_dec *rc, struct xz_buf *b)
++{
++ while (rc->init_bytes_left > 0) {
++ if (b->in_pos == b->in_size)
++ return false;
++
++ rc->code = (rc->code << 8) + b->in[b->in_pos++];
++ --rc->init_bytes_left;
++ }
++
++ return true;
++}
++
++/* Return true if there may not be enough input for the next decoding loop. */
++static inline bool_t INIT rc_limit_exceeded(const struct rc_dec *rc)
++{
++ return rc->in_pos > rc->in_limit;
++}
++
++/*
++ * Return true if it is possible (from point of view of range decoder) that
++ * we have reached the end of the LZMA chunk.
++ */
++static inline bool_t INIT rc_is_finished(const struct rc_dec *rc)
++{
++ return rc->code == 0;
++}
++
++/* Read the next input byte if needed. */
++static always_inline void rc_normalize(struct rc_dec *rc)
++{
++ if (rc->range < RC_TOP_VALUE) {
++ rc->range <<= RC_SHIFT_BITS;
++ rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
++ }
++}
++
++/*
++ * Decode one bit. In some versions, this function has been split into three
++ * functions so that the compiler is supposed to be able to more easily avoid
++ * an extra branch. In this particular version of the LZMA decoder, this
++ * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
++ * on x86). Using a non-split version results in nicer looking code too.
++ *
++ * NOTE: This must return an int. Do not make it return a bool or the speed
++ * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
++ * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
++ */
++static always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
++{
++ uint32_t bound;
++ int bit;
++
++ rc_normalize(rc);
++ bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
++ if (rc->code < bound) {
++ rc->range = bound;
++ *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
++ bit = 0;
++ } else {
++ rc->range -= bound;
++ rc->code -= bound;
++ *prob -= *prob >> RC_MOVE_BITS;
++ bit = 1;
++ }
++
++ return bit;
++}
++
++/* Decode a bittree starting from the most significant bit. */
++static always_inline uint32_t rc_bittree(struct rc_dec *rc,
++ uint16_t *probs, uint32_t limit)
++{
++ uint32_t symbol = 1;
++
++ do {
++ if (rc_bit(rc, &probs[symbol]))
++ symbol = (symbol << 1) + 1;
++ else
++ symbol <<= 1;
++ } while (symbol < limit);
++
++ return symbol;
++}
++
++/* Decode a bittree starting from the least significant bit. */
++static always_inline void rc_bittree_reverse(struct rc_dec *rc,
++ uint16_t *probs,
++ uint32_t *dest, uint32_t limit)
++{
++ uint32_t symbol = 1;
++ uint32_t i = 0;
++
++ do {
++ if (rc_bit(rc, &probs[symbol])) {
++ symbol = (symbol << 1) + 1;
++ *dest += 1 << i;
++ } else {
++ symbol <<= 1;
++ }
++ } while (++i < limit);
++}
++
++/* Decode direct bits (fixed fifty-fifty probability) */
++static inline void INIT rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
++{
++ uint32_t mask;
++
++ do {
++ rc_normalize(rc);
++ rc->range >>= 1;
++ rc->code -= rc->range;
++ mask = (uint32_t)0 - (rc->code >> 31);
++ rc->code += rc->range & mask;
++ *dest = (*dest << 1) + (mask + 1);
++ } while (--limit > 0);
++}
++
++/********
++ * LZMA *
++ ********/
++
++/* Get pointer to literal coder probability array. */
++static uint16_t *INIT lzma_literal_probs(struct xz_dec_lzma2 *s)
++{
++ uint32_t prev_byte = dict_get(&s->dict, 0);
++ uint32_t low = prev_byte >> (8 - s->lzma.lc);
++ uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
++ return s->lzma.literal[low + high];
++}
++
++/* Decode a literal (one 8-bit byte) */
++static void INIT lzma_literal(struct xz_dec_lzma2 *s)
++{
++ uint16_t *probs;
++ uint32_t symbol;
++ uint32_t match_byte;
++ uint32_t match_bit;
++ uint32_t offset;
++ uint32_t i;
++
++ probs = lzma_literal_probs(s);
++
++ if (lzma_state_is_literal(s->lzma.state)) {
++ symbol = rc_bittree(&s->rc, probs, 0x100);
++ } else {
++ symbol = 1;
++ match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
++ offset = 0x100;
++
++ do {
++ match_bit = match_byte & offset;
++ match_byte <<= 1;
++ i = offset + match_bit + symbol;
++
++ if (rc_bit(&s->rc, &probs[i])) {
++ symbol = (symbol << 1) + 1;
++ offset &= match_bit;
++ } else {
++ symbol <<= 1;
++ offset &= ~match_bit;
++ }
++ } while (symbol < 0x100);
++ }
++
++ dict_put(&s->dict, (uint8_t)symbol);
++ lzma_state_literal(&s->lzma.state);
++}
++
++/* Decode the length of the match into s->lzma.len. */
++static void INIT lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
++ uint32_t pos_state)
++{
++ uint16_t *probs;
++ uint32_t limit;
++
++ if (!rc_bit(&s->rc, &l->choice)) {
++ probs = l->low[pos_state];
++ limit = LEN_LOW_SYMBOLS;
++ s->lzma.len = MATCH_LEN_MIN;
++ } else {
++ if (!rc_bit(&s->rc, &l->choice2)) {
++ probs = l->mid[pos_state];
++ limit = LEN_MID_SYMBOLS;
++ s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
++ } else {
++ probs = l->high;
++ limit = LEN_HIGH_SYMBOLS;
++ s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
++ + LEN_MID_SYMBOLS;
++ }
++ }
++
++ s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
++}
++
++/* Decode a match. The distance will be stored in s->lzma.rep0. */
++static void INIT lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
++{
++ uint16_t *probs;
++ uint32_t dist_slot;
++ uint32_t limit;
++
++ lzma_state_match(&s->lzma.state);
++
++ s->lzma.rep3 = s->lzma.rep2;
++ s->lzma.rep2 = s->lzma.rep1;
++ s->lzma.rep1 = s->lzma.rep0;
++
++ lzma_len(s, &s->lzma.match_len_dec, pos_state);
++
++ probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
++ dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;
++
++ if (dist_slot < DIST_MODEL_START) {
++ s->lzma.rep0 = dist_slot;
++ } else {
++ limit = (dist_slot >> 1) - 1;
++ s->lzma.rep0 = 2 + (dist_slot & 1);
++
++ if (dist_slot < DIST_MODEL_END) {
++ s->lzma.rep0 <<= limit;
++ probs = s->lzma.dist_special + s->lzma.rep0
++ - dist_slot - 1;
++ rc_bittree_reverse(&s->rc, probs,
++ &s->lzma.rep0, limit);
++ } else {
++ rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
++ s->lzma.rep0 <<= ALIGN_BITS;
++ rc_bittree_reverse(&s->rc, s->lzma.dist_align,
++ &s->lzma.rep0, ALIGN_BITS);
++ }
++ }
++}
++
++/*
++ * Decode a repeated match. The distance is one of the four most recently
++ * seen matches. The distance will be stored in s->lzma.rep0.
++ */
++static void INIT lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
++{
++ uint32_t tmp;
++
++ if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
++ if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
++ s->lzma.state][pos_state])) {
++ lzma_state_short_rep(&s->lzma.state);
++ s->lzma.len = 1;
++ return;
++ }
++ } else {
++ if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
++ tmp = s->lzma.rep1;
++ } else {
++ if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
++ tmp = s->lzma.rep2;
++ } else {
++ tmp = s->lzma.rep3;
++ s->lzma.rep3 = s->lzma.rep2;
++ }
++
++ s->lzma.rep2 = s->lzma.rep1;
++ }
++
++ s->lzma.rep1 = s->lzma.rep0;
++ s->lzma.rep0 = tmp;
++ }
++
++ lzma_state_long_rep(&s->lzma.state);
++ lzma_len(s, &s->lzma.rep_len_dec, pos_state);
++}
++
++/* LZMA decoder core */
++static bool_t INIT lzma_main(struct xz_dec_lzma2 *s)
++{
++ uint32_t pos_state;
++
++ /*
++ * If the dictionary was reached during the previous call, try to
++ * finish the possibly pending repeat in the dictionary.
++ */
++ if (dict_has_space(&s->dict) && s->lzma.len > 0)
++ dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);
++
++ /*
++ * Decode more LZMA symbols. One iteration may consume up to
++ * LZMA_IN_REQUIRED - 1 bytes.
++ */
++ while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
++ pos_state = s->dict.pos & s->lzma.pos_mask;
++
++ if (!rc_bit(&s->rc, &s->lzma.is_match[
++ s->lzma.state][pos_state])) {
++ lzma_literal(s);
++ } else {
++ if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
++ lzma_rep_match(s, pos_state);
++ else
++ lzma_match(s, pos_state);
++
++ if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
++ return false;
++ }
++ }
++
++ /*
++ * Having the range decoder always normalized when we are outside
++	 * this function makes it easier to correctly handle the end of the chunk.
++ */
++ rc_normalize(&s->rc);
++
++ return true;
++}
++
++/*
++ * Reset the LZMA decoder and range decoder state. The dictionary is not
++ * reset here, because the LZMA state may be reset without resetting the
++ * dictionary.
++ */
++static void INIT lzma_reset(struct xz_dec_lzma2 *s)
++{
++ uint16_t *probs;
++ size_t i;
++
++ s->lzma.state = STATE_LIT_LIT;
++ s->lzma.rep0 = 0;
++ s->lzma.rep1 = 0;
++ s->lzma.rep2 = 0;
++ s->lzma.rep3 = 0;
++
++ /*
++ * All probabilities are initialized to the same value. This hack
++ * makes the code smaller by avoiding a separate loop for each
++ * probability array.
++ *
++	 * This could be optimized so that only the part of the literal
++	 * probabilities that is actually required gets initialized. In
++	 * the common case that would write 12 KiB less.
++ */
++ probs = s->lzma.is_match[0];
++ for (i = 0; i < PROBS_TOTAL; ++i)
++ probs[i] = RC_BIT_MODEL_TOTAL / 2;
++
++ rc_reset(&s->rc);
++}
++
++/*
++ * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
++ * from the decoded lp and pb values. On success, the LZMA decoder state is
++ * reset and true is returned.
++ */
++static bool_t INIT lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
++{
++ if (props > (4 * 5 + 4) * 9 + 8)
++ return false;
++
++ s->lzma.pos_mask = 0;
++ while (props >= 9 * 5) {
++ props -= 9 * 5;
++ ++s->lzma.pos_mask;
++ }
++
++ s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;
++
++ s->lzma.literal_pos_mask = 0;
++ while (props >= 9) {
++ props -= 9;
++ ++s->lzma.literal_pos_mask;
++ }
++
++ s->lzma.lc = props;
++
++ if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
++ return false;
++
++ s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;
++
++ lzma_reset(s);
++
++ return true;
++}
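++
++/*
++ * A worked example of the props decoding above: the common encoder
++ * default lc=3, lp=0, pb=2 is encoded as (2 * 5 + 0) * 9 + 3 = 93.
++ * The first loop subtracts 45 twice, giving pos_mask = (1 << 2) - 1 = 3;
++ * the second loop leaves literal_pos_mask = 0 and lc = 3, which passes
++ * the lc + lp <= 4 check.
++ */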
++
++/*********
++ * LZMA2 *
++ *********/
++
++/*
++ * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
++ * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
++ * wrapper function takes care of making the LZMA decoder's assumption safe.
++ *
++ * As long as there is plenty of input left to be decoded in the current LZMA
++ * chunk, we decode directly from the caller-supplied input buffer until
++ * there are LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into
++ * s->temp.buf, which (hopefully) gets filled on the next call to this
++ * function. We decode a few bytes from the temporary buffer so that we can
++ * continue decoding from the caller-supplied input buffer again.
++ */
++static bool_t INIT lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
++{
++ size_t in_avail;
++ uint32_t tmp;
++
++ in_avail = b->in_size - b->in_pos;
++ if (s->temp.size > 0 || s->lzma2.compressed == 0) {
++ tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
++ if (tmp > s->lzma2.compressed - s->temp.size)
++ tmp = s->lzma2.compressed - s->temp.size;
++ if (tmp > in_avail)
++ tmp = in_avail;
++
++ memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);
++
++ if (s->temp.size + tmp == s->lzma2.compressed) {
++ memzero(s->temp.buf + s->temp.size + tmp,
++ sizeof(s->temp.buf)
++ - s->temp.size - tmp);
++ s->rc.in_limit = s->temp.size + tmp;
++ } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
++ s->temp.size += tmp;
++ b->in_pos += tmp;
++ return true;
++ } else {
++ s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
++ }
++
++ s->rc.in = s->temp.buf;
++ s->rc.in_pos = 0;
++
++ if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
++ return false;
++
++ s->lzma2.compressed -= s->rc.in_pos;
++
++ if (s->rc.in_pos < s->temp.size) {
++ s->temp.size -= s->rc.in_pos;
++ memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
++ s->temp.size);
++ return true;
++ }
++
++ b->in_pos += s->rc.in_pos - s->temp.size;
++ s->temp.size = 0;
++ }
++
++ in_avail = b->in_size - b->in_pos;
++ if (in_avail >= LZMA_IN_REQUIRED) {
++ s->rc.in = b->in;
++ s->rc.in_pos = b->in_pos;
++
++ if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
++ s->rc.in_limit = b->in_pos + s->lzma2.compressed;
++ else
++ s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;
++
++ if (!lzma_main(s))
++ return false;
++
++ in_avail = s->rc.in_pos - b->in_pos;
++ if (in_avail > s->lzma2.compressed)
++ return false;
++
++ s->lzma2.compressed -= in_avail;
++ b->in_pos = s->rc.in_pos;
++ }
++
++ in_avail = b->in_size - b->in_pos;
++ if (in_avail < LZMA_IN_REQUIRED) {
++ if (in_avail > s->lzma2.compressed)
++ in_avail = s->lzma2.compressed;
++
++ memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
++ s->temp.size = in_avail;
++ b->in_pos += in_avail;
++ }
++
++ return true;
++}
++
++/*
++ * Take care of the LZMA2 control layer, and forward the job of actual LZMA
++ * decoding or copying of uncompressed chunks to other functions.
++ */
++XZ_EXTERN enum xz_ret INIT xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
++ struct xz_buf *b)
++{
++ uint32_t tmp;
++
++ while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
++ switch (s->lzma2.sequence) {
++ case SEQ_CONTROL:
++ /*
++ * LZMA2 control byte
++ *
++ * Exact values:
++ * 0x00 End marker
++ * 0x01 Dictionary reset followed by
++ * an uncompressed chunk
++ * 0x02 Uncompressed chunk (no dictionary reset)
++ *
++ * Highest three bits (s->control & 0xE0):
++ * 0xE0 Dictionary reset, new properties and state
++ * reset, followed by LZMA compressed chunk
++ * 0xC0 New properties and state reset, followed
++ * by LZMA compressed chunk (no dictionary
++ * reset)
++ * 0xA0 State reset using old properties,
++ * followed by LZMA compressed chunk (no
++ * dictionary reset)
++ * 0x80 LZMA chunk (no dictionary or state reset)
++ *
++ * For LZMA compressed chunks, the lowest five bits
++			 * (s->control & 0x1F) are the highest bits of the
++ * uncompressed size (bits 16-20).
++ *
++ * A new LZMA2 stream must begin with a dictionary
++ * reset. The first LZMA chunk must set new
++ * properties and reset the LZMA state.
++ *
++ * Values that don't match anything described above
++ * are invalid and we return XZ_DATA_ERROR.
++ */
++ tmp = b->in[b->in_pos++];
++
++ if (tmp >= 0xE0 || tmp == 0x01) {
++ s->lzma2.need_props = true;
++ s->lzma2.need_dict_reset = false;
++ dict_reset(&s->dict, b);
++ } else if (s->lzma2.need_dict_reset) {
++ return XZ_DATA_ERROR;
++ }
++
++ if (tmp >= 0x80) {
++ s->lzma2.uncompressed = (tmp & 0x1F) << 16;
++ s->lzma2.sequence = SEQ_UNCOMPRESSED_1;
++
++ if (tmp >= 0xC0) {
++ /*
++ * When there are new properties,
++ * state reset is done at
++ * SEQ_PROPERTIES.
++ */
++ s->lzma2.need_props = false;
++ s->lzma2.next_sequence
++ = SEQ_PROPERTIES;
++
++ } else if (s->lzma2.need_props) {
++ return XZ_DATA_ERROR;
++
++ } else {
++ s->lzma2.next_sequence
++ = SEQ_LZMA_PREPARE;
++ if (tmp >= 0xA0)
++ lzma_reset(s);
++ }
++ } else {
++ if (tmp == 0x00)
++ return XZ_STREAM_END;
++
++ if (tmp > 0x02)
++ return XZ_DATA_ERROR;
++
++ s->lzma2.sequence = SEQ_COMPRESSED_0;
++ s->lzma2.next_sequence = SEQ_COPY;
++ }
++
++ break;
++
++ case SEQ_UNCOMPRESSED_1:
++ s->lzma2.uncompressed
++ += (uint32_t)b->in[b->in_pos++] << 8;
++ s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
++ break;
++
++ case SEQ_UNCOMPRESSED_2:
++ s->lzma2.uncompressed
++ += (uint32_t)b->in[b->in_pos++] + 1;
++ s->lzma2.sequence = SEQ_COMPRESSED_0;
++ break;
++
++ case SEQ_COMPRESSED_0:
++ s->lzma2.compressed
++ = (uint32_t)b->in[b->in_pos++] << 8;
++ s->lzma2.sequence = SEQ_COMPRESSED_1;
++ break;
++
++ case SEQ_COMPRESSED_1:
++ s->lzma2.compressed
++ += (uint32_t)b->in[b->in_pos++] + 1;
++ s->lzma2.sequence = s->lzma2.next_sequence;
++ break;
++
++ case SEQ_PROPERTIES:
++ if (!lzma_props(s, b->in[b->in_pos++]))
++ return XZ_DATA_ERROR;
++
++ s->lzma2.sequence = SEQ_LZMA_PREPARE;
++
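++			/* no break: fall through to SEQ_LZMA_PREPARE */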
++ case SEQ_LZMA_PREPARE:
++ if (s->lzma2.compressed < RC_INIT_BYTES)
++ return XZ_DATA_ERROR;
++
++ if (!rc_read_init(&s->rc, b))
++ return XZ_OK;
++
++ s->lzma2.compressed -= RC_INIT_BYTES;
++ s->lzma2.sequence = SEQ_LZMA_RUN;
++
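++			/* no break: fall through to SEQ_LZMA_RUN */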
++ case SEQ_LZMA_RUN:
++ /*
++ * Set dictionary limit to indicate how much we want
++ * to be encoded at maximum. Decode new data into the
++ * dictionary. Flush the new data from dictionary to
++ * b->out. Check if we finished decoding this chunk.
++ * In case the dictionary got full but we didn't fill
++ * the output buffer yet, we may run this loop
++ * multiple times without changing s->lzma2.sequence.
++ */
++ dict_limit(&s->dict, min_t(size_t,
++ b->out_size - b->out_pos,
++ s->lzma2.uncompressed));
++ if (!lzma2_lzma(s, b))
++ return XZ_DATA_ERROR;
++
++ s->lzma2.uncompressed -= dict_flush(&s->dict, b);
++
++ if (s->lzma2.uncompressed == 0) {
++ if (s->lzma2.compressed > 0 || s->lzma.len > 0
++ || !rc_is_finished(&s->rc))
++ return XZ_DATA_ERROR;
++
++ rc_reset(&s->rc);
++ s->lzma2.sequence = SEQ_CONTROL;
++
++ } else if (b->out_pos == b->out_size
++ || (b->in_pos == b->in_size
++ && s->temp.size
++ < s->lzma2.compressed)) {
++ return XZ_OK;
++ }
++
++ break;
++
++ case SEQ_COPY:
++ dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
++ if (s->lzma2.compressed > 0)
++ return XZ_OK;
++
++ s->lzma2.sequence = SEQ_CONTROL;
++ break;
++ }
++ }
++
++ return XZ_OK;
++}
++
++XZ_EXTERN struct xz_dec_lzma2 *INIT xz_dec_lzma2_create(enum xz_mode mode,
++ uint32_t dict_max)
++{
++ struct xz_dec_lzma2 *s = malloc(sizeof(*s));
++ if (s == NULL)
++ return NULL;
++
++ s->dict.mode = mode;
++ s->dict.size_max = dict_max;
++
++ if (DEC_IS_PREALLOC(mode)) {
++ s->dict.buf = large_malloc(dict_max);
++ if (s->dict.buf == NULL) {
++ free(s);
++ return NULL;
++ }
++ } else if (DEC_IS_DYNALLOC(mode)) {
++ s->dict.buf = NULL;
++ s->dict.allocated = 0;
++ }
++
++ return s;
++}
++
++XZ_EXTERN enum xz_ret INIT xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
++{
++ /* This limits dictionary size to 3 GiB to keep parsing simpler. */
++ if (props > 39)
++ return XZ_OPTIONS_ERROR;
++
++ s->dict.size = 2 + (props & 1);
++ s->dict.size <<= (props >> 1) + 11;
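++	/*
++	 * Informally: props 0 gives the minimum of 2 << 11 = 4 KiB,
++	 * props 19 gives 3 << 20 = 3 MiB, and the largest accepted
++	 * value, props 39, gives 3 << 30 = 3 GiB.
++	 */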
++
++ if (DEC_IS_MULTI(s->dict.mode)) {
++ if (s->dict.size > s->dict.size_max)
++ return XZ_MEMLIMIT_ERROR;
++
++ s->dict.end = s->dict.size;
++
++ if (DEC_IS_DYNALLOC(s->dict.mode)) {
++ if (s->dict.allocated < s->dict.size) {
++ large_free(s->dict.buf);
++ s->dict.buf = large_malloc(s->dict.size);
++ if (s->dict.buf == NULL) {
++ s->dict.allocated = 0;
++ return XZ_MEM_ERROR;
++ }
++ }
++ }
++ }
++
++ s->lzma.len = 0;
++
++ s->lzma2.sequence = SEQ_CONTROL;
++ s->lzma2.need_dict_reset = true;
++
++ s->temp.size = 0;
++
++ return XZ_OK;
++}
++
++XZ_EXTERN void INIT xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
++{
++ if (DEC_IS_MULTI(s->dict.mode))
++ large_free(s->dict.buf);
++
++ free(s);
++}
+diff --git a/xen/common/xz/dec_stream.c b/xen/common/xz/dec_stream.c
+new file mode 100644
+--- /dev/null
++++ b/xen/common/xz/dec_stream.c
+@@ -0,0 +1,821 @@
++/*
++ * .xz Stream decoder
++ *
++ * Author: Lasse Collin <lasse.collin@tukaani.org>
++ *
++ * This file has been put into the public domain.
++ * You can do whatever you want with this file.
++ */
++
++#include "private.h"
++#include "stream.h"
++
++/* Hash used to validate the Index field */
++struct xz_dec_hash {
++ vli_type unpadded;
++ vli_type uncompressed;
++ uint32_t crc32;
++};
++
++struct xz_dec {
++ /* Position in dec_main() */
++ enum {
++ SEQ_STREAM_HEADER,
++ SEQ_BLOCK_START,
++ SEQ_BLOCK_HEADER,
++ SEQ_BLOCK_UNCOMPRESS,
++ SEQ_BLOCK_PADDING,
++ SEQ_BLOCK_CHECK,
++ SEQ_INDEX,
++ SEQ_INDEX_PADDING,
++ SEQ_INDEX_CRC32,
++ SEQ_STREAM_FOOTER
++ } sequence;
++
++ /* Position in variable-length integers and Check fields */
++ uint32_t pos;
++
++ /* Variable-length integer decoded by dec_vli() */
++ vli_type vli;
++
++ /* Saved in_pos and out_pos */
++ size_t in_start;
++ size_t out_start;
++
++ /* CRC32 value in Block or Index */
++ uint32_t crc32;
++
++ /* Type of the integrity check calculated from uncompressed data */
++ enum xz_check check_type;
++
++ /* Operation mode */
++ enum xz_mode mode;
++
++ /*
++ * True if the next call to xz_dec_run() is allowed to return
++ * XZ_BUF_ERROR.
++ */
++ bool_t allow_buf_error;
++
++ /* Information stored in Block Header */
++ struct {
++ /*
++ * Value stored in the Compressed Size field, or
++ * VLI_UNKNOWN if Compressed Size is not present.
++ */
++ vli_type compressed;
++
++ /*
++ * Value stored in the Uncompressed Size field, or
++ * VLI_UNKNOWN if Uncompressed Size is not present.
++ */
++ vli_type uncompressed;
++
++ /* Size of the Block Header field */
++ uint32_t size;
++ } block_header;
++
++ /* Information collected when decoding Blocks */
++ struct {
++ /* Observed compressed size of the current Block */
++ vli_type compressed;
++
++ /* Observed uncompressed size of the current Block */
++ vli_type uncompressed;
++
++ /* Number of Blocks decoded so far */
++ vli_type count;
++
++ /*
++ * Hash calculated from the Block sizes. This is used to
++ * validate the Index field.
++ */
++ struct xz_dec_hash hash;
++ } block;
++
++ /* Variables needed when verifying the Index field */
++ struct {
++ /* Position in dec_index() */
++ enum {
++ SEQ_INDEX_COUNT,
++ SEQ_INDEX_UNPADDED,
++ SEQ_INDEX_UNCOMPRESSED
++ } sequence;
++
++ /* Size of the Index in bytes */
++ vli_type size;
++
++ /* Number of Records (matches block.count in valid files) */
++ vli_type count;
++
++ /*
++ * Hash calculated from the Records (matches block.hash in
++ * valid files).
++ */
++ struct xz_dec_hash hash;
++ } index;
++
++ /*
++ * Temporary buffer needed to hold Stream Header, Block Header,
++ * and Stream Footer. The Block Header is the biggest (1 KiB)
++ * so we reserve space according to that. buf[] has to be aligned
++ * to a multiple of four bytes; the size_t variables before it
++ * should guarantee this.
++ */
++ struct {
++ size_t pos;
++ size_t size;
++ uint8_t buf[1024];
++ } temp;
++
++ struct xz_dec_lzma2 *lzma2;
++
++#ifdef XZ_DEC_BCJ
++ struct xz_dec_bcj *bcj;
++ bool_t bcj_active;
++#endif
++};
++
++#ifdef XZ_DEC_ANY_CHECK
++/* Sizes of the Check field with different Check IDs */
++static const uint8_t check_sizes[16] = {
++ 0,
++ 4, 4, 4,
++ 8, 8, 8,
++ 16, 16, 16,
++ 32, 32, 32,
++ 64, 64, 64
++};
++#endif
++
++/*
++ * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller
++ * must have set s->temp.size to indicate how much data we are supposed
++ * to copy into s->temp.buf. Return true once s->temp.pos has reached
++ * s->temp.size.
++ */
++static bool_t INIT fill_temp(struct xz_dec *s, struct xz_buf *b)
++{
++ size_t copy_size = min_t(size_t,
++ b->in_size - b->in_pos, s->temp.size - s->temp.pos);
++
++ memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size);
++ b->in_pos += copy_size;
++ s->temp.pos += copy_size;
++
++ if (s->temp.pos == s->temp.size) {
++ s->temp.pos = 0;
++ return true;
++ }
++
++ return false;
++}
++
++/* Decode a variable-length integer (little-endian base-128 encoding) */
++static enum xz_ret INIT dec_vli(struct xz_dec *s, const uint8_t *in,
++ size_t *in_pos, size_t in_size)
++{
++ uint8_t byte;
++
++ if (s->pos == 0)
++ s->vli = 0;
++
++ while (*in_pos < in_size) {
++ byte = in[*in_pos];
++ ++*in_pos;
++
++ s->vli |= (vli_type)(byte & 0x7F) << s->pos;
++
++ if ((byte & 0x80) == 0) {
++ /* Don't allow non-minimal encodings. */
++ if (byte == 0 && s->pos != 0)
++ return XZ_DATA_ERROR;
++
++ s->pos = 0;
++ return XZ_STREAM_END;
++ }
++
++ s->pos += 7;
++ if (s->pos == 7 * VLI_BYTES_MAX)
++ return XZ_DATA_ERROR;
++ }
++
++ return XZ_OK;
++}
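++
++/*
++ * An informal example of the encoding handled above: the value 300
++ * (binary 100101100) is stored as 0xAC 0x02. The first byte contributes
++ * (0xAC & 0x7F) = 44 at shift 0 and has the continuation bit set; the
++ * second contributes 2 << 7 = 256 and its clear top bit ends the
++ * integer, giving 44 + 256 = 300.
++ */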
++
++/*
++ * Decode the Compressed Data field from a Block. Update and validate
++ * the observed compressed and uncompressed sizes of the Block so that
++ * they don't exceed the values possibly stored in the Block Header
++ * (validation assumes that no integer overflow occurs, since vli_type
++ * is normally uint64_t). Update the CRC32 if presence of the CRC32
++ * field was indicated in Stream Header.
++ *
++ * Once the decoding is finished, validate that the observed sizes match
++ * the sizes possibly stored in the Block Header. Update the hash and
++ * Block count, which are later used to validate the Index field.
++ */
++static enum xz_ret INIT dec_block(struct xz_dec *s, struct xz_buf *b)
++{
++ enum xz_ret ret;
++
++ s->in_start = b->in_pos;
++ s->out_start = b->out_pos;
++
++#ifdef XZ_DEC_BCJ
++ if (s->bcj_active)
++ ret = xz_dec_bcj_run(s->bcj, s->lzma2, b);
++ else
++#endif
++ ret = xz_dec_lzma2_run(s->lzma2, b);
++
++ s->block.compressed += b->in_pos - s->in_start;
++ s->block.uncompressed += b->out_pos - s->out_start;
++
++ /*
++ * There is no need to separately check for VLI_UNKNOWN, since
++ * the observed sizes are always smaller than VLI_UNKNOWN.
++ */
++ if (s->block.compressed > s->block_header.compressed
++ || s->block.uncompressed
++ > s->block_header.uncompressed)
++ return XZ_DATA_ERROR;
++
++ if (s->check_type == XZ_CHECK_CRC32)
++ s->crc32 = xz_crc32(b->out + s->out_start,
++ b->out_pos - s->out_start, s->crc32);
++
++ if (ret == XZ_STREAM_END) {
++ if (s->block_header.compressed != VLI_UNKNOWN
++ && s->block_header.compressed
++ != s->block.compressed)
++ return XZ_DATA_ERROR;
++
++ if (s->block_header.uncompressed != VLI_UNKNOWN
++ && s->block_header.uncompressed
++ != s->block.uncompressed)
++ return XZ_DATA_ERROR;
++
++ s->block.hash.unpadded += s->block_header.size
++ + s->block.compressed;
++
++#ifdef XZ_DEC_ANY_CHECK
++ s->block.hash.unpadded += check_sizes[s->check_type];
++#else
++ if (s->check_type == XZ_CHECK_CRC32)
++ s->block.hash.unpadded += 4;
++#endif
++
++ s->block.hash.uncompressed += s->block.uncompressed;
++ s->block.hash.crc32 = xz_crc32(
++ (const uint8_t *)&s->block.hash,
++ sizeof(s->block.hash), s->block.hash.crc32);
++
++ ++s->block.count;
++ }
++
++ return ret;
++}
++
++/* Update the Index size and the CRC32 value. */
++static void INIT index_update(struct xz_dec *s, const struct xz_buf *b)
++{
++ size_t in_used = b->in_pos - s->in_start;
++ s->index.size += in_used;
++ s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32);
++}
++
++/*
++ * Decode the Number of Records, Unpadded Size, and Uncompressed Size
++ * fields from the Index field. That is, Index Padding and CRC32 are not
++ * decoded by this function.
++ *
++ * This can return XZ_OK (more input needed), XZ_STREAM_END (everything
++ * successfully decoded), or XZ_DATA_ERROR (input is corrupt).
++ */
++static enum xz_ret INIT dec_index(struct xz_dec *s, struct xz_buf *b)
++{
++ enum xz_ret ret;
++
++ do {
++ ret = dec_vli(s, b->in, &b->in_pos, b->in_size);
++ if (ret != XZ_STREAM_END) {
++ index_update(s, b);
++ return ret;
++ }
++
++ switch (s->index.sequence) {
++ case SEQ_INDEX_COUNT:
++ s->index.count = s->vli;
++
++ /*
++ * Validate that the Number of Records field
++ * indicates the same number of Records as
++ * there were Blocks in the Stream.
++ */
++ if (s->index.count != s->block.count)
++ return XZ_DATA_ERROR;
++
++ s->index.sequence = SEQ_INDEX_UNPADDED;
++ break;
++
++ case SEQ_INDEX_UNPADDED:
++ s->index.hash.unpadded += s->vli;
++ s->index.sequence = SEQ_INDEX_UNCOMPRESSED;
++ break;
++
++ case SEQ_INDEX_UNCOMPRESSED:
++ s->index.hash.uncompressed += s->vli;
++ s->index.hash.crc32 = xz_crc32(
++ (const uint8_t *)&s->index.hash,
++ sizeof(s->index.hash),
++ s->index.hash.crc32);
++ --s->index.count;
++ s->index.sequence = SEQ_INDEX_UNPADDED;
++ break;
++ }
++ } while (s->index.count > 0);
++
++ return XZ_STREAM_END;
++}
++
++/*
++ * Validate that the next four input bytes match the value of s->crc32.
++ * s->pos must be zero when starting to validate the first byte.
++ */
++static enum xz_ret INIT crc32_validate(struct xz_dec *s, struct xz_buf *b)
++{
++ do {
++ if (b->in_pos == b->in_size)
++ return XZ_OK;
++
++ if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++])
++ return XZ_DATA_ERROR;
++
++ s->pos += 8;
++
++ } while (s->pos < 32);
++
++ s->crc32 = 0;
++ s->pos = 0;
++
++ return XZ_STREAM_END;
++}
++
++#ifdef XZ_DEC_ANY_CHECK
++/*
++ * Skip over the Check field when the Check ID is not supported.
++ * Returns true once the whole Check field has been skipped over.
++ */
++static bool_t INIT check_skip(struct xz_dec *s, struct xz_buf *b)
++{
++ while (s->pos < check_sizes[s->check_type]) {
++ if (b->in_pos == b->in_size)
++ return false;
++
++ ++b->in_pos;
++ ++s->pos;
++ }
++
++ s->pos = 0;
++
++ return true;
++}
++#endif
++
++/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */
++static enum xz_ret INIT dec_stream_header(struct xz_dec *s)
++{
++ if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE))
++ return XZ_FORMAT_ERROR;
++
++ if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0)
++ != get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
++ return XZ_DATA_ERROR;
++
++ if (s->temp.buf[HEADER_MAGIC_SIZE] != 0)
++ return XZ_OPTIONS_ERROR;
++
++ /*
++ * Of integrity checks, we support only none (Check ID = 0) and
++ * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined,
++ * we will accept other check types too, but then the check won't
++ * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given.
++ */
++ s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];
++
++#ifdef XZ_DEC_ANY_CHECK
++ if (s->check_type > XZ_CHECK_MAX)
++ return XZ_OPTIONS_ERROR;
++
++ if (s->check_type > XZ_CHECK_CRC32)
++ return XZ_UNSUPPORTED_CHECK;
++#else
++ if (s->check_type > XZ_CHECK_CRC32)
++ return XZ_OPTIONS_ERROR;
++#endif
++
++ return XZ_OK;
++}
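++
++/*
++ * For reference, the layout implied by the checks above: the 12-byte
++ * Stream Header is the 6-byte HEADER_MAGIC, a null byte, the Check ID
++ * byte, and the little-endian CRC32 of those two Stream Flags bytes.
++ */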
++
++/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */
++static enum xz_ret INIT dec_stream_footer(struct xz_dec *s)
++{
++ if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE))
++ return XZ_DATA_ERROR;
++
++ if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf))
++ return XZ_DATA_ERROR;
++
++ /*
++ * Validate Backward Size. Note that we never added the size of the
++ * Index CRC32 field to s->index.size, thus we use s->index.size / 4
++ * instead of s->index.size / 4 - 1.
++ */
++ if ((s->index.size >> 2) != get_le32(s->temp.buf + 4))
++ return XZ_DATA_ERROR;
++
++ if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type)
++ return XZ_DATA_ERROR;
++
++ /*
++ * Use XZ_STREAM_END instead of XZ_OK to be more convenient
++ * for the caller.
++ */
++ return XZ_STREAM_END;
++}
++
++/* Decode the Block Header and initialize the filter chain. */
++static enum xz_ret INIT dec_block_header(struct xz_dec *s)
++{
++ enum xz_ret ret;
++
++ /*
++ * Validate the CRC32. We know that the temp buffer is at least
++ * eight bytes so this is safe.
++ */
++ s->temp.size -= 4;
++ if (xz_crc32(s->temp.buf, s->temp.size, 0)
++ != get_le32(s->temp.buf + s->temp.size))
++ return XZ_DATA_ERROR;
++
++ s->temp.pos = 2;
++
++ /*
++ * Catch unsupported Block Flags. We support only one or two filters
++ * in the chain, so we catch that with the same test.
++ */
++#ifdef XZ_DEC_BCJ
++ if (s->temp.buf[1] & 0x3E)
++#else
++ if (s->temp.buf[1] & 0x3F)
++#endif
++ return XZ_OPTIONS_ERROR;
++
++ /* Compressed Size */
++ if (s->temp.buf[1] & 0x40) {
++ if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
++ != XZ_STREAM_END)
++ return XZ_DATA_ERROR;
++
++ s->block_header.compressed = s->vli;
++ } else {
++ s->block_header.compressed = VLI_UNKNOWN;
++ }
++
++ /* Uncompressed Size */
++ if (s->temp.buf[1] & 0x80) {
++ if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
++ != XZ_STREAM_END)
++ return XZ_DATA_ERROR;
++
++ s->block_header.uncompressed = s->vli;
++ } else {
++ s->block_header.uncompressed = VLI_UNKNOWN;
++ }
++
++#ifdef XZ_DEC_BCJ
++ /* If there are two filters, the first one must be a BCJ filter. */
++ s->bcj_active = s->temp.buf[1] & 0x01;
++ if (s->bcj_active) {
++ if (s->temp.size - s->temp.pos < 2)
++ return XZ_OPTIONS_ERROR;
++
++ ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]);
++ if (ret != XZ_OK)
++ return ret;
++
++ /*
++		 * We don't support a custom start offset,
++ * so Size of Properties must be zero.
++ */
++ if (s->temp.buf[s->temp.pos++] != 0x00)
++ return XZ_OPTIONS_ERROR;
++ }
++#endif
++
++ /* Valid Filter Flags always take at least two bytes. */
++ if (s->temp.size - s->temp.pos < 2)
++ return XZ_DATA_ERROR;
++
++ /* Filter ID = LZMA2 */
++ if (s->temp.buf[s->temp.pos++] != 0x21)
++ return XZ_OPTIONS_ERROR;
++
++ /* Size of Properties = 1-byte Filter Properties */
++ if (s->temp.buf[s->temp.pos++] != 0x01)
++ return XZ_OPTIONS_ERROR;
++
++ /* Filter Properties contains LZMA2 dictionary size. */
++ if (s->temp.size - s->temp.pos < 1)
++ return XZ_DATA_ERROR;
++
++ ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]);
++ if (ret != XZ_OK)
++ return ret;
++
++ /* The rest must be Header Padding. */
++ while (s->temp.pos < s->temp.size)
++ if (s->temp.buf[s->temp.pos++] != 0x00)
++ return XZ_OPTIONS_ERROR;
++
++ s->temp.pos = 0;
++ s->block.compressed = 0;
++ s->block.uncompressed = 0;
++
++ return XZ_OK;
++}
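++
++/*
++ * An informal example consistent with the checks above: the usual
++ * minimal Block Header for a plain LZMA2 filter is 12 bytes, i.e. a
++ * size byte of 0x02, flags 0x00, Filter ID 0x21, Size of Properties
++ * 0x01, one dictionary-size byte, three bytes of Header Padding, and
++ * the four CRC32 bytes.
++ */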
++
++static enum xz_ret INIT dec_main(struct xz_dec *s, struct xz_buf *b)
++{
++ enum xz_ret ret;
++
++ /*
++ * Store the start position for the case when we are in the middle
++ * of the Index field.
++ */
++ s->in_start = b->in_pos;
++
++ while (true) {
++ switch (s->sequence) {
++ case SEQ_STREAM_HEADER:
++ /*
++ * Stream Header is copied to s->temp, and then
++ * decoded from there. This way if the caller
++			 * gives us only a little input at a time, we can
++			 * still keep the Stream Header decoding code
++			 * simple. A similar approach is used in many places
++ * in this file.
++ */
++ if (!fill_temp(s, b))
++ return XZ_OK;
++
++ /*
++ * If dec_stream_header() returns
++ * XZ_UNSUPPORTED_CHECK, it is still possible
++ * to continue decoding if working in multi-call
++ * mode. Thus, update s->sequence before calling
++ * dec_stream_header().
++ */
++ s->sequence = SEQ_BLOCK_START;
++
++ ret = dec_stream_header(s);
++ if (ret != XZ_OK)
++ return ret;
++
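++			/* no break: fall through to SEQ_BLOCK_START */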
++ case SEQ_BLOCK_START:
++ /* We need one byte of input to continue. */
++ if (b->in_pos == b->in_size)
++ return XZ_OK;
++
++ /* See if this is the beginning of the Index field. */
++ if (b->in[b->in_pos] == 0) {
++ s->in_start = b->in_pos++;
++ s->sequence = SEQ_INDEX;
++ break;
++ }
++
++ /*
++ * Calculate the size of the Block Header and
++ * prepare to decode it.
++ */
++ s->block_header.size
++ = ((uint32_t)b->in[b->in_pos] + 1) * 4;
++
++ s->temp.size = s->block_header.size;
++ s->temp.pos = 0;
++ s->sequence = SEQ_BLOCK_HEADER;
++
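++			/* no break: fall through to SEQ_BLOCK_HEADER */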
++ case SEQ_BLOCK_HEADER:
++ if (!fill_temp(s, b))
++ return XZ_OK;
++
++ ret = dec_block_header(s);
++ if (ret != XZ_OK)
++ return ret;
++
++ s->sequence = SEQ_BLOCK_UNCOMPRESS;
++
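++			/* no break: fall through to SEQ_BLOCK_UNCOMPRESS */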
++ case SEQ_BLOCK_UNCOMPRESS:
++ ret = dec_block(s, b);
++ if (ret != XZ_STREAM_END)
++ return ret;
++
++ s->sequence = SEQ_BLOCK_PADDING;
++
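++			/* no break: fall through to SEQ_BLOCK_PADDING */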
++ case SEQ_BLOCK_PADDING:
++ /*
++ * Size of Compressed Data + Block Padding
++ * must be a multiple of four. We don't need
++ * s->block.compressed for anything else
++ * anymore, so we use it here to test the size
++ * of the Block Padding field.
++ */
++ while (s->block.compressed & 3) {
++ if (b->in_pos == b->in_size)
++ return XZ_OK;
++
++ if (b->in[b->in_pos++] != 0)
++ return XZ_DATA_ERROR;
++
++ ++s->block.compressed;
++ }
++
++ s->sequence = SEQ_BLOCK_CHECK;
++
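++			/* no break: fall through to SEQ_BLOCK_CHECK */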
++ case SEQ_BLOCK_CHECK:
++ if (s->check_type == XZ_CHECK_CRC32) {
++ ret = crc32_validate(s, b);
++ if (ret != XZ_STREAM_END)
++ return ret;
++ }
++#ifdef XZ_DEC_ANY_CHECK
++ else if (!check_skip(s, b)) {
++ return XZ_OK;
++ }
++#endif
++
++ s->sequence = SEQ_BLOCK_START;
++ break;
++
++ case SEQ_INDEX:
++ ret = dec_index(s, b);
++ if (ret != XZ_STREAM_END)
++ return ret;
++
++ s->sequence = SEQ_INDEX_PADDING;
++
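++			/* no break: fall through to SEQ_INDEX_PADDING */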
++ case SEQ_INDEX_PADDING:
++ while ((s->index.size + (b->in_pos - s->in_start))
++ & 3) {
++ if (b->in_pos == b->in_size) {
++ index_update(s, b);
++ return XZ_OK;
++ }
++
++ if (b->in[b->in_pos++] != 0)
++ return XZ_DATA_ERROR;
++ }
++
++ /* Finish the CRC32 value and Index size. */
++ index_update(s, b);
++
++ /* Compare the hashes to validate the Index field. */
++ if (!memeq(&s->block.hash, &s->index.hash,
++ sizeof(s->block.hash)))
++ return XZ_DATA_ERROR;
++
++ s->sequence = SEQ_INDEX_CRC32;
++
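++			/* no break: fall through to SEQ_INDEX_CRC32 */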
++ case SEQ_INDEX_CRC32:
++ ret = crc32_validate(s, b);
++ if (ret != XZ_STREAM_END)
++ return ret;
++
++ s->temp.size = STREAM_HEADER_SIZE;
++ s->sequence = SEQ_STREAM_FOOTER;
++
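++			/* no break: fall through to SEQ_STREAM_FOOTER */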
++ case SEQ_STREAM_FOOTER:
++ if (!fill_temp(s, b))
++ return XZ_OK;
++
++ return dec_stream_footer(s);
++ }
++ }
++
++ /* Never reached */
++}
++
++XZ_EXTERN void INIT xz_dec_reset(struct xz_dec *s)
++{
++ s->sequence = SEQ_STREAM_HEADER;
++ s->allow_buf_error = false;
++ s->pos = 0;
++ s->crc32 = 0;
++ memzero(&s->block, sizeof(s->block));
++ memzero(&s->index, sizeof(s->index));
++ s->temp.pos = 0;
++ s->temp.size = STREAM_HEADER_SIZE;
++}
++
++/*
++ * xz_dec_run() is a wrapper for dec_main() to handle some special cases in
++ * multi-call and single-call decoding.
++ *
++ * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we
++ * are not going to make any progress anymore. This is to prevent the caller
++ * from calling us infinitely when the input file is truncated or otherwise
++ * corrupt. Since the zlib-style API allows the caller to fill the input buffer
++ * only when the decoder doesn't produce any new output, we have to be careful
++ * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only
++ * after the second consecutive call to xz_dec_run() that makes no progress.
++ *
++ * In single-call mode, if we couldn't decode everything and no error
++ * occurred, either the input is truncated or the output buffer is too small.
++ * Since we know that the last input byte never produces any output, we know
++ * that if all the input was consumed and decoding wasn't finished, the file
++ * must be corrupt. Otherwise the output buffer has to be too small or the
++ * file is corrupt in a way that makes decoding produce more output than expected.
++ *
++ * If single-call decoding fails, we reset b->in_pos and b->out_pos back to
++ * their original values. This is because with some filter chains there won't
++ * be any valid uncompressed data in the output buffer unless the decoding
++ * actually succeeds (that's the price to pay for using the output buffer as
++ * the workspace).
++ */
++XZ_EXTERN enum xz_ret INIT xz_dec_run(struct xz_dec *s, struct xz_buf *b)
++{
++ size_t in_start;
++ size_t out_start;
++ enum xz_ret ret;
++
++ if (DEC_IS_SINGLE(s->mode))
++ xz_dec_reset(s);
++
++ in_start = b->in_pos;
++ out_start = b->out_pos;
++ ret = dec_main(s, b);
++
++ if (DEC_IS_SINGLE(s->mode)) {
++ if (ret == XZ_OK)
++ ret = b->in_pos == b->in_size
++ ? XZ_DATA_ERROR : XZ_BUF_ERROR;
++
++ if (ret != XZ_STREAM_END) {
++ b->in_pos = in_start;
++ b->out_pos = out_start;
++ }
++
++ } else if (ret == XZ_OK && in_start == b->in_pos
++ && out_start == b->out_pos) {
++ if (s->allow_buf_error)
++ ret = XZ_BUF_ERROR;
++
++ s->allow_buf_error = true;
++ } else {
++ s->allow_buf_error = false;
++ }
++
++ return ret;
++}
++
++XZ_EXTERN struct xz_dec *INIT xz_dec_init(enum xz_mode mode, uint32_t dict_max)
++{
++ struct xz_dec *s = malloc(sizeof(*s));
++ if (s == NULL)
++ return NULL;
++
++ s->mode = mode;
++
++#ifdef XZ_DEC_BCJ
++ s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode));
++ if (s->bcj == NULL)
++ goto error_bcj;
++#endif
++
++ s->lzma2 = xz_dec_lzma2_create(mode, dict_max);
++ if (s->lzma2 == NULL)
++ goto error_lzma2;
++
++ xz_dec_reset(s);
++ return s;
++
++error_lzma2:
++#ifdef XZ_DEC_BCJ
++ xz_dec_bcj_end(s->bcj);
++error_bcj:
++#endif
++ free(s);
++ return NULL;
++}
++
++XZ_EXTERN void INIT xz_dec_end(struct xz_dec *s)
++{
++ if (s != NULL) {
++ xz_dec_lzma2_end(s->lzma2);
++#ifdef XZ_DEC_BCJ
++ xz_dec_bcj_end(s->bcj);
++#endif
++ free(s);
++ }
++}
+diff --git a/xen/common/xz/lzma2.h b/xen/common/xz/lzma2.h
+new file mode 100644
+--- /dev/null
++++ b/xen/common/xz/lzma2.h
+@@ -0,0 +1,204 @@
++/*
++ * LZMA2 definitions
++ *
++ * Authors: Lasse Collin <lasse.collin@tukaani.org>
++ * Igor Pavlov <http://7-zip.org/>
++ *
++ * This file has been put into the public domain.
++ * You can do whatever you want with this file.
++ */
++
++#ifndef XZ_LZMA2_H
++#define XZ_LZMA2_H
++
++/* Range coder constants */
++#define RC_SHIFT_BITS 8
++#define RC_TOP_BITS 24
++#define RC_TOP_VALUE (1 << RC_TOP_BITS)
++#define RC_BIT_MODEL_TOTAL_BITS 11
++#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS)
++#define RC_MOVE_BITS 5
++
++/*
++ * Maximum number of position states. A position state is the lowest pb
++ * number of bits of the current uncompressed offset. In some places there
++ * are different sets of probabilities for different position states.
++ */
++#define POS_STATES_MAX (1 << 4)
++
++/*
++ * This enum is used to track which LZMA symbols have occurred most recently
++ * and in which order. This information is used to predict the next symbol.
++ *
++ * Symbols:
++ * - Literal: One 8-bit byte
++ * - Match: Repeat a chunk of data at some distance
++ * - Long repeat: Multi-byte match at a recently seen distance
++ * - Short repeat: One-byte repeat at a recently seen distance
++ *
++ * The symbol names are of the form STATE_oldest_older_previous. REP means
++ * either short or long repeated match, and NONLIT means any non-literal.
++ */
++enum lzma_state {
++ STATE_LIT_LIT,
++ STATE_MATCH_LIT_LIT,
++ STATE_REP_LIT_LIT,
++ STATE_SHORTREP_LIT_LIT,
++ STATE_MATCH_LIT,
++ STATE_REP_LIT,
++ STATE_SHORTREP_LIT,
++ STATE_LIT_MATCH,
++ STATE_LIT_LONGREP,
++ STATE_LIT_SHORTREP,
++ STATE_NONLIT_MATCH,
++ STATE_NONLIT_REP
++};
++
++/* Total number of states */
++#define STATES 12
++
++/* The lowest 7 states indicate that the previous state was a literal. */
++#define LIT_STATES 7
++
++/* Indicate that the latest symbol was a literal. */
++static inline void INIT lzma_state_literal(enum lzma_state *state)
++{
++ if (*state <= STATE_SHORTREP_LIT_LIT)
++ *state = STATE_LIT_LIT;
++ else if (*state <= STATE_LIT_SHORTREP)
++ *state -= 3;
++ else
++ *state -= 6;
++}
++
++/* Indicate that the latest symbol was a match. */
++static inline void INIT lzma_state_match(enum lzma_state *state)
++{
++ *state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH;
++}
++
++/* Indicate that the latest symbol was a long repeated match. */
++static inline void INIT lzma_state_long_rep(enum lzma_state *state)
++{
++ *state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP;
++}
++
++/* Indicate that the latest symbol was a short repeated match. */
++static inline void INIT lzma_state_short_rep(enum lzma_state *state)
++{
++ *state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP;
++}
++
++/* Test if the previous symbol was a literal. */
++static inline bool_t INIT lzma_state_is_literal(enum lzma_state state)
++{
++ return state < LIT_STATES;
++}
++
++/* Each literal coder is divided into three sections:
++ * - 0x001-0x0FF: Without match byte
++ * - 0x101-0x1FF: With match byte; match bit is 0
++ * - 0x201-0x2FF: With match byte; match bit is 1
++ *
++ * The match byte is used when the previous LZMA symbol was something other
++ * than a literal (that is, it was some kind of match).
++ */
++#define LITERAL_CODER_SIZE 0x300
++
++/* Maximum number of literal coders */
++#define LITERAL_CODERS_MAX (1 << 4)
++
++/* Minimum length of a match is two bytes. */
++#define MATCH_LEN_MIN 2
++
++/* Match length is encoded with 4, 5, or 10 bits.
++ *
++ * Length Bits
++ * 2-9 4 = Choice=0 + 3 bits
++ * 10-17 5 = Choice=1 + Choice2=0 + 3 bits
++ * 18-273 10 = Choice=1 + Choice2=1 + 8 bits
++ */
++#define LEN_LOW_BITS 3
++#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
++#define LEN_MID_BITS 3
++#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
++#define LEN_HIGH_BITS 8
++#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
++#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)
++
++/*
++ * Maximum length of a match is 273 which is a result of the encoding
++ * described above.
++ */
++#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)
++
++/*
++ * Different sets of probabilities are used for match distances that have
++ * very short match length: Lengths of 2, 3, and 4 bytes have a separate
++ * set of probabilities for each length. The matches with longer length
++ * use a shared set of probabilities.
++ */
++#define DIST_STATES 4
++
++/*
++ * Get the index of the appropriate probability array for decoding
++ * the distance slot.
++ */
++static inline uint32_t INIT lzma_get_dist_state(uint32_t len)
++{
++ return len < DIST_STATES + MATCH_LEN_MIN
++ ? len - MATCH_LEN_MIN : DIST_STATES - 1;
++}
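++
++/*
++ * Informally: match lengths 2, 3, and 4 map to dist states 0, 1, and 2,
++ * while every longer match shares state DIST_STATES - 1 = 3 (length 5
++ * reaches it through the first branch, lengths >= 6 through the second).
++ */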
++
++/*
++ * The highest two bits of a 32-bit match distance are encoded using six bits.
++ * This six-bit value is called a distance slot. This way encoding a 32-bit
++ * value takes 6-36 bits, larger values taking more bits.
++ */
++#define DIST_SLOT_BITS 6
++#define DIST_SLOTS (1 << DIST_SLOT_BITS)
++
++/* Match distances up to 127 are fully encoded using probabilities. Since
++ * the highest two bits (distance slot) are always encoded using six bits,
++ * the distances 0-3 don't need any additional bits to encode, since the
++ * distance slot itself is the same as the actual distance. DIST_MODEL_START
++ * indicates the first distance slot where at least one additional bit is
++ * needed.
++ */
++#define DIST_MODEL_START 4
++
++/*
++ * Match distances greater than 127 are encoded in three pieces:
++ * - distance slot: the highest two bits
++ * - direct bits: 2-26 bits below the highest two bits
++ * - alignment bits: four lowest bits
++ *
++ * Direct bits don't use any probabilities.
++ *
++ * The distance slot value of 14 is for distances 128-191.
++ */
++#define DIST_MODEL_END 14
++
++/* Distance slots that indicate a distance <= 127. */
++#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
++#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)
++
++/*
++ * For match distances greater than 127, only the highest two bits and the
++ * lowest four bits (alignment) are encoded using probabilities.
++ */
++#define ALIGN_BITS 4
++#define ALIGN_SIZE (1 << ALIGN_BITS)
++#define ALIGN_MASK (ALIGN_SIZE - 1)
++
++/* Total number of all probability variables */
++#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE)
++
++/*
++ * LZMA remembers the four most recent match distances. Reusing these
++ * distances tends to take less space than re-encoding the actual
++ * distance value.
++ */
++#define REPS 4
++
++#endif
+diff --git a/xen/common/xz/private.h b/xen/common/xz/private.h
+new file mode 100644
+--- /dev/null
++++ b/xen/common/xz/private.h
+@@ -0,0 +1,271 @@
++/*
++ * Private includes and definitions
++ *
++ * Author: Lasse Collin <lasse.collin@tukaani.org>
++ *
++ * This file has been put into the public domain.
++ * You can do whatever you want with this file.
++ */
++
++#ifndef XZ_PRIVATE_H
++#define XZ_PRIVATE_H
++
++#include <xen/kernel.h>
++#include <asm/byteorder.h>
++#define get_le32(p) le32_to_cpup((const uint32_t *)(p))
++
++#if 1 /* ndef CONFIG_??? */
++static inline u32 INIT get_unaligned_le32(void *p)
++{
++ return le32_to_cpup(p);
++}
++
++static inline void INIT put_unaligned_le32(u32 val, void *p)
++{
++ *(__force __le32*)p = cpu_to_le32(val);
++}
++#else
++#include <asm/unaligned.h>
++
++static inline u32 INIT get_unaligned_le32(void *p)
++{
++ return le32_to_cpu(__get_unaligned(p, 4));
++}
++
++static inline void INIT put_unaligned_le32(u32 val, void *p)
++{
++ __put_unaligned(cpu_to_le32(val), p, 4);
++}
++#endif
++
++#define false 0
++#define true 1
++
++/**
++ * enum xz_mode - Operation mode
++ *
++ * @XZ_SINGLE: Single-call mode. This uses less RAM than
++ *			the multi-call modes, because the LZMA2
++ * dictionary doesn't need to be allocated as
++ * part of the decoder state. All required data
++ * structures are allocated at initialization,
++ * so xz_dec_run() cannot return XZ_MEM_ERROR.
++ * @XZ_PREALLOC: Multi-call mode with preallocated LZMA2
++ * dictionary buffer. All data structures are
++ * allocated at initialization, so xz_dec_run()
++ * cannot return XZ_MEM_ERROR.
++ * @XZ_DYNALLOC: Multi-call mode. The LZMA2 dictionary is
++ * allocated once the required size has been
++ * parsed from the stream headers. If the
++ * allocation fails, xz_dec_run() will return
++ * XZ_MEM_ERROR.
++ *
++ * It is possible to enable support only for a subset of the above
++ * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC,
++ * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled
++ * with support for all operation modes, but the preboot code may
++ * be built with fewer features to minimize code size.
++ */
++enum xz_mode {
++ XZ_SINGLE,
++ XZ_PREALLOC,
++ XZ_DYNALLOC
++};
++
++/**
++ * enum xz_ret - Return codes
++ * @XZ_OK: Everything is OK so far. More input or more
++ * output space is required to continue. This
++ * return code is possible only in multi-call mode
++ * (XZ_PREALLOC or XZ_DYNALLOC).
++ * @XZ_STREAM_END: Operation finished successfully.
++ * @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding
++ * is still possible in multi-call mode by simply
++ * calling xz_dec_run() again.
++ * Note that this return value is used only if
++ * XZ_DEC_ANY_CHECK was defined at build time,
++ * which is not used in the kernel. Unsupported
++ * check types return XZ_OPTIONS_ERROR if
++ * XZ_DEC_ANY_CHECK was not defined at build time.
++ * @XZ_MEM_ERROR: Allocating memory failed. This return code is
++ * possible only if the decoder was initialized
++ *			with XZ_DYNALLOC. The amount of memory we
++ *			tried to allocate was no more than the
++ * dict_max argument given to xz_dec_init().
++ * @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than
++ * allowed by the dict_max argument given to
++ * xz_dec_init(). This return value is possible
++ * only in multi-call mode (XZ_PREALLOC or
++ * XZ_DYNALLOC); the single-call mode (XZ_SINGLE)
++ * ignores the dict_max argument.
++ * @XZ_FORMAT_ERROR: File format was not recognized (wrong magic
++ * bytes).
++ * @XZ_OPTIONS_ERROR: This implementation doesn't support the requested
++ * compression options. In the decoder this means
++ * that the header CRC32 matches, but the header
++ * itself specifies something that we don't support.
++ * @XZ_DATA_ERROR: Compressed data is corrupt.
++ * @XZ_BUF_ERROR: Cannot make any progress. Details are slightly
++ * different between multi-call and single-call
++ * mode; more information below.
++ *
++ * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls
++ * to XZ code cannot consume any input and cannot produce any new output.
++ * This happens when there is no new input available, or the output buffer
++ * is full while at least one output byte is still pending. Assuming your
++ * code is not buggy, you can get this error only when decoding a compressed
++ * stream that is truncated or otherwise corrupt.
++ *
++ * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer
++ * is too small or the compressed input is corrupt in a way that makes the
++ * decoder produce more output than the caller expected. When it is
++ * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR
++ * is used instead of XZ_BUF_ERROR.
++ */
++enum xz_ret {
++ XZ_OK,
++ XZ_STREAM_END,
++ XZ_UNSUPPORTED_CHECK,
++ XZ_MEM_ERROR,
++ XZ_MEMLIMIT_ERROR,
++ XZ_FORMAT_ERROR,
++ XZ_OPTIONS_ERROR,
++ XZ_DATA_ERROR,
++ XZ_BUF_ERROR
++};
++
++/**
++ * struct xz_buf - Passing input and output buffers to XZ code
++ * @in: Beginning of the input buffer. This may be NULL if and only
++ * if in_pos is equal to in_size.
++ * @in_pos: Current position in the input buffer. This must not exceed
++ * in_size.
++ * @in_size: Size of the input buffer
++ * @out: Beginning of the output buffer. This may be NULL if and only
++ * if out_pos is equal to out_size.
++ * @out_pos: Current position in the output buffer. This must not exceed
++ * out_size.
++ * @out_size: Size of the output buffer
++ *
++ * Only the contents of the output buffer from out[out_pos] onward, and
++ * the variables in_pos and out_pos are modified by the XZ code.
++ */
++struct xz_buf {
++ const uint8_t *in;
++ size_t in_pos;
++ size_t in_size;
++
++ uint8_t *out;
++ size_t out_pos;
++ size_t out_size;
++};
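++
++/*
++ * A minimal multi-call usage sketch (an illustration: src/dst and the
++ * 32 MiB dict_max are assumed caller-side values, and the input-refill
++ * loop a real caller needs is omitted):
++ *
++ *	struct xz_buf b = {
++ *		.in = src, .in_pos = 0, .in_size = src_len,
++ *		.out = dst, .out_pos = 0, .out_size = dst_len,
++ *	};
++ *	struct xz_dec *s = xz_dec_init(XZ_DYNALLOC, 32 * 1024 * 1024);
++ *	enum xz_ret ret = xz_dec_run(s, &b);
++ *	xz_dec_end(s);
++ *
++ * On success ret is XZ_STREAM_END and b.out_pos tells how much output
++ * was produced.
++ */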
++
++/**
++ * struct xz_dec - Opaque type to hold the XZ decoder state
++ */
++struct xz_dec;
++
++/* If no specific decoding mode is requested, enable support for all modes. */
++#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \
++ && !defined(XZ_DEC_DYNALLOC)
++# define XZ_DEC_SINGLE
++# define XZ_DEC_PREALLOC
++# define XZ_DEC_DYNALLOC
++#endif
++
++/*
++ * The DEC_IS_foo(mode) macros are used in "if" statements. If only some
++ * of the supported modes are enabled, these macros will evaluate to true or
++ * false at compile time and thus allow the compiler to omit unneeded code.
++ */
++#ifdef XZ_DEC_SINGLE
++# define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE)
++#else
++# define DEC_IS_SINGLE(mode) (false)
++#endif
++
++#ifdef XZ_DEC_PREALLOC
++# define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC)
++#else
++# define DEC_IS_PREALLOC(mode) (false)
++#endif
++
++#ifdef XZ_DEC_DYNALLOC
++# define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC)
++#else
++# define DEC_IS_DYNALLOC(mode) (false)
++#endif
++
++#if !defined(XZ_DEC_SINGLE)
++# define DEC_IS_MULTI(mode) (true)
++#elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC)
++# define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE)
++#else
++# define DEC_IS_MULTI(mode) (false)
++#endif
++
++/*
++ * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ.
++ * XZ_DEC_BCJ is used to enable generic support for BCJ decoders.
++ */
++#ifndef XZ_DEC_BCJ
++#	if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
++		|| defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
++		|| defined(XZ_DEC_ARMTHUMB) \
++		|| defined(XZ_DEC_SPARC)
++# define XZ_DEC_BCJ
++# endif
++#endif
++
++/*
++ * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
++ * before calling xz_dec_lzma2_run().
++ */
++XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
++ uint32_t dict_max);
++
++/*
++ * Decode the LZMA2 properties (one byte) and reset the decoder. Return
++ * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not
++ * big enough, and XZ_OPTIONS_ERROR if props indicates something that this
++ * decoder doesn't support.
++ */
++XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
++ uint8_t props);
++
++/* Decode raw LZMA2 stream from b->in to b->out. */
++XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
++ struct xz_buf *b);
++
++/* Free the memory allocated for the LZMA2 decoder. */
++XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
++
++#ifdef XZ_DEC_BCJ
++/*
++ * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before
++ * calling xz_dec_bcj_run().
++ */
++XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool_t single_call);
++
++/*
++ * Decode the Filter ID of a BCJ filter. This implementation doesn't
++ * support custom start offsets, so no decoding of Filter Properties
++ * is needed. Returns XZ_OK if the given Filter ID is supported.
++ * Otherwise XZ_OPTIONS_ERROR is returned.
++ */
++XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
++
++/*
++ * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is
++ * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
++ * must be called directly.
++ */
++XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
++ struct xz_dec_lzma2 *lzma2,
++ struct xz_buf *b);
++
++/* Free the memory allocated for the BCJ filters. */
++#define xz_dec_bcj_end(s) free(s)
++#endif
++
++#endif
+diff --git a/xen/common/xz/stream.h b/xen/common/xz/stream.h
+new file mode 100644
+--- /dev/null
++++ b/xen/common/xz/stream.h
+@@ -0,0 +1,55 @@
++/*
++ * Definitions for handling the .xz file format
++ *
++ * Author: Lasse Collin <lasse.collin@tukaani.org>
++ *
++ * This file has been put into the public domain.
++ * You can do whatever you want with this file.
++ */
++
++#ifndef XZ_STREAM_H
++#define XZ_STREAM_H
++
++/*
++ * See the .xz file format specification at
++ * http://tukaani.org/xz/xz-file-format.txt
++ * to understand the container format.
++ */
++
++#define STREAM_HEADER_SIZE 12
++
++#define HEADER_MAGIC "\3757zXZ"
++#define HEADER_MAGIC_SIZE 6
++
++#define FOOTER_MAGIC "YZ"
++#define FOOTER_MAGIC_SIZE 2
++
++/*
++ * Variable-length integer can hold a 63-bit unsigned integer or a special
++ * value indicating that the value is unknown.
++ *
++ * Experimental: vli_type can be defined to uint32_t to save a few bytes
++ * in code size (no effect on speed). Doing so limits the uncompressed and
++ * compressed size of the file to less than 256 MiB and may also weaken
++ * error detection slightly.
++ */
++typedef uint64_t vli_type;
++
++#define VLI_MAX ((vli_type)-1 / 2)
++#define VLI_UNKNOWN ((vli_type)-1)
++
++/* Maximum encoded size of a VLI */
++#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7)
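++/* With the default 64-bit vli_type this evaluates to 9 bytes. */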
++
++/* Integrity Check types */
++enum xz_check {
++ XZ_CHECK_NONE = 0,
++ XZ_CHECK_CRC32 = 1,
++ XZ_CHECK_CRC64 = 4,
++ XZ_CHECK_SHA256 = 10
++};
++
++/* Maximum possible Check ID */
++#define XZ_CHECK_MAX 15
++
++#endif
+diff --git a/xen/include/xen/decompress.h b/xen/include/xen/decompress.h
+--- a/xen/include/xen/decompress.h
++++ b/xen/include/xen/decompress.h
+@@ -31,7 +31,7 @@
+ * dependent).
+ */
+
+-decompress_fn bunzip2, unlzma, unlzo;
++decompress_fn bunzip2, unxz, unlzma, unlzo;
+
+int decompress(void *inbuf, unsigned int len, void *outbuf);
\ No newline at end of file
diff --git a/kernels/xen/parabolainit.patch b/kernels/xen/parabolainit.patch
new file mode 100644
index 000000000..645a66edd
--- /dev/null
+++ b/kernels/xen/parabolainit.patch
@@ -0,0 +1,423 @@
+diff -Naur orig.xen-4.1.1//tools/hotplug/Linux/init.d/xencommons xen-4.1.1//tools/hotplug/Linux/init.d/xencommons
+--- orig.xen-4.1.1//tools/hotplug/Linux/init.d/xencommons 2011-07-03 03:08:44.953747064 -0700
++++ xen-4.1.1//tools/hotplug/Linux/init.d/xencommons 2011-07-05 13:47:54.627029164 -0700
+@@ -18,6 +18,9 @@
+ # Description: Starts and stops the daemons neeeded for xl/xend
+ ### END INIT INFO
+
++. /etc/rc.conf
++. /etc/rc.d/functions
++
+ if [ -d /etc/sysconfig ]; then
+ xencommons_config=/etc/sysconfig
+ else
+@@ -26,7 +29,7 @@
+
+ test -f $xencommons_config/xencommons && . $xencommons_config/xencommons
+
+-XENCONSOLED_PIDFILE=/var/run/xenconsoled.pid
++XENCONSOLED_PIDFILE=/run/daemons/xenconsoled.pid
+ shopt -s extglob
+
+ if test "x$1" = xstart && \
+@@ -51,8 +54,9 @@
+ rm -f "$XENSTORED_ROOTDIR"/tdb* &>/dev/null
+ test -z "$XENSTORED_TRACE" || XENSTORED_ARGS=" -T /var/log/xen/xenstored-trace.log"
+
+- echo -n Starting xenstored...
+- xenstored --pid-file=/var/run/xenstored.pid $XENSTORED_ARGS
++ #echo -n Starting xenstored...
++ stat_busy "Starting xenstored"
++ xenstored --pid-file=/run/daemons/xenstored.pid $XENSTORED_ARGS
+
+ # Wait for xenstored to actually come up, timing out after 30 seconds
+ while [ $time -lt $timeout ] && ! `xenstore-read -s / >/dev/null 2>&1` ; do
+@@ -60,33 +64,39 @@
+ time=$(($time+1))
+ sleep 1
+ done
+- echo
+-
+ # Exit if we timed out
+ if ! [ $time -lt $timeout ] ; then
+- echo Could not start xenstored
++ #echo Could not start xenstored
++ stat_fail
+ exit 1
+ fi
++ stat_done
+
+- echo Setting domain 0 name...
++ stat_busy "Setting domain 0 name..."
+ xenstore-write "/local/domain/0/name" "Domain-0"
++ stat_done
+ fi
+
+- echo Starting xenconsoled...
++ #echo Starting xenconsoled...
++ stat_busy "Starting xenconsoled"
+ test -z "$XENCONSOLED_TRACE" || XENCONSOLED_ARGS=" --log=$XENCONSOLED_TRACE"
+ xenconsoled --pid-file=$XENCONSOLED_PIDFILE $XENCONSOLED_ARGS
+ test -z "$XENBACKENDD_DEBUG" || XENBACKENDD_ARGS="-d"
+ test "`uname`" != "NetBSD" || xenbackendd $XENBACKENDD_ARGS
++ stat_done
++ add_daemon xencommons
+ }
+ do_stop () {
+- echo Stopping xenconsoled
++ stat_busy "Stopping xenconsoled"
+ if read 2>/dev/null <$XENCONSOLED_PIDFILE pid; then
+ kill $pid
+ while kill -9 $pid >/dev/null 2>&1; do sleep 0.1; done
+ rm -f $XENCONSOLED_PIDFILE
+ fi
++ stat_done
+
+- echo WARNING: Not stopping xenstored, as it cannot be restarted.
++ printhl "WARNING: Not stopping xenstored, as it cannot be restarted."
++ rm_daemon xencommons
+ }
+
+ case "$1" in
+diff -Naur orig.xen-4.1.1//tools/hotplug/Linux/init.d/xend xen-4.1.1//tools/hotplug/Linux/init.d/xend
+--- orig.xen-4.1.1//tools/hotplug/Linux/init.d/xend 2011-07-03 03:08:44.953747064 -0700
++++ xen-4.1.1//tools/hotplug/Linux/init.d/xend 2011-07-05 01:47:40.981951191 -0700
+@@ -18,6 +18,10 @@
+ # Description: Starts and stops the Xen control daemon.
+ ### END INIT INFO
+
++. /etc/rc.conf
++. /etc/rc.d/functions
++
++
+ shopt -s extglob
+
+ # Wait for Xend to be up
+@@ -37,23 +41,30 @@
+ case "$1" in
+ start)
+ if [ -z "`ps -C xenconsoled -o pid=`" ]; then
+- echo "xencommons should be started first."
++ printhl "xencommons should be started first."
+ exit 1
+ fi
+ # mkdir shouldn't be needed as most distros have this already created. Default to using subsys.
+ # See docs/misc/distro_mapping.txt
+- mkdir -p /var/lock
+- if [ -d /var/lock/subsys ] ; then
+- touch /var/lock/subsys/xend
++ if [ -d /run/lock/subsys ] ; then
++ touch /run/lock/subsys/xend
+ else
+- touch /var/lock/xend
++ touch /run/lock/xend
+ fi
++ stat_busy "Starting xend"
+ xend start
+ await_daemons_up
++ stat_done
++ add_daemon xend
+ ;;
++
++
+ stop)
++ stat_busy "Stopping xend"
+ xend stop
+- rm -f /var/lock/subsys/xend /var/lock/xend
++ rm -f /run/lock/xend /var/lock/xend
++ stat_done
++ rm_daemon xend
+ ;;
+ status)
+ xend status
+@@ -62,8 +73,10 @@
+ xend reload
+ ;;
+ restart|force-reload)
++ stat_busy "Restarting xend"
+ xend restart
+ await_daemons_up
++ stat_done
+ ;;
+ *)
+ # do not advertise unreasonable commands that there is no reason
+diff -Naur orig.xen-4.1.1//tools/hotplug/Linux/init.d/xendomains xen-4.1.1//tools/hotplug/Linux/init.d/xendomains
+--- orig.xen-4.1.1//tools/hotplug/Linux/init.d/xendomains 2011-07-03 03:08:44.953747064 -0700
++++ xen-4.1.1//tools/hotplug/Linux/init.d/xendomains 2011-07-05 13:46:36.208222760 -0700
+@@ -26,6 +26,9 @@
+ # Description: Start / stop domains automatically when domain 0
+ # boots / shuts down.
+ ### END INIT INFO
++. /etc/rc.conf
++. /etc/rc.d/functions
++
+
+ CMD=xm
+ $CMD list &> /dev/null
+@@ -46,93 +49,52 @@
+ exit 0
+ fi
+
+-# See docs/misc/distro_mapping.txt
+-if [ -d /var/lock/subsys ]; then
+- LOCKFILE=/var/lock/subsys/xendomains
+-else
+- LOCKFILE=/var/lock/xendomains
+-fi
+-
+-if [ -d /etc/sysconfig ]; then
+- XENDOM_CONFIG=/etc/sysconfig/xendomains
+-else
+- XENDOM_CONFIG=/etc/default/xendomains
+-fi
++LOCKFILE=/run/lock/xendomains
++XENDOM_CONFIG=/etc/default/xendomains
+
+-test -r $XENDOM_CONFIG || { echo "$XENDOM_CONFIG not existing";
++test -r $XENDOM_CONFIG || {
++ printhl "$XENDOM_CONFIG not existing";
+ if [ "$1" = "stop" ]; then exit 0;
+ else exit 6; fi; }
+
+ . $XENDOM_CONFIG
+
+-# Use the SUSE rc_ init script functions;
+-# emulate them on LSB, RH and other systems
+-if test -e /etc/rc.status; then
+- # SUSE rc script library
+- . /etc/rc.status
+-else
+- _cmd=$1
+- declare -a _SMSG
+- if test "${_cmd}" = "status"; then
++_cmd=$1
++declare -a _SMSG
++if test "${_cmd}" = "status"; then
+ _SMSG=(running dead dead unused unknown)
+ _RC_UNUSED=3
+- else
++else
+ _SMSG=(done failed failed missed failed skipped unused failed failed)
+ _RC_UNUSED=6
+- fi
+- if test -e /etc/init.d/functions; then
+- # REDHAT
+- . /etc/init.d/functions
+- echo_rc()
+- {
+- #echo -n " [${_SMSG[${_RC_RV}]}] "
+- if test ${_RC_RV} = 0; then
+- success " [${_SMSG[${_RC_RV}]}] "
+- else
+- failure " [${_SMSG[${_RC_RV}]}] "
+- fi
+- }
+- elif test -e /lib/lsb/init-functions; then
+- # LSB
+- . /lib/lsb/init-functions
+- if alias log_success_msg >/dev/null 2>/dev/null; then
+- echo_rc()
+- {
+- echo " [${_SMSG[${_RC_RV}]}] "
+- }
+- else
+- echo_rc()
+- {
+- if test ${_RC_RV} = 0; then
+- log_success_msg " [${_SMSG[${_RC_RV}]}] "
+- else
+- log_failure_msg " [${_SMSG[${_RC_RV}]}] "
+- fi
+- }
+- fi
+- else
+- # emulate it
+- echo_rc()
+- {
+- echo " [${_SMSG[${_RC_RV}]}] "
+- }
+- fi
+- rc_reset() { _RC_RV=0; }
+- rc_failed()
+- {
++fi
++
++
++
++echo_rc() {
++ echo
++ printhl "Return Status: ${_SMSG[${_RC_RV}]}"
++}
++
++
++rc_reset() { _RC_RV=0; }
++
++
++rc_failed() {
+ if test -z "$1"; then
+- _RC_RV=1;
++ _RC_RV=1;
+ elif test "$1" != "0"; then
+- _RC_RV=$1;
+- fi
++ _RC_RV=$1;
++ fi
+ return ${_RC_RV}
+- }
+- rc_check()
+- {
++}
++
++rc_check() {
+ return rc_failed $?
+- }
+- rc_status()
+- {
++}
++
++
++rc_status() {
+ rc_failed $?
+ if test "$1" = "-r"; then _RC_RV=0; shift; fi
+ if test "$1" = "-s"; then rc_failed 5; echo_rc; rc_failed 3; shift; fi
+@@ -140,26 +102,24 @@
+ if test "$1" = "-v"; then echo_rc; shift; fi
+ if test "$1" = "-r"; then _RC_RV=0; shift; fi
+ return ${_RC_RV}
+- }
+- rc_exit() { exit ${_RC_RV}; }
+- rc_active()
+- {
++}
++
++
++rc_exit() { exit ${_RC_RV}; }
++
++
++rc_active() {
+ if test -z "$RUNLEVEL"; then read RUNLEVEL REST < <(/sbin/runlevel); fi
+ if test -e /etc/init.d/S[0-9][0-9]${1}; then return 0; fi
+ return 1
+- }
+-fi
++}
+
+-if ! which usleep >&/dev/null
+-then
+- usleep()
+- {
+- if [ -n "$1" ]
+- then
+- sleep $(( $1 / 1000000 ))
+- fi
+- }
+-fi
++usleep() {
++ if [ -n "$1" ]
++ then
++ sleep $(( $1 / 1000000 ))
++ fi
++}
+
+ # Reset status of this service
+ rc_reset
+@@ -235,10 +195,12 @@
+ start()
+ {
+ if [ -f $LOCKFILE ]; then
+- echo -e "xendomains already running (lockfile exists)"
++ stat_busy "xendomains already running (lockfile exists)"
++ stat_fail
+ return;
+ fi
+
++ printhl "Starting Xen Domains"
+ saved_domains=" "
+ if [ "$XENDOMAINS_RESTORE" = "true" ] &&
+ contains_something "$XENDOMAINS_SAVE"
+@@ -299,6 +261,7 @@
+ fi
+ done
+ fi
++ add_daemon xendomains
+ }
+
+ all_zombies()
+@@ -352,7 +315,7 @@
+ if test "$XENDOMAINS_AUTO_ONLY" = "true"; then
+ rdnames
+ fi
+- echo -n "Shutting down Xen domains:"
++ printhl "Shutting down Xen domains"
+ name=;id=
+ while read LN; do
+ parseln "$LN" || continue
+@@ -465,6 +428,7 @@
+ rm -f $LOCKFILE
+
+ exec 2>&3
++ rm_daemon xendomains
+ }
+
+ check_domain_up()
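The hunks above drop the multi-distro emulation but keep the SUSE-style rc_* calling convention that the rest of xendomains relies on: a script resets the status, runs a command, lets rc_status capture the exit code, and exits through rc_exit. A minimal usage sketch, assuming the functions are defined as in the patched script:

# Sketch of the rc_* convention; rc_reset/rc_status/rc_exit as defined above.
rc_reset                    # start clean: _RC_RV=0
xm list > /dev/null 2>&1    # any command; its exit code feeds rc_status
rc_status -v                # record $?; -v makes echo_rc print "Return Status: ..."
rc_exit                     # exit with the recorded status
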
+diff -Naur orig.xen-4.1.1//tools/hotplug/Linux/init.d/xen-watchdog xen-4.1.1//tools/hotplug/Linux/init.d/xen-watchdog
+--- orig.xen-4.1.1//tools/hotplug/Linux/init.d/xen-watchdog 2011-07-03 03:08:44.957080397 -0700
++++ xen-4.1.1//tools/hotplug/Linux/init.d/xen-watchdog 2011-07-05 13:20:22.515289867 -0700
+@@ -17,49 +17,32 @@
+ ### END INIT INFO
+ #
+
++. /etc/rc.conf
++. /etc/rc.d/functions
++
+ DAEMON=/usr/sbin/xenwatchdogd
+ base=$(basename $DAEMON)
++initname="xen-watchdog"
+
+-# Source function library.
+-if [ -e /etc/init.d/functions ] ; then
+- . /etc/init.d/functions
+-elif [ -e /lib/lsb/init-functions ] ; then
+- . /lib/lsb/init-functions
+- success () {
+- log_success_msg $*
+- }
+- failure () {
+- log_failure_msg $*
+- }
+-else
+- success () {
+- echo $*
+- }
+- failure () {
+- echo $*
+- }
+-fi
+
+ start() {
+ local r
+- echo -n $"Starting domain watchdog daemon: "
++ stat_busy "Starting domain watchdog daemon"
+
+ $DAEMON 30 15
+ r=$?
+- [ "$r" -eq 0 ] && success $"$base startup" || failure $"$base startup"
+- echo
++	if [ "$r" -eq 0 ]; then stat_done; add_daemon $initname; else stat_fail; fi
+
+ return $r
+ }
+
+ stop() {
+ local r
+- echo -n $"Stopping domain watchdog daemon: "
++ stat_busy "Stopping domain watchdog daemon"
+
+ killall -USR1 $base 2>/dev/null
+ r=$?
+- [ "$r" -eq 0 ] && success $"$base stop" || failure $"$base stop"
+- echo
++	if [ "$r" -eq 0 ]; then stat_done; rm_daemon $initname; else stat_fail; fi
+
+ return $r
+ }
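
All three init scripts converge on the same Arch/Parabola rc.d idiom: stat_busy announces an action, stat_done or stat_fail reports its outcome, and add_daemon/rm_daemon maintain the running-daemons list. A minimal sketch of that pattern; "mydaemon" and its path are hypothetical, while the helper functions are the real ones sourced from /etc/rc.d/functions:

#!/bin/bash
# Minimal sketch of the Arch/Parabola rc.d pattern the patches converge on.
. /etc/rc.conf
. /etc/rc.d/functions

case "$1" in
    start)
        stat_busy "Starting mydaemon"
        if /usr/sbin/mydaemon; then
            add_daemon mydaemon    # record it in the running-daemons list
            stat_done
        else
            stat_fail
        fi
        ;;
    stop)
        stat_busy "Stopping mydaemon"
        if killall mydaemon 2> /dev/null; then
            rm_daemon mydaemon
            stat_done
        else
            stat_fail
        fi
        ;;
    restart)
        "$0" stop
        "$0" start
        ;;
    *)
        echo "usage: $0 {start|stop|restart}"
        ;;
esac
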
diff --git a/kernels/xen/xen.patch b/kernels/xen/xen.patch
new file mode 100644
index 000000000..8b1b5585d
--- /dev/null
+++ b/kernels/xen/xen.patch
@@ -0,0 +1,21 @@
+--- xen-4.0.1.orig/Config.mk 2010-08-25 12:22:44.000000000 +0200
++++ xen-4.0.1/Config.mk 2010-11-02 23:38:11.575000000 +0100
+@@ -187,4 +187,4 @@
+ CONFIG_MINITERM ?= n
+ CONFIG_LOMOUNT ?= n
+
+--include $(XEN_ROOT)/.config
++#-include $(XEN_ROOT)/.config
+
+--- xen-4.0.1/Config.mk.orig 2010-08-25 11:22:44.000000000 +0100
++++ xen-4.0.1/Config.mk 2011-01-29 17:40:43.000000000 +0000
+@@ -135,6 +135,8 @@
+
+ LDFLAGS += $(foreach i, $(EXTRA_LIB), -L$(i))
+ CFLAGS += $(foreach i, $(EXTRA_INCLUDES), -I$(i))
++# temporary compile fix for rawhide
++CFLAGS += -Wunused-but-set-variable -Wno-error=unused-but-set-variable -Wuninitialized -Wno-error=uninitialized
+
+ EMBEDDED_EXTRA_CFLAGS := -nopie -fno-stack-protector -fno-stack-protector-all
+ EMBEDDED_EXTRA_CFLAGS += -fno-exceptions
+
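The first hunk comments out the optional .config include, presumably so a leftover developer configuration cannot silently override the packaged build settings. The second works around GCC 4.6, whose new -Wunused-but-set-variable warning and stricter -Wuninitialized analysis become hard errors under Xen's -Werror builds; the -Wno-error= forms demote them back to plain warnings. A minimal reproduction, assuming gcc >= 4.6 (the file name is hypothetical):

# gcc >= 4.6 with -Wall -Werror treats a set-but-unused variable as fatal
echo 'int main(void) { int x; x = 1; return 0; }' > unused.c
gcc -Wall -Werror -c unused.c
# fails: variable 'x' set but not used [-Werror=unused-but-set-variable]
gcc -Wall -Werror -Wno-error=unused-but-set-variable -c unused.c
# same diagnostic, now only a warning; unused.o is produced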