From dc2c282b6aac820a3c27d8a7a68c0386300db663 Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Wed, 4 Mar 2015 17:07:12 +0100 Subject: import: rename download code from "import" to "pull" That way we can call the code for local container/VM imports "import" without confusion. --- src/import/import-common.c | 545 --------------------------- src/import/import-common.h | 41 --- src/import/import-dkr.c | 896 --------------------------------------------- src/import/import-dkr.h | 36 -- src/import/import-job.c | 746 ------------------------------------- src/import/import-job.h | 122 ------ src/import/import-raw.c | 518 -------------------------- src/import/import-raw.h | 37 -- src/import/import-tar.c | 414 --------------------- src/import/import-tar.h | 37 -- src/import/pull-common.c | 545 +++++++++++++++++++++++++++ src/import/pull-common.h | 41 +++ src/import/pull-dkr.c | 896 +++++++++++++++++++++++++++++++++++++++++++++ src/import/pull-dkr.h | 36 ++ src/import/pull-job.c | 746 +++++++++++++++++++++++++++++++++++++ src/import/pull-job.h | 122 ++++++ src/import/pull-raw.c | 518 ++++++++++++++++++++++++++ src/import/pull-raw.h | 37 ++ src/import/pull-tar.c | 414 +++++++++++++++++++++ src/import/pull-tar.h | 37 ++ src/import/pull.c | 50 +-- 21 files changed, 3417 insertions(+), 3417 deletions(-) delete mode 100644 src/import/import-common.c delete mode 100644 src/import/import-common.h delete mode 100644 src/import/import-dkr.c delete mode 100644 src/import/import-dkr.h delete mode 100644 src/import/import-job.c delete mode 100644 src/import/import-job.h delete mode 100644 src/import/import-raw.c delete mode 100644 src/import/import-raw.h delete mode 100644 src/import/import-tar.c delete mode 100644 src/import/import-tar.h create mode 100644 src/import/pull-common.c create mode 100644 src/import/pull-common.h create mode 100644 src/import/pull-dkr.c create mode 100644 src/import/pull-dkr.h create mode 100644 src/import/pull-job.c create mode 100644 src/import/pull-job.h create mode 100644 src/import/pull-raw.c create mode 100644 src/import/pull-raw.h create mode 100644 src/import/pull-tar.c create mode 100644 src/import/pull-tar.h (limited to 'src') diff --git a/src/import/import-common.c b/src/import/import-common.c deleted file mode 100644 index f10a453eed..0000000000 --- a/src/import/import-common.c +++ /dev/null @@ -1,545 +0,0 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - -/*** - This file is part of systemd. - - Copyright 2015 Lennart Poettering - - systemd is free software; you can redistribute it and/or modify it - under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - systemd is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with systemd; If not, see . 
-***/ - -#include - -#include "util.h" -#include "strv.h" -#include "copy.h" -#include "btrfs-util.h" -#include "capability.h" -#include "import-job.h" -#include "import-common.h" - -#define FILENAME_ESCAPE "/.#\"\'" - -int import_find_old_etags(const char *url, const char *image_root, int dt, const char *prefix, const char *suffix, char ***etags) { - _cleanup_free_ char *escaped_url = NULL; - _cleanup_closedir_ DIR *d = NULL; - _cleanup_strv_free_ char **l = NULL; - struct dirent *de; - int r; - - assert(url); - assert(etags); - - if (!image_root) - image_root = "/var/lib/machines"; - - escaped_url = xescape(url, FILENAME_ESCAPE); - if (!escaped_url) - return -ENOMEM; - - d = opendir(image_root); - if (!d) { - if (errno == ENOENT) { - *etags = NULL; - return 0; - } - - return -errno; - } - - FOREACH_DIRENT_ALL(de, d, return -errno) { - const char *a, *b; - char *u; - - if (de->d_type != DT_UNKNOWN && - de->d_type != dt) - continue; - - if (prefix) { - a = startswith(de->d_name, prefix); - if (!a) - continue; - } else - a = de->d_name; - - a = startswith(a, escaped_url); - if (!a) - continue; - - a = startswith(a, "."); - if (!a) - continue; - - if (suffix) { - b = endswith(de->d_name, suffix); - if (!b) - continue; - } else - b = strchr(de->d_name, 0); - - if (a >= b) - continue; - - u = cunescape_length(a, b - a); - if (!u) - return -ENOMEM; - - if (!http_etag_is_valid(u)) { - free(u); - continue; - } - - r = strv_consume(&l, u); - if (r < 0) - return r; - } - - *etags = l; - l = NULL; - - return 0; -} - -int import_make_local_copy(const char *final, const char *image_root, const char *local, bool force_local) { - const char *p; - int r; - - assert(final); - assert(local); - - if (!image_root) - image_root = "/var/lib/machines"; - - p = strjoina(image_root, "/", local); - - if (force_local) { - (void) btrfs_subvol_remove(p); - (void) rm_rf_dangerous(p, false, true, false); - } - - r = btrfs_subvol_snapshot(final, p, false, false); - if (r == -ENOTTY) { - r = copy_tree(final, p, false); - if (r < 0) - return log_error_errno(r, "Failed to copy image: %m"); - } else if (r < 0) - return log_error_errno(r, "Failed to create local image: %m"); - - log_info("Created new local image '%s'.", local); - - return 0; -} - -int import_make_read_only_fd(int fd) { - int r; - - assert(fd >= 0); - - /* First, let's make this a read-only subvolume if it refers - * to a subvolume */ - r = btrfs_subvol_set_read_only_fd(fd, true); - if (r == -ENOTTY || r == -ENOTDIR || r == -EINVAL) { - struct stat st; - - /* This doesn't refer to a subvolume, or the file - * system isn't even btrfs. 
In that, case fall back to - * chmod()ing */ - - r = fstat(fd, &st); - if (r < 0) - return log_error_errno(errno, "Failed to stat temporary image: %m"); - - /* Drop "w" flag */ - if (fchmod(fd, st.st_mode & 07555) < 0) - return log_error_errno(errno, "Failed to chmod() final image: %m"); - - return 0; - - } else if (r < 0) - return log_error_errno(r, "Failed to make subvolume read-only: %m"); - - return 0; -} - -int import_make_read_only(const char *path) { - _cleanup_close_ int fd = 1; - - fd = open(path, O_RDONLY|O_NOCTTY|O_CLOEXEC); - if (fd < 0) - return log_error_errno(errno, "Failed to open %s: %m", path); - - return import_make_read_only_fd(fd); -} - -int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret) { - _cleanup_free_ char *escaped_url = NULL; - char *path; - - assert(url); - assert(ret); - - if (!image_root) - image_root = "/var/lib/machines"; - - escaped_url = xescape(url, FILENAME_ESCAPE); - if (!escaped_url) - return -ENOMEM; - - if (etag) { - _cleanup_free_ char *escaped_etag = NULL; - - escaped_etag = xescape(etag, FILENAME_ESCAPE); - if (!escaped_etag) - return -ENOMEM; - - path = strjoin(image_root, "/", strempty(prefix), escaped_url, ".", escaped_etag, strempty(suffix), NULL); - } else - path = strjoin(image_root, "/", strempty(prefix), escaped_url, strempty(suffix), NULL); - if (!path) - return -ENOMEM; - - *ret = path; - return 0; -} - -int import_make_verification_jobs( - ImportJob **ret_checksum_job, - ImportJob **ret_signature_job, - ImportVerify verify, - const char *url, - CurlGlue *glue, - ImportJobFinished on_finished, - void *userdata) { - - _cleanup_(import_job_unrefp) ImportJob *checksum_job = NULL, *signature_job = NULL; - int r; - - assert(ret_checksum_job); - assert(ret_signature_job); - assert(verify >= 0); - assert(verify < _IMPORT_VERIFY_MAX); - assert(url); - assert(glue); - - if (verify != IMPORT_VERIFY_NO) { - _cleanup_free_ char *checksum_url = NULL; - - /* Queue job for the SHA256SUMS file for the image */ - r = import_url_change_last_component(url, "SHA256SUMS", &checksum_url); - if (r < 0) - return r; - - r = import_job_new(&checksum_job, checksum_url, glue, userdata); - if (r < 0) - return r; - - checksum_job->on_finished = on_finished; - checksum_job->uncompressed_max = checksum_job->compressed_max = 1ULL * 1024ULL * 1024ULL; - } - - if (verify == IMPORT_VERIFY_SIGNATURE) { - _cleanup_free_ char *signature_url = NULL; - - /* Queue job for the SHA256SUMS.gpg file for the image. 
*/ - r = import_url_change_last_component(url, "SHA256SUMS.gpg", &signature_url); - if (r < 0) - return r; - - r = import_job_new(&signature_job, signature_url, glue, userdata); - if (r < 0) - return r; - - signature_job->on_finished = on_finished; - signature_job->uncompressed_max = signature_job->compressed_max = 1ULL * 1024ULL * 1024ULL; - } - - *ret_checksum_job = checksum_job; - *ret_signature_job = signature_job; - - checksum_job = signature_job = NULL; - - return 0; -} - -int import_verify( - ImportJob *main_job, - ImportJob *checksum_job, - ImportJob *signature_job) { - - _cleanup_close_pair_ int gpg_pipe[2] = { -1, -1 }; - _cleanup_free_ char *fn = NULL; - _cleanup_close_ int sig_file = -1; - const char *p, *line; - char sig_file_path[] = "/tmp/sigXXXXXX", gpg_home[] = "/tmp/gpghomeXXXXXX"; - _cleanup_sigkill_wait_ pid_t pid = 0; - bool gpg_home_created = false; - int r; - - assert(main_job); - assert(main_job->state == IMPORT_JOB_DONE); - - if (!checksum_job) - return 0; - - assert(main_job->calc_checksum); - assert(main_job->checksum); - assert(checksum_job->state == IMPORT_JOB_DONE); - - if (!checksum_job->payload || checksum_job->payload_size <= 0) { - log_error("Checksum is empty, cannot verify."); - return -EBADMSG; - } - - r = import_url_last_component(main_job->url, &fn); - if (r < 0) - return log_oom(); - - if (!filename_is_valid(fn)) { - log_error("Cannot verify checksum, could not determine valid server-side file name."); - return -EBADMSG; - } - - line = strjoina(main_job->checksum, " *", fn, "\n"); - - p = memmem(checksum_job->payload, - checksum_job->payload_size, - line, - strlen(line)); - - if (!p || (p != (char*) checksum_job->payload && p[-1] != '\n')) { - log_error("Checksum did not check out, payload has been tempered with."); - return -EBADMSG; - } - - log_info("SHA256 checksum of %s is valid.", main_job->url); - - if (!signature_job) - return 0; - - assert(signature_job->state == IMPORT_JOB_DONE); - - if (!signature_job->payload || signature_job->payload_size <= 0) { - log_error("Signature is empty, cannot verify."); - return -EBADMSG; - } - - r = pipe2(gpg_pipe, O_CLOEXEC); - if (r < 0) - return log_error_errno(errno, "Failed to create pipe for gpg: %m"); - - sig_file = mkostemp(sig_file_path, O_RDWR); - if (sig_file < 0) - return log_error_errno(errno, "Failed to create temporary file: %m"); - - r = loop_write(sig_file, signature_job->payload, signature_job->payload_size, false); - if (r < 0) { - log_error_errno(r, "Failed to write to temporary file: %m"); - goto finish; - } - - if (!mkdtemp(gpg_home)) { - r = log_error_errno(errno, "Failed to create tempory home for gpg: %m"); - goto finish; - } - - gpg_home_created = true; - - pid = fork(); - if (pid < 0) - return log_error_errno(errno, "Failed to fork off gpg: %m"); - if (pid == 0) { - const char *cmd[] = { - "gpg", - "--no-options", - "--no-default-keyring", - "--no-auto-key-locate", - "--no-auto-check-trustdb", - "--batch", - "--trust-model=always", - NULL, /* --homedir= */ - NULL, /* --keyring= */ - NULL, /* --verify */ - NULL, /* signature file */ - NULL, /* dash */ - NULL /* trailing NULL */ - }; - unsigned k = ELEMENTSOF(cmd) - 6; - int null_fd; - - /* Child */ - - reset_all_signal_handlers(); - reset_signal_mask(); - assert_se(prctl(PR_SET_PDEATHSIG, SIGTERM) == 0); - - gpg_pipe[1] = safe_close(gpg_pipe[1]); - - if (dup2(gpg_pipe[0], STDIN_FILENO) != STDIN_FILENO) { - log_error_errno(errno, "Failed to dup2() fd: %m"); - _exit(EXIT_FAILURE); - } - - if (gpg_pipe[0] != STDIN_FILENO) - gpg_pipe[0] = 
safe_close(gpg_pipe[0]); - - null_fd = open("/dev/null", O_WRONLY|O_NOCTTY); - if (null_fd < 0) { - log_error_errno(errno, "Failed to open /dev/null: %m"); - _exit(EXIT_FAILURE); - } - - if (dup2(null_fd, STDOUT_FILENO) != STDOUT_FILENO) { - log_error_errno(errno, "Failed to dup2() fd: %m"); - _exit(EXIT_FAILURE); - } - - if (null_fd != STDOUT_FILENO) - null_fd = safe_close(null_fd); - - cmd[k++] = strjoina("--homedir=", gpg_home); - - /* We add the user keyring only to the command line - * arguments, if it's around since gpg fails - * otherwise. */ - if (access(USER_KEYRING_PATH, F_OK) >= 0) - cmd[k++] = "--keyring=" USER_KEYRING_PATH; - else - cmd[k++] = "--keyring=" VENDOR_KEYRING_PATH; - - cmd[k++] = "--verify"; - cmd[k++] = sig_file_path; - cmd[k++] = "-"; - cmd[k++] = NULL; - - fd_cloexec(STDIN_FILENO, false); - fd_cloexec(STDOUT_FILENO, false); - fd_cloexec(STDERR_FILENO, false); - - execvp("gpg2", (char * const *) cmd); - execvp("gpg", (char * const *) cmd); - log_error_errno(errno, "Failed to execute gpg: %m"); - _exit(EXIT_FAILURE); - } - - gpg_pipe[0] = safe_close(gpg_pipe[0]); - - r = loop_write(gpg_pipe[1], checksum_job->payload, checksum_job->payload_size, false); - if (r < 0) { - log_error_errno(r, "Failed to write to pipe: %m"); - goto finish; - } - - gpg_pipe[1] = safe_close(gpg_pipe[1]); - - r = wait_for_terminate_and_warn("gpg", pid, true); - pid = 0; - if (r < 0) - goto finish; - if (r > 0) { - log_error("Signature verification failed."); - r = -EBADMSG; - } else { - log_info("Signature verification succeeded."); - r = 0; - } - -finish: - if (sig_file >= 0) - unlink(sig_file_path); - - if (gpg_home_created) - rm_rf_dangerous(gpg_home, false, true, false); - - return r; -} - -int import_fork_tar(const char *path, pid_t *ret) { - _cleanup_close_pair_ int pipefd[2] = { -1, -1 }; - pid_t pid; - int r; - - assert(path); - assert(ret); - - if (pipe2(pipefd, O_CLOEXEC) < 0) - return log_error_errno(errno, "Failed to create pipe for tar: %m"); - - pid = fork(); - if (pid < 0) - return log_error_errno(errno, "Failed to fork off tar: %m"); - - if (pid == 0) { - int null_fd; - uint64_t retain = - (1ULL << CAP_CHOWN) | - (1ULL << CAP_FOWNER) | - (1ULL << CAP_FSETID) | - (1ULL << CAP_MKNOD) | - (1ULL << CAP_SETFCAP) | - (1ULL << CAP_DAC_OVERRIDE); - - /* Child */ - - reset_all_signal_handlers(); - reset_signal_mask(); - assert_se(prctl(PR_SET_PDEATHSIG, SIGTERM) == 0); - - pipefd[1] = safe_close(pipefd[1]); - - if (dup2(pipefd[0], STDIN_FILENO) != STDIN_FILENO) { - log_error_errno(errno, "Failed to dup2() fd: %m"); - _exit(EXIT_FAILURE); - } - - if (pipefd[0] != STDIN_FILENO) - pipefd[0] = safe_close(pipefd[0]); - - null_fd = open("/dev/null", O_WRONLY|O_NOCTTY); - if (null_fd < 0) { - log_error_errno(errno, "Failed to open /dev/null: %m"); - _exit(EXIT_FAILURE); - } - - if (dup2(null_fd, STDOUT_FILENO) != STDOUT_FILENO) { - log_error_errno(errno, "Failed to dup2() fd: %m"); - _exit(EXIT_FAILURE); - } - - if (null_fd != STDOUT_FILENO) - null_fd = safe_close(null_fd); - - fd_cloexec(STDIN_FILENO, false); - fd_cloexec(STDOUT_FILENO, false); - fd_cloexec(STDERR_FILENO, false); - - if (unshare(CLONE_NEWNET) < 0) - log_error_errno(errno, "Failed to lock tar into network namespace, ignoring: %m"); - - r = capability_bounding_set_drop(~retain, true); - if (r < 0) - log_error_errno(r, "Failed to drop capabilities, ignoring: %m"); - - execlp("tar", "tar", "--numeric-owner", "-C", path, "-px", NULL); - log_error_errno(errno, "Failed to execute tar: %m"); - _exit(EXIT_FAILURE); - } - - 
pipefd[0] = safe_close(pipefd[0]); - r = pipefd[1]; - pipefd[1] = -1; - - *ret = pid; - - return r; -} diff --git a/src/import/import-common.h b/src/import/import-common.h deleted file mode 100644 index f6b4268fd7..0000000000 --- a/src/import/import-common.h +++ /dev/null @@ -1,41 +0,0 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - -#pragma once - -/*** - This file is part of systemd. - - Copyright 2015 Lennart Poettering - - systemd is free software; you can redistribute it and/or modify it - under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - systemd is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with systemd; If not, see . -***/ - -#include - -#include "import-job.h" -#include "import-util.h" - -int import_make_local_copy(const char *final, const char *root, const char *local, bool force_local); - -int import_find_old_etags(const char *url, const char *root, int dt, const char *prefix, const char *suffix, char ***etags); - -int import_make_read_only_fd(int fd); -int import_make_read_only(const char *path); - -int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret); - -int import_make_verification_jobs(ImportJob **ret_checksum_job, ImportJob **ret_signature_job, ImportVerify verify, const char *url, CurlGlue *glue, ImportJobFinished on_finished, void *userdata); -int import_verify(ImportJob *main_job, ImportJob *checksum_job, ImportJob *signature_job); - -int import_fork_tar(const char *path, pid_t *ret); diff --git a/src/import/import-dkr.c b/src/import/import-dkr.c deleted file mode 100644 index 2d4e9b398f..0000000000 --- a/src/import/import-dkr.c +++ /dev/null @@ -1,896 +0,0 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - -/*** - This file is part of systemd. - - Copyright 2014 Lennart Poettering - - systemd is free software; you can redistribute it and/or modify it - under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - systemd is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with systemd; If not, see . 
-***/ - -#include -#include - -#include "sd-daemon.h" -#include "json.h" -#include "strv.h" -#include "btrfs-util.h" -#include "utf8.h" -#include "mkdir.h" -#include "path-util.h" -#include "import-util.h" -#include "curl-util.h" -#include "aufs-util.h" -#include "import-job.h" -#include "import-common.h" -#include "import-dkr.h" - -typedef enum DkrProgress { - DKR_SEARCHING, - DKR_RESOLVING, - DKR_METADATA, - DKR_DOWNLOADING, - DKR_COPYING, -} DkrProgress; - -struct DkrImport { - sd_event *event; - CurlGlue *glue; - - char *index_url; - char *image_root; - - ImportJob *images_job; - ImportJob *tags_job; - ImportJob *ancestry_job; - ImportJob *json_job; - ImportJob *layer_job; - - char *name; - char *tag; - char *id; - - char *response_token; - char **response_registries; - - char **ancestry; - unsigned n_ancestry; - unsigned current_ancestry; - - DkrImportFinished on_finished; - void *userdata; - - char *local; - bool force_local; - bool grow_machine_directory; - - char *temp_path; - char *final_path; - - pid_t tar_pid; -}; - -#define PROTOCOL_PREFIX "https://" - -#define HEADER_TOKEN "X-Do" /* the HTTP header for the auth token */ "cker-Token:" -#define HEADER_REGISTRY "X-Do" /*the HTTP header for the registry */ "cker-Endpoints:" - -#define LAYERS_MAX 2048 - -static void dkr_import_job_on_finished(ImportJob *j); - -DkrImport* dkr_import_unref(DkrImport *i) { - if (!i) - return NULL; - - if (i->tar_pid > 1) { - (void) kill_and_sigcont(i->tar_pid, SIGKILL); - (void) wait_for_terminate(i->tar_pid, NULL); - } - - import_job_unref(i->images_job); - import_job_unref(i->tags_job); - import_job_unref(i->ancestry_job); - import_job_unref(i->json_job); - import_job_unref(i->layer_job); - - curl_glue_unref(i->glue); - sd_event_unref(i->event); - - if (i->temp_path) { - (void) btrfs_subvol_remove(i->temp_path); - (void) rm_rf_dangerous(i->temp_path, false, true, false); - free(i->temp_path); - } - - free(i->name); - free(i->tag); - free(i->id); - free(i->response_token); - free(i->response_registries); - strv_free(i->ancestry); - free(i->final_path); - free(i->index_url); - free(i->image_root); - free(i->local); - free(i); - - return NULL; -} - -int dkr_import_new( - DkrImport **ret, - sd_event *event, - const char *index_url, - const char *image_root, - DkrImportFinished on_finished, - void *userdata) { - - _cleanup_(dkr_import_unrefp) DkrImport *i = NULL; - char *e; - int r; - - assert(ret); - assert(index_url); - - if (!http_url_is_valid(index_url)) - return -EINVAL; - - i = new0(DkrImport, 1); - if (!i) - return -ENOMEM; - - i->on_finished = on_finished; - i->userdata = userdata; - - i->image_root = strdup(image_root ?: "/var/lib/machines"); - if (!i->image_root) - return -ENOMEM; - - i->grow_machine_directory = path_startswith(i->image_root, "/var/lib/machines"); - - i->index_url = strdup(index_url); - if (!i->index_url) - return -ENOMEM; - - e = endswith(i->index_url, "/"); - if (e) - *e = 0; - - if (event) - i->event = sd_event_ref(event); - else { - r = sd_event_default(&i->event); - if (r < 0) - return r; - } - - r = curl_glue_new(&i->glue, i->event); - if (r < 0) - return r; - - i->glue->on_finished = import_job_curl_on_finished; - i->glue->userdata = i; - - *ret = i; - i = NULL; - - return 0; -} - -static void dkr_import_report_progress(DkrImport *i, DkrProgress p) { - unsigned percent; - - assert(i); - - switch (p) { - - case DKR_SEARCHING: - percent = 0; - if (i->images_job) - percent += i->images_job->progress_percent * 5 / 100; - break; - - case DKR_RESOLVING: - percent = 5; - if 
(i->tags_job) - percent += i->tags_job->progress_percent * 5 / 100; - break; - - case DKR_METADATA: - percent = 10; - if (i->ancestry_job) - percent += i->ancestry_job->progress_percent * 5 / 100; - if (i->json_job) - percent += i->json_job->progress_percent * 5 / 100; - break; - - case DKR_DOWNLOADING: - percent = 20; - percent += 75 * i->current_ancestry / MAX(1U, i->n_ancestry); - if (i->layer_job) - percent += i->layer_job->progress_percent * 75 / MAX(1U, i->n_ancestry) / 100; - - break; - - case DKR_COPYING: - percent = 95; - break; - - default: - assert_not_reached("Unknown progress state"); - } - - sd_notifyf(false, "X_IMPORT_PROGRESS=%u", percent); - log_debug("Combined progress %u%%", percent); -} - -static int parse_id(const void *payload, size_t size, char **ret) { - _cleanup_free_ char *buf = NULL, *id = NULL, *other = NULL; - union json_value v = {}; - void *json_state = NULL; - const char *p; - int t; - - assert(payload); - assert(ret); - - if (size <= 0) - return -EBADMSG; - - if (memchr(payload, 0, size)) - return -EBADMSG; - - buf = strndup(payload, size); - if (!buf) - return -ENOMEM; - - p = buf; - t = json_tokenize(&p, &id, &v, &json_state, NULL); - if (t < 0) - return t; - if (t != JSON_STRING) - return -EBADMSG; - - t = json_tokenize(&p, &other, &v, &json_state, NULL); - if (t < 0) - return t; - if (t != JSON_END) - return -EBADMSG; - - if (!dkr_id_is_valid(id)) - return -EBADMSG; - - *ret = id; - id = NULL; - - return 0; -} - -static int parse_ancestry(const void *payload, size_t size, char ***ret) { - _cleanup_free_ char *buf = NULL; - void *json_state = NULL; - const char *p; - enum { - STATE_BEGIN, - STATE_ITEM, - STATE_COMMA, - STATE_END, - } state = STATE_BEGIN; - _cleanup_strv_free_ char **l = NULL; - size_t n = 0, allocated = 0; - - if (size <= 0) - return -EBADMSG; - - if (memchr(payload, 0, size)) - return -EBADMSG; - - buf = strndup(payload, size); - if (!buf) - return -ENOMEM; - - p = buf; - for (;;) { - _cleanup_free_ char *str; - union json_value v = {}; - int t; - - t = json_tokenize(&p, &str, &v, &json_state, NULL); - if (t < 0) - return t; - - switch (state) { - - case STATE_BEGIN: - if (t == JSON_ARRAY_OPEN) - state = STATE_ITEM; - else - return -EBADMSG; - - break; - - case STATE_ITEM: - if (t == JSON_STRING) { - if (!dkr_id_is_valid(str)) - return -EBADMSG; - - if (n+1 > LAYERS_MAX) - return -EFBIG; - - if (!GREEDY_REALLOC(l, allocated, n + 2)) - return -ENOMEM; - - l[n++] = str; - str = NULL; - l[n] = NULL; - - state = STATE_COMMA; - - } else if (t == JSON_ARRAY_CLOSE) - state = STATE_END; - else - return -EBADMSG; - - break; - - case STATE_COMMA: - if (t == JSON_COMMA) - state = STATE_ITEM; - else if (t == JSON_ARRAY_CLOSE) - state = STATE_END; - else - return -EBADMSG; - break; - - case STATE_END: - if (t == JSON_END) { - - if (strv_isempty(l)) - return -EBADMSG; - - if (!strv_is_uniq(l)) - return -EBADMSG; - - l = strv_reverse(l); - - *ret = l; - l = NULL; - return 0; - } else - return -EBADMSG; - } - - } -} - -static const char *dkr_import_current_layer(DkrImport *i) { - assert(i); - - if (strv_isempty(i->ancestry)) - return NULL; - - return i->ancestry[i->current_ancestry]; -} - -static const char *dkr_import_current_base_layer(DkrImport *i) { - assert(i); - - if (strv_isempty(i->ancestry)) - return NULL; - - if (i->current_ancestry <= 0) - return NULL; - - return i->ancestry[i->current_ancestry-1]; -} - -static int dkr_import_add_token(DkrImport *i, ImportJob *j) { - const char *t; - - assert(i); - assert(j); - - if (i->response_token) - t = 
strjoina("Authorization: Token ", i->response_token); - else - t = HEADER_TOKEN " true"; - - j->request_header = curl_slist_new("Accept: application/json", t, NULL); - if (!j->request_header) - return -ENOMEM; - - return 0; -} - -static bool dkr_import_is_done(DkrImport *i) { - assert(i); - assert(i->images_job); - - if (i->images_job->state != IMPORT_JOB_DONE) - return false; - - if (!i->tags_job || i->tags_job->state != IMPORT_JOB_DONE) - return false; - - if (!i->ancestry_job || i->ancestry_job->state != IMPORT_JOB_DONE) - return false; - - if (!i->json_job || i->json_job->state != IMPORT_JOB_DONE) - return false; - - if (i->layer_job && i->layer_job->state != IMPORT_JOB_DONE) - return false; - - if (dkr_import_current_layer(i)) - return false; - - return true; -} - -static int dkr_import_make_local_copy(DkrImport *i) { - int r; - - assert(i); - - if (!i->local) - return 0; - - if (!i->final_path) { - i->final_path = strjoin(i->image_root, "/.dkr-", i->id, NULL); - if (!i->final_path) - return log_oom(); - } - - r = import_make_local_copy(i->final_path, i->image_root, i->local, i->force_local); - if (r < 0) - return r; - - return 0; -} - -static int dkr_import_job_on_open_disk(ImportJob *j) { - const char *base; - DkrImport *i; - int r; - - assert(j); - assert(j->userdata); - - i = j->userdata; - assert(i->layer_job == j); - assert(i->final_path); - assert(!i->temp_path); - assert(i->tar_pid <= 0); - - r = tempfn_random(i->final_path, &i->temp_path); - if (r < 0) - return log_oom(); - - mkdir_parents_label(i->temp_path, 0700); - - base = dkr_import_current_base_layer(i); - if (base) { - const char *base_path; - - base_path = strjoina(i->image_root, "/.dkr-", base); - r = btrfs_subvol_snapshot(base_path, i->temp_path, false, true); - } else - r = btrfs_subvol_make(i->temp_path); - if (r < 0) - return log_error_errno(r, "Failed to make btrfs subvolume %s: %m", i->temp_path); - - j->disk_fd = import_fork_tar(i->temp_path, &i->tar_pid); - if (j->disk_fd < 0) - return j->disk_fd; - - return 0; -} - -static void dkr_import_job_on_progress(ImportJob *j) { - DkrImport *i; - - assert(j); - assert(j->userdata); - - i = j->userdata; - - dkr_import_report_progress( - i, - j == i->images_job ? DKR_SEARCHING : - j == i->tags_job ? DKR_RESOLVING : - j == i->ancestry_job || j == i->json_job ? 
DKR_METADATA : - DKR_DOWNLOADING); -} - -static int dkr_import_pull_layer(DkrImport *i) { - _cleanup_free_ char *path = NULL; - const char *url, *layer = NULL; - int r; - - assert(i); - assert(!i->layer_job); - assert(!i->temp_path); - assert(!i->final_path); - - for (;;) { - layer = dkr_import_current_layer(i); - if (!layer) - return 0; /* no more layers */ - - path = strjoin(i->image_root, "/.dkr-", layer, NULL); - if (!path) - return log_oom(); - - if (laccess(path, F_OK) < 0) { - if (errno == ENOENT) - break; - - return log_error_errno(errno, "Failed to check for container: %m"); - } - - log_info("Layer %s already exists, skipping.", layer); - - i->current_ancestry++; - - free(path); - path = NULL; - } - - log_info("Pulling layer %s...", layer); - - i->final_path = path; - path = NULL; - - url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/images/", layer, "/layer"); - r = import_job_new(&i->layer_job, url, i->glue, i); - if (r < 0) - return log_error_errno(r, "Failed to allocate layer job: %m"); - - r = dkr_import_add_token(i, i->layer_job); - if (r < 0) - return log_oom(); - - i->layer_job->on_finished = dkr_import_job_on_finished; - i->layer_job->on_open_disk = dkr_import_job_on_open_disk; - i->layer_job->on_progress = dkr_import_job_on_progress; - i->layer_job->grow_machine_directory = i->grow_machine_directory; - - r = import_job_begin(i->layer_job); - if (r < 0) - return log_error_errno(r, "Failed to start layer job: %m"); - - return 0; -} - -static void dkr_import_job_on_finished(ImportJob *j) { - DkrImport *i; - int r; - - assert(j); - assert(j->userdata); - - i = j->userdata; - if (j->error != 0) { - if (j == i->images_job) - log_error_errno(j->error, "Failed to retrieve images list. (Wrong index URL?)"); - else if (j == i->tags_job) - log_error_errno(j->error, "Failed to retrieve tags list."); - else if (j == i->ancestry_job) - log_error_errno(j->error, "Failed to retrieve ancestry list."); - else if (j == i->json_job) - log_error_errno(j->error, "Failed to retrieve json data."); - else - log_error_errno(j->error, "Failed to retrieve layer data."); - - r = j->error; - goto finish; - } - - if (i->images_job == j) { - const char *url; - - assert(!i->tags_job); - assert(!i->ancestry_job); - assert(!i->json_job); - assert(!i->layer_job); - - if (strv_isempty(i->response_registries)) { - r = -EBADMSG; - log_error("Didn't get registry information."); - goto finish; - } - - log_info("Index lookup succeeded, directed to registry %s.", i->response_registries[0]); - dkr_import_report_progress(i, DKR_RESOLVING); - - url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/repositories/", i->name, "/tags/", i->tag); - r = import_job_new(&i->tags_job, url, i->glue, i); - if (r < 0) { - log_error_errno(r, "Failed to allocate tags job: %m"); - goto finish; - } - - r = dkr_import_add_token(i, i->tags_job); - if (r < 0) { - log_oom(); - goto finish; - } - - i->tags_job->on_finished = dkr_import_job_on_finished; - i->tags_job->on_progress = dkr_import_job_on_progress; - - r = import_job_begin(i->tags_job); - if (r < 0) { - log_error_errno(r, "Failed to start tags job: %m"); - goto finish; - } - - } else if (i->tags_job == j) { - const char *url; - char *id = NULL; - - assert(!i->ancestry_job); - assert(!i->json_job); - assert(!i->layer_job); - - r = parse_id(j->payload, j->payload_size, &id); - if (r < 0) { - log_error_errno(r, "Failed to parse JSON id."); - goto finish; - } - - free(i->id); - i->id = id; - - log_info("Tag lookup succeeded, resolved to layer %s.", i->id); - 
dkr_import_report_progress(i, DKR_METADATA); - - url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/images/", i->id, "/ancestry"); - r = import_job_new(&i->ancestry_job, url, i->glue, i); - if (r < 0) { - log_error_errno(r, "Failed to allocate ancestry job: %m"); - goto finish; - } - - r = dkr_import_add_token(i, i->ancestry_job); - if (r < 0) { - log_oom(); - goto finish; - } - - i->ancestry_job->on_finished = dkr_import_job_on_finished; - i->ancestry_job->on_progress = dkr_import_job_on_progress; - - url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/images/", i->id, "/json"); - r = import_job_new(&i->json_job, url, i->glue, i); - if (r < 0) { - log_error_errno(r, "Failed to allocate json job: %m"); - goto finish; - } - - r = dkr_import_add_token(i, i->json_job); - if (r < 0) { - log_oom(); - goto finish; - } - - i->json_job->on_finished = dkr_import_job_on_finished; - i->json_job->on_progress = dkr_import_job_on_progress; - - r = import_job_begin(i->ancestry_job); - if (r < 0) { - log_error_errno(r, "Failed to start ancestry job: %m"); - goto finish; - } - - r = import_job_begin(i->json_job); - if (r < 0) { - log_error_errno(r, "Failed to start json job: %m"); - goto finish; - } - - } else if (i->ancestry_job == j) { - char **ancestry = NULL, **k; - unsigned n; - - assert(!i->layer_job); - - r = parse_ancestry(j->payload, j->payload_size, &ancestry); - if (r < 0) { - log_error_errno(r, "Failed to parse JSON id."); - goto finish; - } - - n = strv_length(ancestry); - if (n <= 0 || !streq(ancestry[n-1], i->id)) { - log_error("Ancestry doesn't end in main layer."); - strv_free(ancestry); - r = -EBADMSG; - goto finish; - } - - log_info("Ancestor lookup succeeded, requires layers:\n"); - STRV_FOREACH(k, ancestry) - log_info("\t%s", *k); - - strv_free(i->ancestry); - i->ancestry = ancestry; - i->n_ancestry = n; - i->current_ancestry = 0; - - dkr_import_report_progress(i, DKR_DOWNLOADING); - - r = dkr_import_pull_layer(i); - if (r < 0) - goto finish; - - } else if (i->layer_job == j) { - assert(i->temp_path); - assert(i->final_path); - - j->disk_fd = safe_close(j->disk_fd); - - if (i->tar_pid > 0) { - r = wait_for_terminate_and_warn("tar", i->tar_pid, true); - i->tar_pid = 0; - if (r < 0) - goto finish; - } - - r = aufs_resolve(i->temp_path); - if (r < 0) { - log_error_errno(r, "Failed to resolve aufs whiteouts: %m"); - goto finish; - } - - r = btrfs_subvol_set_read_only(i->temp_path, true); - if (r < 0) { - log_error_errno(r, "Failed to mark snapshot read-only: %m"); - goto finish; - } - - if (rename(i->temp_path, i->final_path) < 0) { - log_error_errno(errno, "Failed to rename snaphsot: %m"); - goto finish; - } - - log_info("Completed writing to layer %s.", i->final_path); - - i->layer_job = import_job_unref(i->layer_job); - free(i->temp_path); - i->temp_path = NULL; - free(i->final_path); - i->final_path = NULL; - - i->current_ancestry ++; - r = dkr_import_pull_layer(i); - if (r < 0) - goto finish; - - } else if (i->json_job != j) - assert_not_reached("Got finished event for unknown curl object"); - - if (!dkr_import_is_done(i)) - return; - - dkr_import_report_progress(i, DKR_COPYING); - - r = dkr_import_make_local_copy(i); - if (r < 0) - goto finish; - - r = 0; - -finish: - if (i->on_finished) - i->on_finished(i, r, i->userdata); - else - sd_event_exit(i->event, r); -} - -static int dkr_import_job_on_header(ImportJob *j, const char *header, size_t sz) { - _cleanup_free_ char *registry = NULL; - char *token; - DkrImport *i; - int r; - - assert(j); - 
assert(j->userdata); - - i = j->userdata; - - r = curl_header_strdup(header, sz, HEADER_TOKEN, &token); - if (r < 0) - return log_oom(); - if (r > 0) { - free(i->response_token); - i->response_token = token; - return 0; - } - - r = curl_header_strdup(header, sz, HEADER_REGISTRY, ®istry); - if (r < 0) - return log_oom(); - if (r > 0) { - char **l, **k; - - l = strv_split(registry, ","); - if (!l) - return log_oom(); - - STRV_FOREACH(k, l) { - if (!hostname_is_valid(*k)) { - log_error("Registry hostname is not valid."); - strv_free(l); - return -EBADMSG; - } - } - - strv_free(i->response_registries); - i->response_registries = l; - } - - return 0; -} - -int dkr_import_pull(DkrImport *i, const char *name, const char *tag, const char *local, bool force_local) { - const char *url; - int r; - - assert(i); - - if (!dkr_name_is_valid(name)) - return -EINVAL; - - if (tag && !dkr_tag_is_valid(tag)) - return -EINVAL; - - if (local && !machine_name_is_valid(local)) - return -EINVAL; - - if (i->images_job) - return -EBUSY; - - if (!tag) - tag = "latest"; - - r = free_and_strdup(&i->local, local); - if (r < 0) - return r; - i->force_local = force_local; - - r = free_and_strdup(&i->name, name); - if (r < 0) - return r; - r = free_and_strdup(&i->tag, tag); - if (r < 0) - return r; - - url = strjoina(i->index_url, "/v1/repositories/", name, "/images"); - - r = import_job_new(&i->images_job, url, i->glue, i); - if (r < 0) - return r; - - r = dkr_import_add_token(i, i->images_job); - if (r < 0) - return r; - - i->images_job->on_finished = dkr_import_job_on_finished; - i->images_job->on_header = dkr_import_job_on_header; - i->images_job->on_progress = dkr_import_job_on_progress; - - return import_job_begin(i->images_job); -} diff --git a/src/import/import-dkr.h b/src/import/import-dkr.h deleted file mode 100644 index 633c767965..0000000000 --- a/src/import/import-dkr.h +++ /dev/null @@ -1,36 +0,0 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - -/*** - This file is part of systemd. - - Copyright 2014 Lennart Poettering - - systemd is free software; you can redistribute it and/or modify it - under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - systemd is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with systemd; If not, see . -***/ - -#pragma once - -#include "sd-event.h" -#include "util.h" - -typedef struct DkrImport DkrImport; - -typedef void (*DkrImportFinished)(DkrImport *import, int error, void *userdata); - -int dkr_import_new(DkrImport **import, sd_event *event, const char *index_url, const char *image_root, DkrImportFinished on_finished, void *userdata); -DkrImport* dkr_import_unref(DkrImport *import); - -DEFINE_TRIVIAL_CLEANUP_FUNC(DkrImport*, dkr_import_unref); - -int dkr_import_pull(DkrImport *import, const char *name, const char *tag, const char *local, bool force_local); diff --git a/src/import/import-job.c b/src/import/import-job.c deleted file mode 100644 index 980b639b5d..0000000000 --- a/src/import/import-job.c +++ /dev/null @@ -1,746 +0,0 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - -/*** - This file is part of systemd. 
- - Copyright 2015 Lennart Poettering - - systemd is free software; you can redistribute it and/or modify it - under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - systemd is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with systemd; If not, see . -***/ - -#include - -#include "strv.h" -#include "machine-pool.h" -#include "import-job.h" - -/* Grow the /var/lib/machines directory after each 10MiB written */ -#define IMPORT_GROW_INTERVAL_BYTES (UINT64_C(10) * UINT64_C(1024) * UINT64_C(1024)) - -ImportJob* import_job_unref(ImportJob *j) { - if (!j) - return NULL; - - curl_glue_remove_and_free(j->glue, j->curl); - curl_slist_free_all(j->request_header); - - safe_close(j->disk_fd); - - if (j->compressed == IMPORT_JOB_XZ) - lzma_end(&j->xz); - else if (j->compressed == IMPORT_JOB_GZIP) - inflateEnd(&j->gzip); - else if (j->compressed == IMPORT_JOB_BZIP2) - BZ2_bzDecompressEnd(&j->bzip2); - - if (j->checksum_context) - gcry_md_close(j->checksum_context); - - free(j->url); - free(j->etag); - strv_free(j->old_etags); - free(j->payload); - free(j->checksum); - - free(j); - - return NULL; -} - -static void import_job_finish(ImportJob *j, int ret) { - assert(j); - - if (j->state == IMPORT_JOB_DONE || - j->state == IMPORT_JOB_FAILED) - return; - - if (ret == 0) { - j->state = IMPORT_JOB_DONE; - j->progress_percent = 100; - log_info("Download of %s complete.", j->url); - } else { - j->state = IMPORT_JOB_FAILED; - j->error = ret; - } - - if (j->on_finished) - j->on_finished(j); -} - -void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) { - ImportJob *j = NULL; - CURLcode code; - long status; - int r; - - if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &j) != CURLE_OK) - return; - - if (!j || j->state == IMPORT_JOB_DONE || j->state == IMPORT_JOB_FAILED) - return; - - if (result != CURLE_OK) { - log_error("Transfer failed: %s", curl_easy_strerror(result)); - r = -EIO; - goto finish; - } - - code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status); - if (code != CURLE_OK) { - log_error("Failed to retrieve response code: %s", curl_easy_strerror(code)); - r = -EIO; - goto finish; - } else if (status == 304) { - log_info("Image already downloaded. 
Skipping download."); - j->etag_exists = true; - r = 0; - goto finish; - } else if (status >= 300) { - log_error("HTTP request to %s failed with code %li.", j->url, status); - r = -EIO; - goto finish; - } else if (status < 200) { - log_error("HTTP request to %s finished with unexpected code %li.", j->url, status); - r = -EIO; - goto finish; - } - - if (j->state != IMPORT_JOB_RUNNING) { - log_error("Premature connection termination."); - r = -EIO; - goto finish; - } - - if (j->content_length != (uint64_t) -1 && - j->content_length != j->written_compressed) { - log_error("Download truncated."); - r = -EIO; - goto finish; - } - - if (j->checksum_context) { - uint8_t *k; - - k = gcry_md_read(j->checksum_context, GCRY_MD_SHA256); - if (!k) { - log_error("Failed to get checksum."); - r = -EIO; - goto finish; - } - - j->checksum = hexmem(k, gcry_md_get_algo_dlen(GCRY_MD_SHA256)); - if (!j->checksum) { - r = log_oom(); - goto finish; - } - - log_debug("SHA256 of %s is %s.", j->url, j->checksum); - } - - if (j->disk_fd >= 0 && j->allow_sparse) { - /* Make sure the file size is right, in case the file was - * sparse and we just seeked for the last part */ - - if (ftruncate(j->disk_fd, j->written_uncompressed) < 0) { - log_error_errno(errno, "Failed to truncate file: %m"); - r = -errno; - goto finish; - } - - if (j->etag) - (void) fsetxattr(j->disk_fd, "user.source_etag", j->etag, strlen(j->etag), 0); - if (j->url) - (void) fsetxattr(j->disk_fd, "user.source_url", j->url, strlen(j->url), 0); - - if (j->mtime != 0) { - struct timespec ut[2]; - - timespec_store(&ut[0], j->mtime); - ut[1] = ut[0]; - (void) futimens(j->disk_fd, ut); - - (void) fd_setcrtime(j->disk_fd, j->mtime); - } - } - - r = 0; - -finish: - import_job_finish(j, r); -} - -static int import_job_write_uncompressed(ImportJob *j, void *p, size_t sz) { - ssize_t n; - - assert(j); - assert(p); - - if (sz <= 0) - return 0; - - if (j->written_uncompressed + sz < j->written_uncompressed) { - log_error("File too large, overflow"); - return -EOVERFLOW; - } - - if (j->written_uncompressed + sz > j->uncompressed_max) { - log_error("File overly large, refusing"); - return -EFBIG; - } - - if (j->disk_fd >= 0) { - - if (j->grow_machine_directory && j->written_since_last_grow >= IMPORT_GROW_INTERVAL_BYTES) { - j->written_since_last_grow = 0; - grow_machine_directory(); - } - - if (j->allow_sparse) - n = sparse_write(j->disk_fd, p, sz, 64); - else - n = write(j->disk_fd, p, sz); - if (n < 0) { - log_error_errno(errno, "Failed to write file: %m"); - return -errno; - } - if ((size_t) n < sz) { - log_error("Short write"); - return -EIO; - } - } else { - - if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) - return log_oom(); - - memcpy(j->payload + j->payload_size, p, sz); - j->payload_size += sz; - } - - j->written_uncompressed += sz; - j->written_since_last_grow += sz; - - return 0; -} - -static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) { - int r; - - assert(j); - assert(p); - - if (sz <= 0) - return 0; - - if (j->written_compressed + sz < j->written_compressed) { - log_error("File too large, overflow"); - return -EOVERFLOW; - } - - if (j->written_compressed + sz > j->compressed_max) { - log_error("File overly large, refusing."); - return -EFBIG; - } - - if (j->content_length != (uint64_t) -1 && - j->written_compressed + sz > j->content_length) { - log_error("Content length incorrect."); - return -EFBIG; - } - - if (j->checksum_context) - gcry_md_write(j->checksum_context, p, sz); - - switch 
(j->compressed) { - - case IMPORT_JOB_UNCOMPRESSED: - r = import_job_write_uncompressed(j, p, sz); - if (r < 0) - return r; - - break; - - case IMPORT_JOB_XZ: - j->xz.next_in = p; - j->xz.avail_in = sz; - - while (j->xz.avail_in > 0) { - uint8_t buffer[16 * 1024]; - lzma_ret lzr; - - j->xz.next_out = buffer; - j->xz.avail_out = sizeof(buffer); - - lzr = lzma_code(&j->xz, LZMA_RUN); - if (lzr != LZMA_OK && lzr != LZMA_STREAM_END) { - log_error("Decompression error."); - return -EIO; - } - - r = import_job_write_uncompressed(j, buffer, sizeof(buffer) - j->xz.avail_out); - if (r < 0) - return r; - } - - break; - - case IMPORT_JOB_GZIP: - j->gzip.next_in = p; - j->gzip.avail_in = sz; - - while (j->gzip.avail_in > 0) { - uint8_t buffer[16 * 1024]; - - j->gzip.next_out = buffer; - j->gzip.avail_out = sizeof(buffer); - - r = inflate(&j->gzip, Z_NO_FLUSH); - if (r != Z_OK && r != Z_STREAM_END) { - log_error("Decompression error."); - return -EIO; - } - - r = import_job_write_uncompressed(j, buffer, sizeof(buffer) - j->gzip.avail_out); - if (r < 0) - return r; - } - - break; - - case IMPORT_JOB_BZIP2: - j->bzip2.next_in = p; - j->bzip2.avail_in = sz; - - while (j->bzip2.avail_in > 0) { - uint8_t buffer[16 * 1024]; - - j->bzip2.next_out = (char*) buffer; - j->bzip2.avail_out = sizeof(buffer); - - r = BZ2_bzDecompress(&j->bzip2); - if (r != BZ_OK && r != BZ_STREAM_END) { - log_error("Decompression error."); - return -EIO; - } - - r = import_job_write_uncompressed(j, buffer, sizeof(buffer) - j->bzip2.avail_out); - if (r < 0) - return r; - } - - break; - - default: - assert_not_reached("Unknown compression"); - } - - j->written_compressed += sz; - - return 0; -} - -static int import_job_open_disk(ImportJob *j) { - int r; - - assert(j); - - if (j->on_open_disk) { - r = j->on_open_disk(j); - if (r < 0) - return r; - } - - if (j->disk_fd >= 0) { - /* Check if we can do sparse files */ - - if (lseek(j->disk_fd, SEEK_SET, 0) == 0) - j->allow_sparse = true; - else { - if (errno != ESPIPE) - return log_error_errno(errno, "Failed to seek on file descriptor: %m"); - - j->allow_sparse = false; - } - } - - if (j->calc_checksum) { - if (gcry_md_open(&j->checksum_context, GCRY_MD_SHA256, 0) != 0) { - log_error("Failed to initialize hash context."); - return -EIO; - } - } - - return 0; -} - -static int import_job_detect_compression(ImportJob *j) { - static const uint8_t xz_signature[] = { - 0xfd, '7', 'z', 'X', 'Z', 0x00 - }; - static const uint8_t gzip_signature[] = { - 0x1f, 0x8b - }; - static const uint8_t bzip2_signature[] = { - 'B', 'Z', 'h' - }; - - _cleanup_free_ uint8_t *stub = NULL; - size_t stub_size; - - int r; - - assert(j); - - if (j->payload_size < MAX3(sizeof(xz_signature), - sizeof(gzip_signature), - sizeof(bzip2_signature))) - return 0; - - if (memcmp(j->payload, xz_signature, sizeof(xz_signature)) == 0) - j->compressed = IMPORT_JOB_XZ; - else if (memcmp(j->payload, gzip_signature, sizeof(gzip_signature)) == 0) - j->compressed = IMPORT_JOB_GZIP; - else if (memcmp(j->payload, bzip2_signature, sizeof(bzip2_signature)) == 0) - j->compressed = IMPORT_JOB_BZIP2; - else - j->compressed = IMPORT_JOB_UNCOMPRESSED; - - log_debug("Stream is XZ compressed: %s", yes_no(j->compressed == IMPORT_JOB_XZ)); - log_debug("Stream is GZIP compressed: %s", yes_no(j->compressed == IMPORT_JOB_GZIP)); - log_debug("Stream is BZIP2 compressed: %s", yes_no(j->compressed == IMPORT_JOB_BZIP2)); - - if (j->compressed == IMPORT_JOB_XZ) { - lzma_ret xzr; - - xzr = lzma_stream_decoder(&j->xz, UINT64_MAX, 
LZMA_TELL_UNSUPPORTED_CHECK); - if (xzr != LZMA_OK) { - log_error("Failed to initialize XZ decoder."); - return -EIO; - } - } - if (j->compressed == IMPORT_JOB_GZIP) { - r = inflateInit2(&j->gzip, 15+16); - if (r != Z_OK) { - log_error("Failed to initialize gzip decoder."); - return -EIO; - } - } - if (j->compressed == IMPORT_JOB_BZIP2) { - r = BZ2_bzDecompressInit(&j->bzip2, 0, 0); - if (r != BZ_OK) { - log_error("Failed to initialize bzip2 decoder."); - return -EIO; - } - } - - r = import_job_open_disk(j); - if (r < 0) - return r; - - /* Now, take the payload we read so far, and decompress it */ - stub = j->payload; - stub_size = j->payload_size; - - j->payload = NULL; - j->payload_size = 0; - j->payload_allocated = 0; - - j->state = IMPORT_JOB_RUNNING; - - r = import_job_write_compressed(j, stub, stub_size); - if (r < 0) - return r; - - return 0; -} - -static size_t import_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) { - ImportJob *j = userdata; - size_t sz = size * nmemb; - int r; - - assert(contents); - assert(j); - - switch (j->state) { - - case IMPORT_JOB_ANALYZING: - /* Let's first check what it actually is */ - - if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) { - r = log_oom(); - goto fail; - } - - memcpy(j->payload + j->payload_size, contents, sz); - j->payload_size += sz; - - r = import_job_detect_compression(j); - if (r < 0) - goto fail; - - break; - - case IMPORT_JOB_RUNNING: - - r = import_job_write_compressed(j, contents, sz); - if (r < 0) - goto fail; - - break; - - case IMPORT_JOB_DONE: - case IMPORT_JOB_FAILED: - r = -ESTALE; - goto fail; - - default: - assert_not_reached("Impossible state."); - } - - return sz; - -fail: - import_job_finish(j, r); - return 0; -} - -static size_t import_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) { - ImportJob *j = userdata; - size_t sz = size * nmemb; - _cleanup_free_ char *length = NULL, *last_modified = NULL; - char *etag; - int r; - - assert(contents); - assert(j); - - if (j->state == IMPORT_JOB_DONE || j->state == IMPORT_JOB_FAILED) { - r = -ESTALE; - goto fail; - } - - assert(j->state == IMPORT_JOB_ANALYZING); - - r = curl_header_strdup(contents, sz, "ETag:", &etag); - if (r < 0) { - log_oom(); - goto fail; - } - if (r > 0) { - free(j->etag); - j->etag = etag; - - if (strv_contains(j->old_etags, j->etag)) { - log_info("Image already downloaded. 
Skipping download."); - j->etag_exists = true; - import_job_finish(j, 0); - return sz; - } - - return sz; - } - - r = curl_header_strdup(contents, sz, "Content-Length:", &length); - if (r < 0) { - log_oom(); - goto fail; - } - if (r > 0) { - (void) safe_atou64(length, &j->content_length); - - if (j->content_length != (uint64_t) -1) { - char bytes[FORMAT_BYTES_MAX]; - - if (j->content_length > j->compressed_max) { - log_error("Content too large."); - r = -EFBIG; - goto fail; - } - - log_info("Downloading %s for %s.", format_bytes(bytes, sizeof(bytes), j->content_length), j->url); - } - - return sz; - } - - r = curl_header_strdup(contents, sz, "Last-Modified:", &last_modified); - if (r < 0) { - log_oom(); - goto fail; - } - if (r > 0) { - (void) curl_parse_http_time(last_modified, &j->mtime); - return sz; - } - - if (j->on_header) { - r = j->on_header(j, contents, sz); - if (r < 0) - goto fail; - } - - return sz; - -fail: - import_job_finish(j, r); - return 0; -} - -static int import_job_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) { - ImportJob *j = userdata; - unsigned percent; - usec_t n; - - assert(j); - - if (dltotal <= 0) - return 0; - - percent = ((100 * dlnow) / dltotal); - n = now(CLOCK_MONOTONIC); - - if (n > j->last_status_usec + USEC_PER_SEC && - percent != j->progress_percent && - dlnow < dltotal) { - char buf[FORMAT_TIMESPAN_MAX]; - - if (n - j->start_usec > USEC_PER_SEC && dlnow > 0) { - char y[FORMAT_BYTES_MAX]; - usec_t left, done; - - done = n - j->start_usec; - left = (usec_t) (((double) done * (double) dltotal) / dlnow) - done; - - log_info("Got %u%% of %s. %s left at %s/s.", - percent, - j->url, - format_timespan(buf, sizeof(buf), left, USEC_PER_SEC), - format_bytes(y, sizeof(y), (uint64_t) ((double) dlnow / ((double) done / (double) USEC_PER_SEC)))); - } else - log_info("Got %u%% of %s.", percent, j->url); - - j->progress_percent = percent; - j->last_status_usec = n; - - if (j->on_progress) - j->on_progress(j); - } - - return 0; -} - -int import_job_new(ImportJob **ret, const char *url, CurlGlue *glue, void *userdata) { - _cleanup_(import_job_unrefp) ImportJob *j = NULL; - - assert(url); - assert(glue); - assert(ret); - - j = new0(ImportJob, 1); - if (!j) - return -ENOMEM; - - j->state = IMPORT_JOB_INIT; - j->disk_fd = -1; - j->userdata = userdata; - j->glue = glue; - j->content_length = (uint64_t) -1; - j->start_usec = now(CLOCK_MONOTONIC); - j->compressed_max = j->uncompressed_max = 8LLU * 1024LLU * 1024LLU * 1024LLU; /* 8GB */ - - j->url = strdup(url); - if (!j->url) - return -ENOMEM; - - *ret = j; - j = NULL; - - return 0; -} - -int import_job_begin(ImportJob *j) { - int r; - - assert(j); - - if (j->state != IMPORT_JOB_INIT) - return -EBUSY; - - if (j->grow_machine_directory) - grow_machine_directory(); - - r = curl_glue_make(&j->curl, j->url, j); - if (r < 0) - return r; - - if (!strv_isempty(j->old_etags)) { - _cleanup_free_ char *cc = NULL, *hdr = NULL; - - cc = strv_join(j->old_etags, ", "); - if (!cc) - return -ENOMEM; - - hdr = strappend("If-None-Match: ", cc); - if (!hdr) - return -ENOMEM; - - if (!j->request_header) { - j->request_header = curl_slist_new(hdr, NULL); - if (!j->request_header) - return -ENOMEM; - } else { - struct curl_slist *l; - - l = curl_slist_append(j->request_header, hdr); - if (!l) - return -ENOMEM; - - j->request_header = l; - } - } - - if (j->request_header) { - if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK) - return -EIO; - } - - if 
(curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, import_job_write_callback) != CURLE_OK) - return -EIO; - - if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK) - return -EIO; - - if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, import_job_header_callback) != CURLE_OK) - return -EIO; - - if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK) - return -EIO; - - if (curl_easy_setopt(j->curl, CURLOPT_XFERINFOFUNCTION, import_job_progress_callback) != CURLE_OK) - return -EIO; - - if (curl_easy_setopt(j->curl, CURLOPT_XFERINFODATA, j) != CURLE_OK) - return -EIO; - - if (curl_easy_setopt(j->curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK) - return -EIO; - - r = curl_glue_add(j->glue, j->curl); - if (r < 0) - return r; - - j->state = IMPORT_JOB_ANALYZING; - - return 0; -} diff --git a/src/import/import-job.h b/src/import/import-job.h deleted file mode 100644 index 2c01d723db..0000000000 --- a/src/import/import-job.h +++ /dev/null @@ -1,122 +0,0 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - -#pragma once - -/*** - This file is part of systemd. - - Copyright 2015 Lennart Poettering - - systemd is free software; you can redistribute it and/or modify it - under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - systemd is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with systemd; If not, see . -***/ - -#include -#include -#include -#include - -#include "macro.h" -#include "curl-util.h" - -typedef struct ImportJob ImportJob; - -typedef void (*ImportJobFinished)(ImportJob *job); -typedef int (*ImportJobOpenDisk)(ImportJob *job); -typedef int (*ImportJobHeader)(ImportJob *job, const char *header, size_t sz); -typedef void (*ImportJobProgress)(ImportJob *job); - -typedef enum ImportJobState { - IMPORT_JOB_INIT, - IMPORT_JOB_ANALYZING, /* Still reading into ->payload, to figure out what we have */ - IMPORT_JOB_RUNNING, /* Writing to destination */ - IMPORT_JOB_DONE, - IMPORT_JOB_FAILED, - _IMPORT_JOB_STATE_MAX, - _IMPORT_JOB_STATE_INVALID = -1, -} ImportJobState; - -#define IMPORT_JOB_STATE_IS_COMPLETE(j) (IN_SET((j)->state, IMPORT_JOB_DONE, IMPORT_JOB_FAILED)) - -typedef enum ImportJobCompression { - IMPORT_JOB_UNCOMPRESSED, - IMPORT_JOB_XZ, - IMPORT_JOB_GZIP, - IMPORT_JOB_BZIP2, - _IMPORT_JOB_COMPRESSION_MAX, - _IMPORT_JOB_COMPRESSION_INVALID = -1, -} ImportJobCompression; - -struct ImportJob { - ImportJobState state; - int error; - - char *url; - - void *userdata; - ImportJobFinished on_finished; - ImportJobOpenDisk on_open_disk; - ImportJobHeader on_header; - ImportJobProgress on_progress; - - CurlGlue *glue; - CURL *curl; - struct curl_slist *request_header; - - char *etag; - char **old_etags; - bool etag_exists; - - uint64_t content_length; - uint64_t written_compressed; - uint64_t written_uncompressed; - - uint64_t uncompressed_max; - uint64_t compressed_max; - - uint8_t *payload; - size_t payload_size; - size_t payload_allocated; - - int disk_fd; - - usec_t mtime; - - ImportJobCompression compressed; - lzma_stream xz; - z_stream gzip; - bz_stream bzip2; - - unsigned progress_percent; - usec_t start_usec; - usec_t last_status_usec; - - bool allow_sparse; - - bool 
calc_checksum; - gcry_md_hd_t checksum_context; - - char *checksum; - - bool grow_machine_directory; - uint64_t written_since_last_grow; -}; - -int import_job_new(ImportJob **job, const char *url, CurlGlue *glue, void *userdata); -ImportJob* import_job_unref(ImportJob *job); - -int import_job_begin(ImportJob *j); - -void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result); - -DEFINE_TRIVIAL_CLEANUP_FUNC(ImportJob*, import_job_unref); diff --git a/src/import/import-raw.c b/src/import/import-raw.c deleted file mode 100644 index 89c064cb3d..0000000000 --- a/src/import/import-raw.c +++ /dev/null @@ -1,518 +0,0 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - -/*** - This file is part of systemd. - - Copyright 2014 Lennart Poettering - - systemd is free software; you can redistribute it and/or modify it - under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - systemd is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with systemd; If not, see . -***/ - -#include -#include -#include - -#include "sd-daemon.h" -#include "utf8.h" -#include "strv.h" -#include "copy.h" -#include "btrfs-util.h" -#include "util.h" -#include "macro.h" -#include "mkdir.h" -#include "path-util.h" -#include "import-util.h" -#include "curl-util.h" -#include "qcow2-util.h" -#include "import-job.h" -#include "import-common.h" -#include "import-raw.h" - -typedef enum RawProgress { - RAW_DOWNLOADING, - RAW_VERIFYING, - RAW_UNPACKING, - RAW_FINALIZING, - RAW_COPYING, -} RawProgress; - -struct RawImport { - sd_event *event; - CurlGlue *glue; - - char *image_root; - - ImportJob *raw_job; - ImportJob *checksum_job; - ImportJob *signature_job; - - RawImportFinished on_finished; - void *userdata; - - char *local; - bool force_local; - bool grow_machine_directory; - - char *temp_path; - char *final_path; - - ImportVerify verify; -}; - -RawImport* raw_import_unref(RawImport *i) { - if (!i) - return NULL; - - import_job_unref(i->raw_job); - import_job_unref(i->checksum_job); - import_job_unref(i->signature_job); - - curl_glue_unref(i->glue); - sd_event_unref(i->event); - - if (i->temp_path) { - (void) unlink(i->temp_path); - free(i->temp_path); - } - - free(i->final_path); - free(i->image_root); - free(i->local); - free(i); - - return NULL; -} - -int raw_import_new( - RawImport **ret, - sd_event *event, - const char *image_root, - RawImportFinished on_finished, - void *userdata) { - - _cleanup_(raw_import_unrefp) RawImport *i = NULL; - int r; - - assert(ret); - - i = new0(RawImport, 1); - if (!i) - return -ENOMEM; - - i->on_finished = on_finished; - i->userdata = userdata; - - i->image_root = strdup(image_root ?: "/var/lib/machines"); - if (!i->image_root) - return -ENOMEM; - - i->grow_machine_directory = path_startswith(i->image_root, "/var/lib/machines"); - - if (event) - i->event = sd_event_ref(event); - else { - r = sd_event_default(&i->event); - if (r < 0) - return r; - } - - r = curl_glue_new(&i->glue, i->event); - if (r < 0) - return r; - - i->glue->on_finished = import_job_curl_on_finished; - i->glue->userdata = i; - - *ret = i; - i = NULL; - - return 0; -} - -static void 
raw_import_report_progress(RawImport *i, RawProgress p) { - unsigned percent; - - assert(i); - - switch (p) { - - case RAW_DOWNLOADING: { - unsigned remain = 80; - - percent = 0; - - if (i->checksum_job) { - percent += i->checksum_job->progress_percent * 5 / 100; - remain -= 5; - } - - if (i->signature_job) { - percent += i->signature_job->progress_percent * 5 / 100; - remain -= 5; - } - - if (i->raw_job) - percent += i->raw_job->progress_percent * remain / 100; - break; - } - - case RAW_VERIFYING: - percent = 80; - break; - - case RAW_UNPACKING: - percent = 85; - break; - - case RAW_FINALIZING: - percent = 90; - break; - - case RAW_COPYING: - percent = 95; - break; - - default: - assert_not_reached("Unknown progress state"); - } - - sd_notifyf(false, "X_IMPORT_PROGRESS=%u", percent); - log_debug("Combined progress %u%%", percent); -} - -static int raw_import_maybe_convert_qcow2(RawImport *i) { - _cleanup_close_ int converted_fd = -1; - _cleanup_free_ char *t = NULL; - int r; - - assert(i); - assert(i->raw_job); - - r = qcow2_detect(i->raw_job->disk_fd); - if (r < 0) - return log_error_errno(r, "Failed to detect whether this is a QCOW2 image: %m"); - if (r == 0) - return 0; - - /* This is a QCOW2 image, let's convert it */ - r = tempfn_random(i->final_path, &t); - if (r < 0) - return log_oom(); - - converted_fd = open(t, O_RDWR|O_CREAT|O_EXCL|O_NOCTTY|O_CLOEXEC, 0644); - if (converted_fd < 0) - return log_error_errno(errno, "Failed to create %s: %m", t); - - r = chattr_fd(converted_fd, true, FS_NOCOW_FL); - if (r < 0) - log_warning_errno(errno, "Failed to set file attributes on %s: %m", t); - - log_info("Unpacking QCOW2 file."); - - r = qcow2_convert(i->raw_job->disk_fd, converted_fd); - if (r < 0) { - unlink(t); - return log_error_errno(r, "Failed to convert qcow2 image: %m"); - } - - unlink(i->temp_path); - free(i->temp_path); - - i->temp_path = t; - t = NULL; - - safe_close(i->raw_job->disk_fd); - i->raw_job->disk_fd = converted_fd; - converted_fd = -1; - - return 1; -} - -static int raw_import_make_local_copy(RawImport *i) { - _cleanup_free_ char *tp = NULL; - _cleanup_close_ int dfd = -1; - const char *p; - int r; - - assert(i); - assert(i->raw_job); - - if (!i->local) - return 0; - - if (i->raw_job->etag_exists) { - /* We have downloaded this one previously, reopen it */ - - assert(i->raw_job->disk_fd < 0); - - if (!i->final_path) { - r = import_make_path(i->raw_job->url, i->raw_job->etag, i->image_root, ".raw-", ".raw", &i->final_path); - if (r < 0) - return log_oom(); - } - - i->raw_job->disk_fd = open(i->final_path, O_RDONLY|O_NOCTTY|O_CLOEXEC); - if (i->raw_job->disk_fd < 0) - return log_error_errno(errno, "Failed to open vendor image: %m"); - } else { - /* We freshly downloaded the image, use it */ - - assert(i->raw_job->disk_fd >= 0); - - if (lseek(i->raw_job->disk_fd, SEEK_SET, 0) == (off_t) -1) - return log_error_errno(errno, "Failed to seek to beginning of vendor image: %m"); - } - - p = strjoina(i->image_root, "/", i->local, ".raw"); - - if (i->force_local) { - (void) btrfs_subvol_remove(p); - (void) rm_rf_dangerous(p, false, true, false); - } - - r = tempfn_random(p, &tp); - if (r < 0) - return log_oom(); - - dfd = open(tp, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY|O_CLOEXEC, 0664); - if (dfd < 0) - return log_error_errno(errno, "Failed to create writable copy of image: %m"); - - /* Turn off COW writing. This should greatly improve - * performance on COW file systems like btrfs, since it - * reduces fragmentation caused by not allowing in-place - * writes. 
*/ - r = chattr_fd(dfd, true, FS_NOCOW_FL); - if (r < 0) - log_warning_errno(errno, "Failed to set file attributes on %s: %m", tp); - - r = copy_bytes(i->raw_job->disk_fd, dfd, (off_t) -1, true); - if (r < 0) { - unlink(tp); - return log_error_errno(r, "Failed to make writable copy of image: %m"); - } - - (void) copy_times(i->raw_job->disk_fd, dfd); - (void) copy_xattr(i->raw_job->disk_fd, dfd); - - dfd = safe_close(dfd); - - r = rename(tp, p); - if (r < 0) { - unlink(tp); - return log_error_errno(errno, "Failed to move writable image into place: %m"); - } - - log_info("Created new local image '%s'.", i->local); - return 0; -} - -static bool raw_import_is_done(RawImport *i) { - assert(i); - assert(i->raw_job); - - if (i->raw_job->state != IMPORT_JOB_DONE) - return false; - if (i->checksum_job && i->checksum_job->state != IMPORT_JOB_DONE) - return false; - if (i->signature_job && i->signature_job->state != IMPORT_JOB_DONE) - return false; - - return true; -} - -static void raw_import_job_on_finished(ImportJob *j) { - RawImport *i; - int r; - - assert(j); - assert(j->userdata); - - i = j->userdata; - if (j->error != 0) { - if (j == i->checksum_job) - log_error_errno(j->error, "Failed to retrieve SHA256 checksum, cannot verify. (Try --verify=no?)"); - else if (j == i->signature_job) - log_error_errno(j->error, "Failed to retrieve signature file, cannot verify. (Try --verify=no?)"); - else - log_error_errno(j->error, "Failed to retrieve image file. (Wrong URL?)"); - - r = j->error; - goto finish; - } - - /* This is invoked if either the download completed - * successfully, or the download was skipped because we - * already have the etag. In this case ->etag_exists is - * true. - * - * We only do something when we got all three files */ - - if (!raw_import_is_done(i)) - return; - - if (!i->raw_job->etag_exists) { - /* This is a new download, verify it, and move it into place */ - assert(i->raw_job->disk_fd >= 0); - - raw_import_report_progress(i, RAW_VERIFYING); - - r = import_verify(i->raw_job, i->checksum_job, i->signature_job); - if (r < 0) - goto finish; - - raw_import_report_progress(i, RAW_UNPACKING); - - r = raw_import_maybe_convert_qcow2(i); - if (r < 0) - goto finish; - - raw_import_report_progress(i, RAW_FINALIZING); - - r = import_make_read_only_fd(i->raw_job->disk_fd); - if (r < 0) - goto finish; - - r = rename(i->temp_path, i->final_path); - if (r < 0) { - r = log_error_errno(errno, "Failed to move RAW file into place: %m"); - goto finish; - } - - free(i->temp_path); - i->temp_path = NULL; - } - - raw_import_report_progress(i, RAW_COPYING); - - r = raw_import_make_local_copy(i); - if (r < 0) - goto finish; - - r = 0; - -finish: - if (i->on_finished) - i->on_finished(i, r, i->userdata); - else - sd_event_exit(i->event, r); -} - -static int raw_import_job_on_open_disk(ImportJob *j) { - RawImport *i; - int r; - - assert(j); - assert(j->userdata); - - i = j->userdata; - assert(i->raw_job == j); - assert(!i->final_path); - assert(!i->temp_path); - - r = import_make_path(j->url, j->etag, i->image_root, ".raw-", ".raw", &i->final_path); - if (r < 0) - return log_oom(); - - r = tempfn_random(i->final_path, &i->temp_path); - if (r <0) - return log_oom(); - - mkdir_parents_label(i->temp_path, 0700); - - j->disk_fd = open(i->temp_path, O_RDWR|O_CREAT|O_EXCL|O_NOCTTY|O_CLOEXEC, 0644); - if (j->disk_fd < 0) - return log_error_errno(errno, "Failed to create %s: %m", i->temp_path); - - r = chattr_fd(j->disk_fd, true, FS_NOCOW_FL); - if (r < 0) - log_warning_errno(errno, "Failed to set file 
attributes on %s: %m", i->temp_path); - - return 0; -} - -static void raw_import_job_on_progress(ImportJob *j) { - RawImport *i; - - assert(j); - assert(j->userdata); - - i = j->userdata; - - raw_import_report_progress(i, RAW_DOWNLOADING); -} - -int raw_import_pull(RawImport *i, const char *url, const char *local, bool force_local, ImportVerify verify) { - int r; - - assert(i); - assert(verify < _IMPORT_VERIFY_MAX); - assert(verify >= 0); - - if (!http_url_is_valid(url)) - return -EINVAL; - - if (local && !machine_name_is_valid(local)) - return -EINVAL; - - if (i->raw_job) - return -EBUSY; - - r = free_and_strdup(&i->local, local); - if (r < 0) - return r; - i->force_local = force_local; - i->verify = verify; - - /* Queue job for the image itself */ - r = import_job_new(&i->raw_job, url, i->glue, i); - if (r < 0) - return r; - - i->raw_job->on_finished = raw_import_job_on_finished; - i->raw_job->on_open_disk = raw_import_job_on_open_disk; - i->raw_job->on_progress = raw_import_job_on_progress; - i->raw_job->calc_checksum = verify != IMPORT_VERIFY_NO; - i->raw_job->grow_machine_directory = i->grow_machine_directory; - - r = import_find_old_etags(url, i->image_root, DT_REG, ".raw-", ".raw", &i->raw_job->old_etags); - if (r < 0) - return r; - - r = import_make_verification_jobs(&i->checksum_job, &i->signature_job, verify, url, i->glue, raw_import_job_on_finished, i); - if (r < 0) - return r; - - r = import_job_begin(i->raw_job); - if (r < 0) - return r; - - if (i->checksum_job) { - i->checksum_job->on_progress = raw_import_job_on_progress; - - r = import_job_begin(i->checksum_job); - if (r < 0) - return r; - } - - if (i->signature_job) { - i->signature_job->on_progress = raw_import_job_on_progress; - - r = import_job_begin(i->signature_job); - if (r < 0) - return r; - } - - return 0; -} diff --git a/src/import/import-raw.h b/src/import/import-raw.h deleted file mode 100644 index ae2c29991f..0000000000 --- a/src/import/import-raw.h +++ /dev/null @@ -1,37 +0,0 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - -#pragma once - -/*** - This file is part of systemd. - - Copyright 2014 Lennart Poettering - - systemd is free software; you can redistribute it and/or modify it - under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - systemd is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with systemd; If not, see . 
-***/ - -#include "sd-event.h" -#include "macro.h" -#include "import-util.h" - -typedef struct RawImport RawImport; - -typedef void (*RawImportFinished)(RawImport *import, int error, void *userdata); - -int raw_import_new(RawImport **import, sd_event *event, const char *image_root, RawImportFinished on_finished, void *userdata); -RawImport* raw_import_unref(RawImport *import); - -DEFINE_TRIVIAL_CLEANUP_FUNC(RawImport*, raw_import_unref); - -int raw_import_pull(RawImport *import, const char *url, const char *local, bool force_local, ImportVerify verify); diff --git a/src/import/import-tar.c b/src/import/import-tar.c deleted file mode 100644 index 472e336247..0000000000 --- a/src/import/import-tar.c +++ /dev/null @@ -1,414 +0,0 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - -/*** - This file is part of systemd. - - Copyright 2015 Lennart Poettering - - systemd is free software; you can redistribute it and/or modify it - under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - systemd is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with systemd; If not, see . -***/ - -#include -#include - -#include "sd-daemon.h" -#include "utf8.h" -#include "strv.h" -#include "copy.h" -#include "btrfs-util.h" -#include "util.h" -#include "macro.h" -#include "mkdir.h" -#include "path-util.h" -#include "import-util.h" -#include "curl-util.h" -#include "import-job.h" -#include "import-common.h" -#include "import-tar.h" - -typedef enum TarProgress { - TAR_DOWNLOADING, - TAR_VERIFYING, - TAR_FINALIZING, - TAR_COPYING, -} TarProgress; - -struct TarImport { - sd_event *event; - CurlGlue *glue; - - char *image_root; - - ImportJob *tar_job; - ImportJob *checksum_job; - ImportJob *signature_job; - - TarImportFinished on_finished; - void *userdata; - - char *local; - bool force_local; - bool grow_machine_directory; - - pid_t tar_pid; - - char *temp_path; - char *final_path; - - ImportVerify verify; -}; - -TarImport* tar_import_unref(TarImport *i) { - if (!i) - return NULL; - - if (i->tar_pid > 1) { - (void) kill_and_sigcont(i->tar_pid, SIGKILL); - (void) wait_for_terminate(i->tar_pid, NULL); - } - - import_job_unref(i->tar_job); - import_job_unref(i->checksum_job); - import_job_unref(i->signature_job); - - curl_glue_unref(i->glue); - sd_event_unref(i->event); - - if (i->temp_path) { - (void) btrfs_subvol_remove(i->temp_path); - (void) rm_rf_dangerous(i->temp_path, false, true, false); - free(i->temp_path); - } - - free(i->final_path); - free(i->image_root); - free(i->local); - free(i); - - return NULL; -} - -int tar_import_new( - TarImport **ret, - sd_event *event, - const char *image_root, - TarImportFinished on_finished, - void *userdata) { - - _cleanup_(tar_import_unrefp) TarImport *i = NULL; - int r; - - assert(ret); - assert(event); - - i = new0(TarImport, 1); - if (!i) - return -ENOMEM; - - i->on_finished = on_finished; - i->userdata = userdata; - - i->image_root = strdup(image_root ?: "/var/lib/machines"); - if (!i->image_root) - return -ENOMEM; - - i->grow_machine_directory = path_startswith(i->image_root, "/var/lib/machines"); - - if (event) - i->event = sd_event_ref(event); - else { - r = 
sd_event_default(&i->event); - if (r < 0) - return r; - } - - r = curl_glue_new(&i->glue, i->event); - if (r < 0) - return r; - - i->glue->on_finished = import_job_curl_on_finished; - i->glue->userdata = i; - - *ret = i; - i = NULL; - - return 0; -} - -static void tar_import_report_progress(TarImport *i, TarProgress p) { - unsigned percent; - - assert(i); - - switch (p) { - - case TAR_DOWNLOADING: { - unsigned remain = 85; - - percent = 0; - - if (i->checksum_job) { - percent += i->checksum_job->progress_percent * 5 / 100; - remain -= 5; - } - - if (i->signature_job) { - percent += i->signature_job->progress_percent * 5 / 100; - remain -= 5; - } - - if (i->tar_job) - percent += i->tar_job->progress_percent * remain / 100; - break; - } - - case TAR_VERIFYING: - percent = 85; - break; - - case TAR_FINALIZING: - percent = 90; - break; - - case TAR_COPYING: - percent = 95; - break; - - default: - assert_not_reached("Unknown progress state"); - } - - sd_notifyf(false, "X_IMPORT_PROGRESS=%u", percent); - log_debug("Combined progress %u%%", percent); -} - -static int tar_import_make_local_copy(TarImport *i) { - int r; - - assert(i); - assert(i->tar_job); - - if (!i->local) - return 0; - - if (!i->final_path) { - r = import_make_path(i->tar_job->url, i->tar_job->etag, i->image_root, ".tar-", NULL, &i->final_path); - if (r < 0) - return log_oom(); - } - - r = import_make_local_copy(i->final_path, i->image_root, i->local, i->force_local); - if (r < 0) - return r; - - return 0; -} - -static bool tar_import_is_done(TarImport *i) { - assert(i); - assert(i->tar_job); - - if (i->tar_job->state != IMPORT_JOB_DONE) - return false; - if (i->checksum_job && i->checksum_job->state != IMPORT_JOB_DONE) - return false; - if (i->signature_job && i->signature_job->state != IMPORT_JOB_DONE) - return false; - - return true; -} - -static void tar_import_job_on_finished(ImportJob *j) { - TarImport *i; - int r; - - assert(j); - assert(j->userdata); - - i = j->userdata; - if (j->error != 0) { - if (j == i->checksum_job) - log_error_errno(j->error, "Failed to retrieve SHA256 checksum, cannot verify. (Try --verify=no?)"); - else if (j == i->signature_job) - log_error_errno(j->error, "Failed to retrieve signature file, cannot verify. (Try --verify=no?)"); - else - log_error_errno(j->error, "Failed to retrieve image file. (Wrong URL?)"); - - r = j->error; - goto finish; - } - - /* This is invoked if either the download completed - * successfully, or the download was skipped because we - * already have the etag. 
*/ - - if (!tar_import_is_done(i)) - return; - - j->disk_fd = safe_close(i->tar_job->disk_fd); - - if (i->tar_pid > 0) { - r = wait_for_terminate_and_warn("tar", i->tar_pid, true); - i->tar_pid = 0; - if (r < 0) - goto finish; - } - - if (!i->tar_job->etag_exists) { - /* This is a new download, verify it, and move it into place */ - - tar_import_report_progress(i, TAR_VERIFYING); - - r = import_verify(i->tar_job, i->checksum_job, i->signature_job); - if (r < 0) - goto finish; - - tar_import_report_progress(i, TAR_FINALIZING); - - r = import_make_read_only(i->temp_path); - if (r < 0) - goto finish; - - if (rename(i->temp_path, i->final_path) < 0) { - r = log_error_errno(errno, "Failed to rename to final image name: %m"); - goto finish; - } - - free(i->temp_path); - i->temp_path = NULL; - } - - tar_import_report_progress(i, TAR_COPYING); - - r = tar_import_make_local_copy(i); - if (r < 0) - goto finish; - - r = 0; - -finish: - if (i->on_finished) - i->on_finished(i, r, i->userdata); - else - sd_event_exit(i->event, r); -} - -static int tar_import_job_on_open_disk(ImportJob *j) { - TarImport *i; - int r; - - assert(j); - assert(j->userdata); - - i = j->userdata; - assert(i->tar_job == j); - assert(!i->final_path); - assert(!i->temp_path); - assert(i->tar_pid <= 0); - - r = import_make_path(j->url, j->etag, i->image_root, ".tar-", NULL, &i->final_path); - if (r < 0) - return log_oom(); - - r = tempfn_random(i->final_path, &i->temp_path); - if (r < 0) - return log_oom(); - - mkdir_parents_label(i->temp_path, 0700); - - r = btrfs_subvol_make(i->temp_path); - if (r == -ENOTTY) { - if (mkdir(i->temp_path, 0755) < 0) - return log_error_errno(errno, "Failed to create directory %s: %m", i->temp_path); - } else if (r < 0) - return log_error_errno(errno, "Failed to create subvolume %s: %m", i->temp_path); - - j->disk_fd = import_fork_tar(i->temp_path, &i->tar_pid); - if (j->disk_fd < 0) - return j->disk_fd; - - return 0; -} - -static void tar_import_job_on_progress(ImportJob *j) { - TarImport *i; - - assert(j); - assert(j->userdata); - - i = j->userdata; - - tar_import_report_progress(i, TAR_DOWNLOADING); -} - -int tar_import_pull(TarImport *i, const char *url, const char *local, bool force_local, ImportVerify verify) { - int r; - - assert(i); - - if (!http_url_is_valid(url)) - return -EINVAL; - - if (local && !machine_name_is_valid(local)) - return -EINVAL; - - if (i->tar_job) - return -EBUSY; - - r = free_and_strdup(&i->local, local); - if (r < 0) - return r; - i->force_local = force_local; - i->verify = verify; - - r = import_job_new(&i->tar_job, url, i->glue, i); - if (r < 0) - return r; - - i->tar_job->on_finished = tar_import_job_on_finished; - i->tar_job->on_open_disk = tar_import_job_on_open_disk; - i->tar_job->on_progress = tar_import_job_on_progress; - i->tar_job->calc_checksum = verify != IMPORT_VERIFY_NO; - i->tar_job->grow_machine_directory = i->grow_machine_directory; - - r = import_find_old_etags(url, i->image_root, DT_DIR, ".tar-", NULL, &i->tar_job->old_etags); - if (r < 0) - return r; - - r = import_make_verification_jobs(&i->checksum_job, &i->signature_job, verify, url, i->glue, tar_import_job_on_finished, i); - if (r < 0) - return r; - - r = import_job_begin(i->tar_job); - if (r < 0) - return r; - - if (i->checksum_job) { - i->checksum_job->on_progress = tar_import_job_on_progress; - - r = import_job_begin(i->checksum_job); - if (r < 0) - return r; - } - - if (i->signature_job) { - i->signature_job->on_progress = tar_import_job_on_progress; - - r = 
import_job_begin(i->signature_job); - if (r < 0) - return r; - } - - return 0; -} diff --git a/src/import/import-tar.h b/src/import/import-tar.h deleted file mode 100644 index 212f804d16..0000000000 --- a/src/import/import-tar.h +++ /dev/null @@ -1,37 +0,0 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - -#pragma once - -/*** - This file is part of systemd. - - Copyright 2015 Lennart Poettering - - systemd is free software; you can redistribute it and/or modify it - under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - systemd is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with systemd; If not, see . -***/ - -#include "sd-event.h" -#include "macro.h" -#include "import-util.h" - -typedef struct TarImport TarImport; - -typedef void (*TarImportFinished)(TarImport *import, int error, void *userdata); - -int tar_import_new(TarImport **import, sd_event *event, const char *image_root, TarImportFinished on_finished, void *userdata); -TarImport* tar_import_unref(TarImport *import); - -DEFINE_TRIVIAL_CLEANUP_FUNC(TarImport*, tar_import_unref); - -int tar_import_pull(TarImport *import, const char *url, const char *local, bool force_local, ImportVerify verify); diff --git a/src/import/pull-common.c b/src/import/pull-common.c new file mode 100644 index 0000000000..38380fc018 --- /dev/null +++ b/src/import/pull-common.c @@ -0,0 +1,545 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +/*** + This file is part of systemd. + + Copyright 2015 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . 
+***/ + +#include + +#include "util.h" +#include "strv.h" +#include "copy.h" +#include "btrfs-util.h" +#include "capability.h" +#include "pull-job.h" +#include "pull-common.h" + +#define FILENAME_ESCAPE "/.#\"\'" + +int pull_find_old_etags(const char *url, const char *image_root, int dt, const char *prefix, const char *suffix, char ***etags) { + _cleanup_free_ char *escaped_url = NULL; + _cleanup_closedir_ DIR *d = NULL; + _cleanup_strv_free_ char **l = NULL; + struct dirent *de; + int r; + + assert(url); + assert(etags); + + if (!image_root) + image_root = "/var/lib/machines"; + + escaped_url = xescape(url, FILENAME_ESCAPE); + if (!escaped_url) + return -ENOMEM; + + d = opendir(image_root); + if (!d) { + if (errno == ENOENT) { + *etags = NULL; + return 0; + } + + return -errno; + } + + FOREACH_DIRENT_ALL(de, d, return -errno) { + const char *a, *b; + char *u; + + if (de->d_type != DT_UNKNOWN && + de->d_type != dt) + continue; + + if (prefix) { + a = startswith(de->d_name, prefix); + if (!a) + continue; + } else + a = de->d_name; + + a = startswith(a, escaped_url); + if (!a) + continue; + + a = startswith(a, "."); + if (!a) + continue; + + if (suffix) { + b = endswith(de->d_name, suffix); + if (!b) + continue; + } else + b = strchr(de->d_name, 0); + + if (a >= b) + continue; + + u = cunescape_length(a, b - a); + if (!u) + return -ENOMEM; + + if (!http_etag_is_valid(u)) { + free(u); + continue; + } + + r = strv_consume(&l, u); + if (r < 0) + return r; + } + + *etags = l; + l = NULL; + + return 0; +} + +int pull_make_local_copy(const char *final, const char *image_root, const char *local, bool force_local) { + const char *p; + int r; + + assert(final); + assert(local); + + if (!image_root) + image_root = "/var/lib/machines"; + + p = strjoina(image_root, "/", local); + + if (force_local) { + (void) btrfs_subvol_remove(p); + (void) rm_rf_dangerous(p, false, true, false); + } + + r = btrfs_subvol_snapshot(final, p, false, false); + if (r == -ENOTTY) { + r = copy_tree(final, p, false); + if (r < 0) + return log_error_errno(r, "Failed to copy image: %m"); + } else if (r < 0) + return log_error_errno(r, "Failed to create local image: %m"); + + log_info("Created new local image '%s'.", local); + + return 0; +} + +int pull_make_read_only_fd(int fd) { + int r; + + assert(fd >= 0); + + /* First, let's make this a read-only subvolume if it refers + * to a subvolume */ + r = btrfs_subvol_set_read_only_fd(fd, true); + if (r == -ENOTTY || r == -ENOTDIR || r == -EINVAL) { + struct stat st; + + /* This doesn't refer to a subvolume, or the file + * system isn't even btrfs. 
In that, case fall back to + * chmod()ing */ + + r = fstat(fd, &st); + if (r < 0) + return log_error_errno(errno, "Failed to stat temporary image: %m"); + + /* Drop "w" flag */ + if (fchmod(fd, st.st_mode & 07555) < 0) + return log_error_errno(errno, "Failed to chmod() final image: %m"); + + return 0; + + } else if (r < 0) + return log_error_errno(r, "Failed to make subvolume read-only: %m"); + + return 0; +} + +int pull_make_read_only(const char *path) { + _cleanup_close_ int fd = 1; + + fd = open(path, O_RDONLY|O_NOCTTY|O_CLOEXEC); + if (fd < 0) + return log_error_errno(errno, "Failed to open %s: %m", path); + + return pull_make_read_only_fd(fd); +} + +int pull_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret) { + _cleanup_free_ char *escaped_url = NULL; + char *path; + + assert(url); + assert(ret); + + if (!image_root) + image_root = "/var/lib/machines"; + + escaped_url = xescape(url, FILENAME_ESCAPE); + if (!escaped_url) + return -ENOMEM; + + if (etag) { + _cleanup_free_ char *escaped_etag = NULL; + + escaped_etag = xescape(etag, FILENAME_ESCAPE); + if (!escaped_etag) + return -ENOMEM; + + path = strjoin(image_root, "/", strempty(prefix), escaped_url, ".", escaped_etag, strempty(suffix), NULL); + } else + path = strjoin(image_root, "/", strempty(prefix), escaped_url, strempty(suffix), NULL); + if (!path) + return -ENOMEM; + + *ret = path; + return 0; +} + +int pull_make_verification_jobs( + PullJob **ret_checksum_job, + PullJob **ret_signature_job, + ImportVerify verify, + const char *url, + CurlGlue *glue, + PullJobFinished on_finished, + void *userdata) { + + _cleanup_(pull_job_unrefp) PullJob *checksum_job = NULL, *signature_job = NULL; + int r; + + assert(ret_checksum_job); + assert(ret_signature_job); + assert(verify >= 0); + assert(verify < _IMPORT_VERIFY_MAX); + assert(url); + assert(glue); + + if (verify != IMPORT_VERIFY_NO) { + _cleanup_free_ char *checksum_url = NULL; + + /* Queue job for the SHA256SUMS file for the image */ + r = import_url_change_last_component(url, "SHA256SUMS", &checksum_url); + if (r < 0) + return r; + + r = pull_job_new(&checksum_job, checksum_url, glue, userdata); + if (r < 0) + return r; + + checksum_job->on_finished = on_finished; + checksum_job->uncompressed_max = checksum_job->compressed_max = 1ULL * 1024ULL * 1024ULL; + } + + if (verify == IMPORT_VERIFY_SIGNATURE) { + _cleanup_free_ char *signature_url = NULL; + + /* Queue job for the SHA256SUMS.gpg file for the image. 
*/ + r = import_url_change_last_component(url, "SHA256SUMS.gpg", &signature_url); + if (r < 0) + return r; + + r = pull_job_new(&signature_job, signature_url, glue, userdata); + if (r < 0) + return r; + + signature_job->on_finished = on_finished; + signature_job->uncompressed_max = signature_job->compressed_max = 1ULL * 1024ULL * 1024ULL; + } + + *ret_checksum_job = checksum_job; + *ret_signature_job = signature_job; + + checksum_job = signature_job = NULL; + + return 0; +} + +int pull_verify( + PullJob *main_job, + PullJob *checksum_job, + PullJob *signature_job) { + + _cleanup_close_pair_ int gpg_pipe[2] = { -1, -1 }; + _cleanup_free_ char *fn = NULL; + _cleanup_close_ int sig_file = -1; + const char *p, *line; + char sig_file_path[] = "/tmp/sigXXXXXX", gpg_home[] = "/tmp/gpghomeXXXXXX"; + _cleanup_sigkill_wait_ pid_t pid = 0; + bool gpg_home_created = false; + int r; + + assert(main_job); + assert(main_job->state == PULL_JOB_DONE); + + if (!checksum_job) + return 0; + + assert(main_job->calc_checksum); + assert(main_job->checksum); + assert(checksum_job->state == PULL_JOB_DONE); + + if (!checksum_job->payload || checksum_job->payload_size <= 0) { + log_error("Checksum is empty, cannot verify."); + return -EBADMSG; + } + + r = import_url_last_component(main_job->url, &fn); + if (r < 0) + return log_oom(); + + if (!filename_is_valid(fn)) { + log_error("Cannot verify checksum, could not determine valid server-side file name."); + return -EBADMSG; + } + + line = strjoina(main_job->checksum, " *", fn, "\n"); + + p = memmem(checksum_job->payload, + checksum_job->payload_size, + line, + strlen(line)); + + if (!p || (p != (char*) checksum_job->payload && p[-1] != '\n')) { + log_error("Checksum did not check out, payload has been tempered with."); + return -EBADMSG; + } + + log_info("SHA256 checksum of %s is valid.", main_job->url); + + if (!signature_job) + return 0; + + assert(signature_job->state == PULL_JOB_DONE); + + if (!signature_job->payload || signature_job->payload_size <= 0) { + log_error("Signature is empty, cannot verify."); + return -EBADMSG; + } + + r = pipe2(gpg_pipe, O_CLOEXEC); + if (r < 0) + return log_error_errno(errno, "Failed to create pipe for gpg: %m"); + + sig_file = mkostemp(sig_file_path, O_RDWR); + if (sig_file < 0) + return log_error_errno(errno, "Failed to create temporary file: %m"); + + r = loop_write(sig_file, signature_job->payload, signature_job->payload_size, false); + if (r < 0) { + log_error_errno(r, "Failed to write to temporary file: %m"); + goto finish; + } + + if (!mkdtemp(gpg_home)) { + r = log_error_errno(errno, "Failed to create tempory home for gpg: %m"); + goto finish; + } + + gpg_home_created = true; + + pid = fork(); + if (pid < 0) + return log_error_errno(errno, "Failed to fork off gpg: %m"); + if (pid == 0) { + const char *cmd[] = { + "gpg", + "--no-options", + "--no-default-keyring", + "--no-auto-key-locate", + "--no-auto-check-trustdb", + "--batch", + "--trust-model=always", + NULL, /* --homedir= */ + NULL, /* --keyring= */ + NULL, /* --verify */ + NULL, /* signature file */ + NULL, /* dash */ + NULL /* trailing NULL */ + }; + unsigned k = ELEMENTSOF(cmd) - 6; + int null_fd; + + /* Child */ + + reset_all_signal_handlers(); + reset_signal_mask(); + assert_se(prctl(PR_SET_PDEATHSIG, SIGTERM) == 0); + + gpg_pipe[1] = safe_close(gpg_pipe[1]); + + if (dup2(gpg_pipe[0], STDIN_FILENO) != STDIN_FILENO) { + log_error_errno(errno, "Failed to dup2() fd: %m"); + _exit(EXIT_FAILURE); + } + + if (gpg_pipe[0] != STDIN_FILENO) + gpg_pipe[0] = 
safe_close(gpg_pipe[0]); + + null_fd = open("/dev/null", O_WRONLY|O_NOCTTY); + if (null_fd < 0) { + log_error_errno(errno, "Failed to open /dev/null: %m"); + _exit(EXIT_FAILURE); + } + + if (dup2(null_fd, STDOUT_FILENO) != STDOUT_FILENO) { + log_error_errno(errno, "Failed to dup2() fd: %m"); + _exit(EXIT_FAILURE); + } + + if (null_fd != STDOUT_FILENO) + null_fd = safe_close(null_fd); + + cmd[k++] = strjoina("--homedir=", gpg_home); + + /* We add the user keyring only to the command line + * arguments, if it's around since gpg fails + * otherwise. */ + if (access(USER_KEYRING_PATH, F_OK) >= 0) + cmd[k++] = "--keyring=" USER_KEYRING_PATH; + else + cmd[k++] = "--keyring=" VENDOR_KEYRING_PATH; + + cmd[k++] = "--verify"; + cmd[k++] = sig_file_path; + cmd[k++] = "-"; + cmd[k++] = NULL; + + fd_cloexec(STDIN_FILENO, false); + fd_cloexec(STDOUT_FILENO, false); + fd_cloexec(STDERR_FILENO, false); + + execvp("gpg2", (char * const *) cmd); + execvp("gpg", (char * const *) cmd); + log_error_errno(errno, "Failed to execute gpg: %m"); + _exit(EXIT_FAILURE); + } + + gpg_pipe[0] = safe_close(gpg_pipe[0]); + + r = loop_write(gpg_pipe[1], checksum_job->payload, checksum_job->payload_size, false); + if (r < 0) { + log_error_errno(r, "Failed to write to pipe: %m"); + goto finish; + } + + gpg_pipe[1] = safe_close(gpg_pipe[1]); + + r = wait_for_terminate_and_warn("gpg", pid, true); + pid = 0; + if (r < 0) + goto finish; + if (r > 0) { + log_error("Signature verification failed."); + r = -EBADMSG; + } else { + log_info("Signature verification succeeded."); + r = 0; + } + +finish: + if (sig_file >= 0) + unlink(sig_file_path); + + if (gpg_home_created) + rm_rf_dangerous(gpg_home, false, true, false); + + return r; +} + +int pull_fork_tar(const char *path, pid_t *ret) { + _cleanup_close_pair_ int pipefd[2] = { -1, -1 }; + pid_t pid; + int r; + + assert(path); + assert(ret); + + if (pipe2(pipefd, O_CLOEXEC) < 0) + return log_error_errno(errno, "Failed to create pipe for tar: %m"); + + pid = fork(); + if (pid < 0) + return log_error_errno(errno, "Failed to fork off tar: %m"); + + if (pid == 0) { + int null_fd; + uint64_t retain = + (1ULL << CAP_CHOWN) | + (1ULL << CAP_FOWNER) | + (1ULL << CAP_FSETID) | + (1ULL << CAP_MKNOD) | + (1ULL << CAP_SETFCAP) | + (1ULL << CAP_DAC_OVERRIDE); + + /* Child */ + + reset_all_signal_handlers(); + reset_signal_mask(); + assert_se(prctl(PR_SET_PDEATHSIG, SIGTERM) == 0); + + pipefd[1] = safe_close(pipefd[1]); + + if (dup2(pipefd[0], STDIN_FILENO) != STDIN_FILENO) { + log_error_errno(errno, "Failed to dup2() fd: %m"); + _exit(EXIT_FAILURE); + } + + if (pipefd[0] != STDIN_FILENO) + pipefd[0] = safe_close(pipefd[0]); + + null_fd = open("/dev/null", O_WRONLY|O_NOCTTY); + if (null_fd < 0) { + log_error_errno(errno, "Failed to open /dev/null: %m"); + _exit(EXIT_FAILURE); + } + + if (dup2(null_fd, STDOUT_FILENO) != STDOUT_FILENO) { + log_error_errno(errno, "Failed to dup2() fd: %m"); + _exit(EXIT_FAILURE); + } + + if (null_fd != STDOUT_FILENO) + null_fd = safe_close(null_fd); + + fd_cloexec(STDIN_FILENO, false); + fd_cloexec(STDOUT_FILENO, false); + fd_cloexec(STDERR_FILENO, false); + + if (unshare(CLONE_NEWNET) < 0) + log_error_errno(errno, "Failed to lock tar into network namespace, ignoring: %m"); + + r = capability_bounding_set_drop(~retain, true); + if (r < 0) + log_error_errno(r, "Failed to drop capabilities, ignoring: %m"); + + execlp("tar", "tar", "--numeric-owner", "-C", path, "-px", NULL); + log_error_errno(errno, "Failed to execute tar: %m"); + _exit(EXIT_FAILURE); + } + + pipefd[0] 
= safe_close(pipefd[0]); + r = pipefd[1]; + pipefd[1] = -1; + + *ret = pid; + + return r; +} diff --git a/src/import/pull-common.h b/src/import/pull-common.h new file mode 100644 index 0000000000..4ac016bde6 --- /dev/null +++ b/src/import/pull-common.h @@ -0,0 +1,41 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +#pragma once + +/*** + This file is part of systemd. + + Copyright 2015 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . +***/ + +#include + +#include "pull-job.h" +#include "import-util.h" + +int pull_make_local_copy(const char *final, const char *root, const char *local, bool force_local); + +int pull_find_old_etags(const char *url, const char *root, int dt, const char *prefix, const char *suffix, char ***etags); + +int pull_make_read_only_fd(int fd); +int pull_make_read_only(const char *path); + +int pull_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret); + +int pull_make_verification_jobs(PullJob **ret_checksum_job, PullJob **ret_signature_job, ImportVerify verify, const char *url, CurlGlue *glue, PullJobFinished on_finished, void *userdata); +int pull_verify(PullJob *main_job, PullJob *checksum_job, PullJob *signature_job); + +int pull_fork_tar(const char *path, pid_t *ret); diff --git a/src/import/pull-dkr.c b/src/import/pull-dkr.c new file mode 100644 index 0000000000..ecbf8063ce --- /dev/null +++ b/src/import/pull-dkr.c @@ -0,0 +1,896 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +/*** + This file is part of systemd. + + Copyright 2014 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . 
+***/ + +#include +#include + +#include "sd-daemon.h" +#include "json.h" +#include "strv.h" +#include "btrfs-util.h" +#include "utf8.h" +#include "mkdir.h" +#include "path-util.h" +#include "import-util.h" +#include "curl-util.h" +#include "aufs-util.h" +#include "pull-job.h" +#include "pull-common.h" +#include "pull-dkr.h" + +typedef enum DkrProgress { + DKR_SEARCHING, + DKR_RESOLVING, + DKR_METADATA, + DKR_DOWNLOADING, + DKR_COPYING, +} DkrProgress; + +struct DkrPull { + sd_event *event; + CurlGlue *glue; + + char *index_url; + char *image_root; + + PullJob *images_job; + PullJob *tags_job; + PullJob *ancestry_job; + PullJob *json_job; + PullJob *layer_job; + + char *name; + char *tag; + char *id; + + char *response_token; + char **response_registries; + + char **ancestry; + unsigned n_ancestry; + unsigned current_ancestry; + + DkrPullFinished on_finished; + void *userdata; + + char *local; + bool force_local; + bool grow_machine_directory; + + char *temp_path; + char *final_path; + + pid_t tar_pid; +}; + +#define PROTOCOL_PREFIX "https://" + +#define HEADER_TOKEN "X-Do" /* the HTTP header for the auth token */ "cker-Token:" +#define HEADER_REGISTRY "X-Do" /*the HTTP header for the registry */ "cker-Endpoints:" + +#define LAYERS_MAX 2048 + +static void dkr_pull_job_on_finished(PullJob *j); + +DkrPull* dkr_pull_unref(DkrPull *i) { + if (!i) + return NULL; + + if (i->tar_pid > 1) { + (void) kill_and_sigcont(i->tar_pid, SIGKILL); + (void) wait_for_terminate(i->tar_pid, NULL); + } + + pull_job_unref(i->images_job); + pull_job_unref(i->tags_job); + pull_job_unref(i->ancestry_job); + pull_job_unref(i->json_job); + pull_job_unref(i->layer_job); + + curl_glue_unref(i->glue); + sd_event_unref(i->event); + + if (i->temp_path) { + (void) btrfs_subvol_remove(i->temp_path); + (void) rm_rf_dangerous(i->temp_path, false, true, false); + free(i->temp_path); + } + + free(i->name); + free(i->tag); + free(i->id); + free(i->response_token); + free(i->response_registries); + strv_free(i->ancestry); + free(i->final_path); + free(i->index_url); + free(i->image_root); + free(i->local); + free(i); + + return NULL; +} + +int dkr_pull_new( + DkrPull **ret, + sd_event *event, + const char *index_url, + const char *image_root, + DkrPullFinished on_finished, + void *userdata) { + + _cleanup_(dkr_pull_unrefp) DkrPull *i = NULL; + char *e; + int r; + + assert(ret); + assert(index_url); + + if (!http_url_is_valid(index_url)) + return -EINVAL; + + i = new0(DkrPull, 1); + if (!i) + return -ENOMEM; + + i->on_finished = on_finished; + i->userdata = userdata; + + i->image_root = strdup(image_root ?: "/var/lib/machines"); + if (!i->image_root) + return -ENOMEM; + + i->grow_machine_directory = path_startswith(i->image_root, "/var/lib/machines"); + + i->index_url = strdup(index_url); + if (!i->index_url) + return -ENOMEM; + + e = endswith(i->index_url, "/"); + if (e) + *e = 0; + + if (event) + i->event = sd_event_ref(event); + else { + r = sd_event_default(&i->event); + if (r < 0) + return r; + } + + r = curl_glue_new(&i->glue, i->event); + if (r < 0) + return r; + + i->glue->on_finished = pull_job_curl_on_finished; + i->glue->userdata = i; + + *ret = i; + i = NULL; + + return 0; +} + +static void dkr_pull_report_progress(DkrPull *i, DkrProgress p) { + unsigned percent; + + assert(i); + + switch (p) { + + case DKR_SEARCHING: + percent = 0; + if (i->images_job) + percent += i->images_job->progress_percent * 5 / 100; + break; + + case DKR_RESOLVING: + percent = 5; + if (i->tags_job) + percent += i->tags_job->progress_percent * 5 
/ 100; + break; + + case DKR_METADATA: + percent = 10; + if (i->ancestry_job) + percent += i->ancestry_job->progress_percent * 5 / 100; + if (i->json_job) + percent += i->json_job->progress_percent * 5 / 100; + break; + + case DKR_DOWNLOADING: + percent = 20; + percent += 75 * i->current_ancestry / MAX(1U, i->n_ancestry); + if (i->layer_job) + percent += i->layer_job->progress_percent * 75 / MAX(1U, i->n_ancestry) / 100; + + break; + + case DKR_COPYING: + percent = 95; + break; + + default: + assert_not_reached("Unknown progress state"); + } + + sd_notifyf(false, "X_IMPORT_PROGRESS=%u", percent); + log_debug("Combined progress %u%%", percent); +} + +static int parse_id(const void *payload, size_t size, char **ret) { + _cleanup_free_ char *buf = NULL, *id = NULL, *other = NULL; + union json_value v = {}; + void *json_state = NULL; + const char *p; + int t; + + assert(payload); + assert(ret); + + if (size <= 0) + return -EBADMSG; + + if (memchr(payload, 0, size)) + return -EBADMSG; + + buf = strndup(payload, size); + if (!buf) + return -ENOMEM; + + p = buf; + t = json_tokenize(&p, &id, &v, &json_state, NULL); + if (t < 0) + return t; + if (t != JSON_STRING) + return -EBADMSG; + + t = json_tokenize(&p, &other, &v, &json_state, NULL); + if (t < 0) + return t; + if (t != JSON_END) + return -EBADMSG; + + if (!dkr_id_is_valid(id)) + return -EBADMSG; + + *ret = id; + id = NULL; + + return 0; +} + +static int parse_ancestry(const void *payload, size_t size, char ***ret) { + _cleanup_free_ char *buf = NULL; + void *json_state = NULL; + const char *p; + enum { + STATE_BEGIN, + STATE_ITEM, + STATE_COMMA, + STATE_END, + } state = STATE_BEGIN; + _cleanup_strv_free_ char **l = NULL; + size_t n = 0, allocated = 0; + + if (size <= 0) + return -EBADMSG; + + if (memchr(payload, 0, size)) + return -EBADMSG; + + buf = strndup(payload, size); + if (!buf) + return -ENOMEM; + + p = buf; + for (;;) { + _cleanup_free_ char *str; + union json_value v = {}; + int t; + + t = json_tokenize(&p, &str, &v, &json_state, NULL); + if (t < 0) + return t; + + switch (state) { + + case STATE_BEGIN: + if (t == JSON_ARRAY_OPEN) + state = STATE_ITEM; + else + return -EBADMSG; + + break; + + case STATE_ITEM: + if (t == JSON_STRING) { + if (!dkr_id_is_valid(str)) + return -EBADMSG; + + if (n+1 > LAYERS_MAX) + return -EFBIG; + + if (!GREEDY_REALLOC(l, allocated, n + 2)) + return -ENOMEM; + + l[n++] = str; + str = NULL; + l[n] = NULL; + + state = STATE_COMMA; + + } else if (t == JSON_ARRAY_CLOSE) + state = STATE_END; + else + return -EBADMSG; + + break; + + case STATE_COMMA: + if (t == JSON_COMMA) + state = STATE_ITEM; + else if (t == JSON_ARRAY_CLOSE) + state = STATE_END; + else + return -EBADMSG; + break; + + case STATE_END: + if (t == JSON_END) { + + if (strv_isempty(l)) + return -EBADMSG; + + if (!strv_is_uniq(l)) + return -EBADMSG; + + l = strv_reverse(l); + + *ret = l; + l = NULL; + return 0; + } else + return -EBADMSG; + } + + } +} + +static const char *dkr_pull_current_layer(DkrPull *i) { + assert(i); + + if (strv_isempty(i->ancestry)) + return NULL; + + return i->ancestry[i->current_ancestry]; +} + +static const char *dkr_pull_current_base_layer(DkrPull *i) { + assert(i); + + if (strv_isempty(i->ancestry)) + return NULL; + + if (i->current_ancestry <= 0) + return NULL; + + return i->ancestry[i->current_ancestry-1]; +} + +static int dkr_pull_add_token(DkrPull *i, PullJob *j) { + const char *t; + + assert(i); + assert(j); + + if (i->response_token) + t = strjoina("Authorization: Token ", i->response_token); + else + t = 
HEADER_TOKEN " true"; + + j->request_header = curl_slist_new("Accept: application/json", t, NULL); + if (!j->request_header) + return -ENOMEM; + + return 0; +} + +static bool dkr_pull_is_done(DkrPull *i) { + assert(i); + assert(i->images_job); + + if (i->images_job->state != PULL_JOB_DONE) + return false; + + if (!i->tags_job || i->tags_job->state != PULL_JOB_DONE) + return false; + + if (!i->ancestry_job || i->ancestry_job->state != PULL_JOB_DONE) + return false; + + if (!i->json_job || i->json_job->state != PULL_JOB_DONE) + return false; + + if (i->layer_job && i->layer_job->state != PULL_JOB_DONE) + return false; + + if (dkr_pull_current_layer(i)) + return false; + + return true; +} + +static int dkr_pull_make_local_copy(DkrPull *i) { + int r; + + assert(i); + + if (!i->local) + return 0; + + if (!i->final_path) { + i->final_path = strjoin(i->image_root, "/.dkr-", i->id, NULL); + if (!i->final_path) + return log_oom(); + } + + r = pull_make_local_copy(i->final_path, i->image_root, i->local, i->force_local); + if (r < 0) + return r; + + return 0; +} + +static int dkr_pull_job_on_open_disk(PullJob *j) { + const char *base; + DkrPull *i; + int r; + + assert(j); + assert(j->userdata); + + i = j->userdata; + assert(i->layer_job == j); + assert(i->final_path); + assert(!i->temp_path); + assert(i->tar_pid <= 0); + + r = tempfn_random(i->final_path, &i->temp_path); + if (r < 0) + return log_oom(); + + mkdir_parents_label(i->temp_path, 0700); + + base = dkr_pull_current_base_layer(i); + if (base) { + const char *base_path; + + base_path = strjoina(i->image_root, "/.dkr-", base); + r = btrfs_subvol_snapshot(base_path, i->temp_path, false, true); + } else + r = btrfs_subvol_make(i->temp_path); + if (r < 0) + return log_error_errno(r, "Failed to make btrfs subvolume %s: %m", i->temp_path); + + j->disk_fd = pull_fork_tar(i->temp_path, &i->tar_pid); + if (j->disk_fd < 0) + return j->disk_fd; + + return 0; +} + +static void dkr_pull_job_on_progress(PullJob *j) { + DkrPull *i; + + assert(j); + assert(j->userdata); + + i = j->userdata; + + dkr_pull_report_progress( + i, + j == i->images_job ? DKR_SEARCHING : + j == i->tags_job ? DKR_RESOLVING : + j == i->ancestry_job || j == i->json_job ? 
DKR_METADATA : + DKR_DOWNLOADING); +} + +static int dkr_pull_pull_layer(DkrPull *i) { + _cleanup_free_ char *path = NULL; + const char *url, *layer = NULL; + int r; + + assert(i); + assert(!i->layer_job); + assert(!i->temp_path); + assert(!i->final_path); + + for (;;) { + layer = dkr_pull_current_layer(i); + if (!layer) + return 0; /* no more layers */ + + path = strjoin(i->image_root, "/.dkr-", layer, NULL); + if (!path) + return log_oom(); + + if (laccess(path, F_OK) < 0) { + if (errno == ENOENT) + break; + + return log_error_errno(errno, "Failed to check for container: %m"); + } + + log_info("Layer %s already exists, skipping.", layer); + + i->current_ancestry++; + + free(path); + path = NULL; + } + + log_info("Pulling layer %s...", layer); + + i->final_path = path; + path = NULL; + + url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/images/", layer, "/layer"); + r = pull_job_new(&i->layer_job, url, i->glue, i); + if (r < 0) + return log_error_errno(r, "Failed to allocate layer job: %m"); + + r = dkr_pull_add_token(i, i->layer_job); + if (r < 0) + return log_oom(); + + i->layer_job->on_finished = dkr_pull_job_on_finished; + i->layer_job->on_open_disk = dkr_pull_job_on_open_disk; + i->layer_job->on_progress = dkr_pull_job_on_progress; + i->layer_job->grow_machine_directory = i->grow_machine_directory; + + r = pull_job_begin(i->layer_job); + if (r < 0) + return log_error_errno(r, "Failed to start layer job: %m"); + + return 0; +} + +static void dkr_pull_job_on_finished(PullJob *j) { + DkrPull *i; + int r; + + assert(j); + assert(j->userdata); + + i = j->userdata; + if (j->error != 0) { + if (j == i->images_job) + log_error_errno(j->error, "Failed to retrieve images list. (Wrong index URL?)"); + else if (j == i->tags_job) + log_error_errno(j->error, "Failed to retrieve tags list."); + else if (j == i->ancestry_job) + log_error_errno(j->error, "Failed to retrieve ancestry list."); + else if (j == i->json_job) + log_error_errno(j->error, "Failed to retrieve json data."); + else + log_error_errno(j->error, "Failed to retrieve layer data."); + + r = j->error; + goto finish; + } + + if (i->images_job == j) { + const char *url; + + assert(!i->tags_job); + assert(!i->ancestry_job); + assert(!i->json_job); + assert(!i->layer_job); + + if (strv_isempty(i->response_registries)) { + r = -EBADMSG; + log_error("Didn't get registry information."); + goto finish; + } + + log_info("Index lookup succeeded, directed to registry %s.", i->response_registries[0]); + dkr_pull_report_progress(i, DKR_RESOLVING); + + url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/repositories/", i->name, "/tags/", i->tag); + r = pull_job_new(&i->tags_job, url, i->glue, i); + if (r < 0) { + log_error_errno(r, "Failed to allocate tags job: %m"); + goto finish; + } + + r = dkr_pull_add_token(i, i->tags_job); + if (r < 0) { + log_oom(); + goto finish; + } + + i->tags_job->on_finished = dkr_pull_job_on_finished; + i->tags_job->on_progress = dkr_pull_job_on_progress; + + r = pull_job_begin(i->tags_job); + if (r < 0) { + log_error_errno(r, "Failed to start tags job: %m"); + goto finish; + } + + } else if (i->tags_job == j) { + const char *url; + char *id = NULL; + + assert(!i->ancestry_job); + assert(!i->json_job); + assert(!i->layer_job); + + r = parse_id(j->payload, j->payload_size, &id); + if (r < 0) { + log_error_errno(r, "Failed to parse JSON id."); + goto finish; + } + + free(i->id); + i->id = id; + + log_info("Tag lookup succeeded, resolved to layer %s.", i->id); + dkr_pull_report_progress(i, 
DKR_METADATA); + + url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/images/", i->id, "/ancestry"); + r = pull_job_new(&i->ancestry_job, url, i->glue, i); + if (r < 0) { + log_error_errno(r, "Failed to allocate ancestry job: %m"); + goto finish; + } + + r = dkr_pull_add_token(i, i->ancestry_job); + if (r < 0) { + log_oom(); + goto finish; + } + + i->ancestry_job->on_finished = dkr_pull_job_on_finished; + i->ancestry_job->on_progress = dkr_pull_job_on_progress; + + url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/images/", i->id, "/json"); + r = pull_job_new(&i->json_job, url, i->glue, i); + if (r < 0) { + log_error_errno(r, "Failed to allocate json job: %m"); + goto finish; + } + + r = dkr_pull_add_token(i, i->json_job); + if (r < 0) { + log_oom(); + goto finish; + } + + i->json_job->on_finished = dkr_pull_job_on_finished; + i->json_job->on_progress = dkr_pull_job_on_progress; + + r = pull_job_begin(i->ancestry_job); + if (r < 0) { + log_error_errno(r, "Failed to start ancestry job: %m"); + goto finish; + } + + r = pull_job_begin(i->json_job); + if (r < 0) { + log_error_errno(r, "Failed to start json job: %m"); + goto finish; + } + + } else if (i->ancestry_job == j) { + char **ancestry = NULL, **k; + unsigned n; + + assert(!i->layer_job); + + r = parse_ancestry(j->payload, j->payload_size, &ancestry); + if (r < 0) { + log_error_errno(r, "Failed to parse JSON id."); + goto finish; + } + + n = strv_length(ancestry); + if (n <= 0 || !streq(ancestry[n-1], i->id)) { + log_error("Ancestry doesn't end in main layer."); + strv_free(ancestry); + r = -EBADMSG; + goto finish; + } + + log_info("Ancestor lookup succeeded, requires layers:\n"); + STRV_FOREACH(k, ancestry) + log_info("\t%s", *k); + + strv_free(i->ancestry); + i->ancestry = ancestry; + i->n_ancestry = n; + i->current_ancestry = 0; + + dkr_pull_report_progress(i, DKR_DOWNLOADING); + + r = dkr_pull_pull_layer(i); + if (r < 0) + goto finish; + + } else if (i->layer_job == j) { + assert(i->temp_path); + assert(i->final_path); + + j->disk_fd = safe_close(j->disk_fd); + + if (i->tar_pid > 0) { + r = wait_for_terminate_and_warn("tar", i->tar_pid, true); + i->tar_pid = 0; + if (r < 0) + goto finish; + } + + r = aufs_resolve(i->temp_path); + if (r < 0) { + log_error_errno(r, "Failed to resolve aufs whiteouts: %m"); + goto finish; + } + + r = btrfs_subvol_set_read_only(i->temp_path, true); + if (r < 0) { + log_error_errno(r, "Failed to mark snapshot read-only: %m"); + goto finish; + } + + if (rename(i->temp_path, i->final_path) < 0) { + log_error_errno(errno, "Failed to rename snaphsot: %m"); + goto finish; + } + + log_info("Completed writing to layer %s.", i->final_path); + + i->layer_job = pull_job_unref(i->layer_job); + free(i->temp_path); + i->temp_path = NULL; + free(i->final_path); + i->final_path = NULL; + + i->current_ancestry ++; + r = dkr_pull_pull_layer(i); + if (r < 0) + goto finish; + + } else if (i->json_job != j) + assert_not_reached("Got finished event for unknown curl object"); + + if (!dkr_pull_is_done(i)) + return; + + dkr_pull_report_progress(i, DKR_COPYING); + + r = dkr_pull_make_local_copy(i); + if (r < 0) + goto finish; + + r = 0; + +finish: + if (i->on_finished) + i->on_finished(i, r, i->userdata); + else + sd_event_exit(i->event, r); +} + +static int dkr_pull_job_on_header(PullJob *j, const char *header, size_t sz) { + _cleanup_free_ char *registry = NULL; + char *token; + DkrPull *i; + int r; + + assert(j); + assert(j->userdata); + + i = j->userdata; + + r = curl_header_strdup(header, sz, 
HEADER_TOKEN, &token); + if (r < 0) + return log_oom(); + if (r > 0) { + free(i->response_token); + i->response_token = token; + return 0; + } + + r = curl_header_strdup(header, sz, HEADER_REGISTRY, ®istry); + if (r < 0) + return log_oom(); + if (r > 0) { + char **l, **k; + + l = strv_split(registry, ","); + if (!l) + return log_oom(); + + STRV_FOREACH(k, l) { + if (!hostname_is_valid(*k)) { + log_error("Registry hostname is not valid."); + strv_free(l); + return -EBADMSG; + } + } + + strv_free(i->response_registries); + i->response_registries = l; + } + + return 0; +} + +int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *local, bool force_local) { + const char *url; + int r; + + assert(i); + + if (!dkr_name_is_valid(name)) + return -EINVAL; + + if (tag && !dkr_tag_is_valid(tag)) + return -EINVAL; + + if (local && !machine_name_is_valid(local)) + return -EINVAL; + + if (i->images_job) + return -EBUSY; + + if (!tag) + tag = "latest"; + + r = free_and_strdup(&i->local, local); + if (r < 0) + return r; + i->force_local = force_local; + + r = free_and_strdup(&i->name, name); + if (r < 0) + return r; + r = free_and_strdup(&i->tag, tag); + if (r < 0) + return r; + + url = strjoina(i->index_url, "/v1/repositories/", name, "/images"); + + r = pull_job_new(&i->images_job, url, i->glue, i); + if (r < 0) + return r; + + r = dkr_pull_add_token(i, i->images_job); + if (r < 0) + return r; + + i->images_job->on_finished = dkr_pull_job_on_finished; + i->images_job->on_header = dkr_pull_job_on_header; + i->images_job->on_progress = dkr_pull_job_on_progress; + + return pull_job_begin(i->images_job); +} diff --git a/src/import/pull-dkr.h b/src/import/pull-dkr.h new file mode 100644 index 0000000000..4c4b10c7ac --- /dev/null +++ b/src/import/pull-dkr.h @@ -0,0 +1,36 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +/*** + This file is part of systemd. + + Copyright 2014 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . +***/ + +#pragma once + +#include "sd-event.h" +#include "util.h" + +typedef struct DkrPull DkrPull; + +typedef void (*DkrPullFinished)(DkrPull *pull, int error, void *userdata); + +int dkr_pull_new(DkrPull **pull, sd_event *event, const char *index_url, const char *image_root, DkrPullFinished on_finished, void *userdata); +DkrPull* dkr_pull_unref(DkrPull *pull); + +DEFINE_TRIVIAL_CLEANUP_FUNC(DkrPull*, dkr_pull_unref); + +int dkr_pull_start(DkrPull *pull, const char *name, const char *tag, const char *local, bool force_local); diff --git a/src/import/pull-job.c b/src/import/pull-job.c new file mode 100644 index 0000000000..165dae6619 --- /dev/null +++ b/src/import/pull-job.c @@ -0,0 +1,746 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +/*** + This file is part of systemd. 
+ + Copyright 2015 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . +***/ + +#include + +#include "strv.h" +#include "machine-pool.h" +#include "pull-job.h" + +/* Grow the /var/lib/machines directory after each 10MiB written */ +#define PULL_GROW_INTERVAL_BYTES (UINT64_C(10) * UINT64_C(1024) * UINT64_C(1024)) + +PullJob* pull_job_unref(PullJob *j) { + if (!j) + return NULL; + + curl_glue_remove_and_free(j->glue, j->curl); + curl_slist_free_all(j->request_header); + + safe_close(j->disk_fd); + + if (j->compressed == PULL_JOB_XZ) + lzma_end(&j->xz); + else if (j->compressed == PULL_JOB_GZIP) + inflateEnd(&j->gzip); + else if (j->compressed == PULL_JOB_BZIP2) + BZ2_bzDecompressEnd(&j->bzip2); + + if (j->checksum_context) + gcry_md_close(j->checksum_context); + + free(j->url); + free(j->etag); + strv_free(j->old_etags); + free(j->payload); + free(j->checksum); + + free(j); + + return NULL; +} + +static void pull_job_finish(PullJob *j, int ret) { + assert(j); + + if (j->state == PULL_JOB_DONE || + j->state == PULL_JOB_FAILED) + return; + + if (ret == 0) { + j->state = PULL_JOB_DONE; + j->progress_percent = 100; + log_info("Download of %s complete.", j->url); + } else { + j->state = PULL_JOB_FAILED; + j->error = ret; + } + + if (j->on_finished) + j->on_finished(j); +} + +void pull_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) { + PullJob *j = NULL; + CURLcode code; + long status; + int r; + + if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &j) != CURLE_OK) + return; + + if (!j || j->state == PULL_JOB_DONE || j->state == PULL_JOB_FAILED) + return; + + if (result != CURLE_OK) { + log_error("Transfer failed: %s", curl_easy_strerror(result)); + r = -EIO; + goto finish; + } + + code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status); + if (code != CURLE_OK) { + log_error("Failed to retrieve response code: %s", curl_easy_strerror(code)); + r = -EIO; + goto finish; + } else if (status == 304) { + log_info("Image already downloaded. 
Skipping download."); + j->etag_exists = true; + r = 0; + goto finish; + } else if (status >= 300) { + log_error("HTTP request to %s failed with code %li.", j->url, status); + r = -EIO; + goto finish; + } else if (status < 200) { + log_error("HTTP request to %s finished with unexpected code %li.", j->url, status); + r = -EIO; + goto finish; + } + + if (j->state != PULL_JOB_RUNNING) { + log_error("Premature connection termination."); + r = -EIO; + goto finish; + } + + if (j->content_length != (uint64_t) -1 && + j->content_length != j->written_compressed) { + log_error("Download truncated."); + r = -EIO; + goto finish; + } + + if (j->checksum_context) { + uint8_t *k; + + k = gcry_md_read(j->checksum_context, GCRY_MD_SHA256); + if (!k) { + log_error("Failed to get checksum."); + r = -EIO; + goto finish; + } + + j->checksum = hexmem(k, gcry_md_get_algo_dlen(GCRY_MD_SHA256)); + if (!j->checksum) { + r = log_oom(); + goto finish; + } + + log_debug("SHA256 of %s is %s.", j->url, j->checksum); + } + + if (j->disk_fd >= 0 && j->allow_sparse) { + /* Make sure the file size is right, in case the file was + * sparse and we just seeked for the last part */ + + if (ftruncate(j->disk_fd, j->written_uncompressed) < 0) { + log_error_errno(errno, "Failed to truncate file: %m"); + r = -errno; + goto finish; + } + + if (j->etag) + (void) fsetxattr(j->disk_fd, "user.source_etag", j->etag, strlen(j->etag), 0); + if (j->url) + (void) fsetxattr(j->disk_fd, "user.source_url", j->url, strlen(j->url), 0); + + if (j->mtime != 0) { + struct timespec ut[2]; + + timespec_store(&ut[0], j->mtime); + ut[1] = ut[0]; + (void) futimens(j->disk_fd, ut); + + (void) fd_setcrtime(j->disk_fd, j->mtime); + } + } + + r = 0; + +finish: + pull_job_finish(j, r); +} + +static int pull_job_write_uncompressed(PullJob *j, void *p, size_t sz) { + ssize_t n; + + assert(j); + assert(p); + + if (sz <= 0) + return 0; + + if (j->written_uncompressed + sz < j->written_uncompressed) { + log_error("File too large, overflow"); + return -EOVERFLOW; + } + + if (j->written_uncompressed + sz > j->uncompressed_max) { + log_error("File overly large, refusing"); + return -EFBIG; + } + + if (j->disk_fd >= 0) { + + if (j->grow_machine_directory && j->written_since_last_grow >= PULL_GROW_INTERVAL_BYTES) { + j->written_since_last_grow = 0; + grow_machine_directory(); + } + + if (j->allow_sparse) + n = sparse_write(j->disk_fd, p, sz, 64); + else + n = write(j->disk_fd, p, sz); + if (n < 0) { + log_error_errno(errno, "Failed to write file: %m"); + return -errno; + } + if ((size_t) n < sz) { + log_error("Short write"); + return -EIO; + } + } else { + + if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) + return log_oom(); + + memcpy(j->payload + j->payload_size, p, sz); + j->payload_size += sz; + } + + j->written_uncompressed += sz; + j->written_since_last_grow += sz; + + return 0; +} + +static int pull_job_write_compressed(PullJob *j, void *p, size_t sz) { + int r; + + assert(j); + assert(p); + + if (sz <= 0) + return 0; + + if (j->written_compressed + sz < j->written_compressed) { + log_error("File too large, overflow"); + return -EOVERFLOW; + } + + if (j->written_compressed + sz > j->compressed_max) { + log_error("File overly large, refusing."); + return -EFBIG; + } + + if (j->content_length != (uint64_t) -1 && + j->written_compressed + sz > j->content_length) { + log_error("Content length incorrect."); + return -EFBIG; + } + + if (j->checksum_context) + gcry_md_write(j->checksum_context, p, sz); + + switch (j->compressed) { + + case 
PULL_JOB_UNCOMPRESSED: + r = pull_job_write_uncompressed(j, p, sz); + if (r < 0) + return r; + + break; + + case PULL_JOB_XZ: + j->xz.next_in = p; + j->xz.avail_in = sz; + + while (j->xz.avail_in > 0) { + uint8_t buffer[16 * 1024]; + lzma_ret lzr; + + j->xz.next_out = buffer; + j->xz.avail_out = sizeof(buffer); + + lzr = lzma_code(&j->xz, LZMA_RUN); + if (lzr != LZMA_OK && lzr != LZMA_STREAM_END) { + log_error("Decompression error."); + return -EIO; + } + + r = pull_job_write_uncompressed(j, buffer, sizeof(buffer) - j->xz.avail_out); + if (r < 0) + return r; + } + + break; + + case PULL_JOB_GZIP: + j->gzip.next_in = p; + j->gzip.avail_in = sz; + + while (j->gzip.avail_in > 0) { + uint8_t buffer[16 * 1024]; + + j->gzip.next_out = buffer; + j->gzip.avail_out = sizeof(buffer); + + r = inflate(&j->gzip, Z_NO_FLUSH); + if (r != Z_OK && r != Z_STREAM_END) { + log_error("Decompression error."); + return -EIO; + } + + r = pull_job_write_uncompressed(j, buffer, sizeof(buffer) - j->gzip.avail_out); + if (r < 0) + return r; + } + + break; + + case PULL_JOB_BZIP2: + j->bzip2.next_in = p; + j->bzip2.avail_in = sz; + + while (j->bzip2.avail_in > 0) { + uint8_t buffer[16 * 1024]; + + j->bzip2.next_out = (char*) buffer; + j->bzip2.avail_out = sizeof(buffer); + + r = BZ2_bzDecompress(&j->bzip2); + if (r != BZ_OK && r != BZ_STREAM_END) { + log_error("Decompression error."); + return -EIO; + } + + r = pull_job_write_uncompressed(j, buffer, sizeof(buffer) - j->bzip2.avail_out); + if (r < 0) + return r; + } + + break; + + default: + assert_not_reached("Unknown compression"); + } + + j->written_compressed += sz; + + return 0; +} + +static int pull_job_open_disk(PullJob *j) { + int r; + + assert(j); + + if (j->on_open_disk) { + r = j->on_open_disk(j); + if (r < 0) + return r; + } + + if (j->disk_fd >= 0) { + /* Check if we can do sparse files */ + + if (lseek(j->disk_fd, SEEK_SET, 0) == 0) + j->allow_sparse = true; + else { + if (errno != ESPIPE) + return log_error_errno(errno, "Failed to seek on file descriptor: %m"); + + j->allow_sparse = false; + } + } + + if (j->calc_checksum) { + if (gcry_md_open(&j->checksum_context, GCRY_MD_SHA256, 0) != 0) { + log_error("Failed to initialize hash context."); + return -EIO; + } + } + + return 0; +} + +static int pull_job_detect_compression(PullJob *j) { + static const uint8_t xz_signature[] = { + 0xfd, '7', 'z', 'X', 'Z', 0x00 + }; + static const uint8_t gzip_signature[] = { + 0x1f, 0x8b + }; + static const uint8_t bzip2_signature[] = { + 'B', 'Z', 'h' + }; + + _cleanup_free_ uint8_t *stub = NULL; + size_t stub_size; + + int r; + + assert(j); + + if (j->payload_size < MAX3(sizeof(xz_signature), + sizeof(gzip_signature), + sizeof(bzip2_signature))) + return 0; + + if (memcmp(j->payload, xz_signature, sizeof(xz_signature)) == 0) + j->compressed = PULL_JOB_XZ; + else if (memcmp(j->payload, gzip_signature, sizeof(gzip_signature)) == 0) + j->compressed = PULL_JOB_GZIP; + else if (memcmp(j->payload, bzip2_signature, sizeof(bzip2_signature)) == 0) + j->compressed = PULL_JOB_BZIP2; + else + j->compressed = PULL_JOB_UNCOMPRESSED; + + log_debug("Stream is XZ compressed: %s", yes_no(j->compressed == PULL_JOB_XZ)); + log_debug("Stream is GZIP compressed: %s", yes_no(j->compressed == PULL_JOB_GZIP)); + log_debug("Stream is BZIP2 compressed: %s", yes_no(j->compressed == PULL_JOB_BZIP2)); + + if (j->compressed == PULL_JOB_XZ) { + lzma_ret xzr; + + xzr = lzma_stream_decoder(&j->xz, UINT64_MAX, LZMA_TELL_UNSUPPORTED_CHECK); + if (xzr != LZMA_OK) { + log_error("Failed to initialize XZ 
decoder."); + return -EIO; + } + } + if (j->compressed == PULL_JOB_GZIP) { + r = inflateInit2(&j->gzip, 15+16); + if (r != Z_OK) { + log_error("Failed to initialize gzip decoder."); + return -EIO; + } + } + if (j->compressed == PULL_JOB_BZIP2) { + r = BZ2_bzDecompressInit(&j->bzip2, 0, 0); + if (r != BZ_OK) { + log_error("Failed to initialize bzip2 decoder."); + return -EIO; + } + } + + r = pull_job_open_disk(j); + if (r < 0) + return r; + + /* Now, take the payload we read so far, and decompress it */ + stub = j->payload; + stub_size = j->payload_size; + + j->payload = NULL; + j->payload_size = 0; + j->payload_allocated = 0; + + j->state = PULL_JOB_RUNNING; + + r = pull_job_write_compressed(j, stub, stub_size); + if (r < 0) + return r; + + return 0; +} + +static size_t pull_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) { + PullJob *j = userdata; + size_t sz = size * nmemb; + int r; + + assert(contents); + assert(j); + + switch (j->state) { + + case PULL_JOB_ANALYZING: + /* Let's first check what it actually is */ + + if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) { + r = log_oom(); + goto fail; + } + + memcpy(j->payload + j->payload_size, contents, sz); + j->payload_size += sz; + + r = pull_job_detect_compression(j); + if (r < 0) + goto fail; + + break; + + case PULL_JOB_RUNNING: + + r = pull_job_write_compressed(j, contents, sz); + if (r < 0) + goto fail; + + break; + + case PULL_JOB_DONE: + case PULL_JOB_FAILED: + r = -ESTALE; + goto fail; + + default: + assert_not_reached("Impossible state."); + } + + return sz; + +fail: + pull_job_finish(j, r); + return 0; +} + +static size_t pull_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) { + PullJob *j = userdata; + size_t sz = size * nmemb; + _cleanup_free_ char *length = NULL, *last_modified = NULL; + char *etag; + int r; + + assert(contents); + assert(j); + + if (j->state == PULL_JOB_DONE || j->state == PULL_JOB_FAILED) { + r = -ESTALE; + goto fail; + } + + assert(j->state == PULL_JOB_ANALYZING); + + r = curl_header_strdup(contents, sz, "ETag:", &etag); + if (r < 0) { + log_oom(); + goto fail; + } + if (r > 0) { + free(j->etag); + j->etag = etag; + + if (strv_contains(j->old_etags, j->etag)) { + log_info("Image already downloaded. 
Skipping download."); + j->etag_exists = true; + pull_job_finish(j, 0); + return sz; + } + + return sz; + } + + r = curl_header_strdup(contents, sz, "Content-Length:", &length); + if (r < 0) { + log_oom(); + goto fail; + } + if (r > 0) { + (void) safe_atou64(length, &j->content_length); + + if (j->content_length != (uint64_t) -1) { + char bytes[FORMAT_BYTES_MAX]; + + if (j->content_length > j->compressed_max) { + log_error("Content too large."); + r = -EFBIG; + goto fail; + } + + log_info("Downloading %s for %s.", format_bytes(bytes, sizeof(bytes), j->content_length), j->url); + } + + return sz; + } + + r = curl_header_strdup(contents, sz, "Last-Modified:", &last_modified); + if (r < 0) { + log_oom(); + goto fail; + } + if (r > 0) { + (void) curl_parse_http_time(last_modified, &j->mtime); + return sz; + } + + if (j->on_header) { + r = j->on_header(j, contents, sz); + if (r < 0) + goto fail; + } + + return sz; + +fail: + pull_job_finish(j, r); + return 0; +} + +static int pull_job_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) { + PullJob *j = userdata; + unsigned percent; + usec_t n; + + assert(j); + + if (dltotal <= 0) + return 0; + + percent = ((100 * dlnow) / dltotal); + n = now(CLOCK_MONOTONIC); + + if (n > j->last_status_usec + USEC_PER_SEC && + percent != j->progress_percent && + dlnow < dltotal) { + char buf[FORMAT_TIMESPAN_MAX]; + + if (n - j->start_usec > USEC_PER_SEC && dlnow > 0) { + char y[FORMAT_BYTES_MAX]; + usec_t left, done; + + done = n - j->start_usec; + left = (usec_t) (((double) done * (double) dltotal) / dlnow) - done; + + log_info("Got %u%% of %s. %s left at %s/s.", + percent, + j->url, + format_timespan(buf, sizeof(buf), left, USEC_PER_SEC), + format_bytes(y, sizeof(y), (uint64_t) ((double) dlnow / ((double) done / (double) USEC_PER_SEC)))); + } else + log_info("Got %u%% of %s.", percent, j->url); + + j->progress_percent = percent; + j->last_status_usec = n; + + if (j->on_progress) + j->on_progress(j); + } + + return 0; +} + +int pull_job_new(PullJob **ret, const char *url, CurlGlue *glue, void *userdata) { + _cleanup_(pull_job_unrefp) PullJob *j = NULL; + + assert(url); + assert(glue); + assert(ret); + + j = new0(PullJob, 1); + if (!j) + return -ENOMEM; + + j->state = PULL_JOB_INIT; + j->disk_fd = -1; + j->userdata = userdata; + j->glue = glue; + j->content_length = (uint64_t) -1; + j->start_usec = now(CLOCK_MONOTONIC); + j->compressed_max = j->uncompressed_max = 8LLU * 1024LLU * 1024LLU * 1024LLU; /* 8GB */ + + j->url = strdup(url); + if (!j->url) + return -ENOMEM; + + *ret = j; + j = NULL; + + return 0; +} + +int pull_job_begin(PullJob *j) { + int r; + + assert(j); + + if (j->state != PULL_JOB_INIT) + return -EBUSY; + + if (j->grow_machine_directory) + grow_machine_directory(); + + r = curl_glue_make(&j->curl, j->url, j); + if (r < 0) + return r; + + if (!strv_isempty(j->old_etags)) { + _cleanup_free_ char *cc = NULL, *hdr = NULL; + + cc = strv_join(j->old_etags, ", "); + if (!cc) + return -ENOMEM; + + hdr = strappend("If-None-Match: ", cc); + if (!hdr) + return -ENOMEM; + + if (!j->request_header) { + j->request_header = curl_slist_new(hdr, NULL); + if (!j->request_header) + return -ENOMEM; + } else { + struct curl_slist *l; + + l = curl_slist_append(j->request_header, hdr); + if (!l) + return -ENOMEM; + + j->request_header = l; + } + } + + if (j->request_header) { + if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK) + return -EIO; + } + + if (curl_easy_setopt(j->curl, 
CURLOPT_WRITEFUNCTION, pull_job_write_callback) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, pull_job_header_callback) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_XFERINFOFUNCTION, pull_job_progress_callback) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_XFERINFODATA, j) != CURLE_OK) + return -EIO; + + if (curl_easy_setopt(j->curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK) + return -EIO; + + r = curl_glue_add(j->glue, j->curl); + if (r < 0) + return r; + + j->state = PULL_JOB_ANALYZING; + + return 0; +} diff --git a/src/import/pull-job.h b/src/import/pull-job.h new file mode 100644 index 0000000000..b807bd1b41 --- /dev/null +++ b/src/import/pull-job.h @@ -0,0 +1,122 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +#pragma once + +/*** + This file is part of systemd. + + Copyright 2015 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . +***/ + +#include +#include +#include +#include + +#include "macro.h" +#include "curl-util.h" + +typedef struct PullJob PullJob; + +typedef void (*PullJobFinished)(PullJob *job); +typedef int (*PullJobOpenDisk)(PullJob *job); +typedef int (*PullJobHeader)(PullJob *job, const char *header, size_t sz); +typedef void (*PullJobProgress)(PullJob *job); + +typedef enum PullJobState { + PULL_JOB_INIT, + PULL_JOB_ANALYZING, /* Still reading into ->payload, to figure out what we have */ + PULL_JOB_RUNNING, /* Writing to destination */ + PULL_JOB_DONE, + PULL_JOB_FAILED, + _PULL_JOB_STATE_MAX, + _PULL_JOB_STATE_INVALID = -1, +} PullJobState; + +#define PULL_JOB_STATE_IS_COMPLETE(j) (IN_SET((j)->state, PULL_JOB_DONE, PULL_JOB_FAILED)) + +typedef enum PullJobCompression { + PULL_JOB_UNCOMPRESSED, + PULL_JOB_XZ, + PULL_JOB_GZIP, + PULL_JOB_BZIP2, + _PULL_JOB_COMPRESSION_MAX, + _PULL_JOB_COMPRESSION_INVALID = -1, +} PullJobCompression; + +struct PullJob { + PullJobState state; + int error; + + char *url; + + void *userdata; + PullJobFinished on_finished; + PullJobOpenDisk on_open_disk; + PullJobHeader on_header; + PullJobProgress on_progress; + + CurlGlue *glue; + CURL *curl; + struct curl_slist *request_header; + + char *etag; + char **old_etags; + bool etag_exists; + + uint64_t content_length; + uint64_t written_compressed; + uint64_t written_uncompressed; + + uint64_t uncompressed_max; + uint64_t compressed_max; + + uint8_t *payload; + size_t payload_size; + size_t payload_allocated; + + int disk_fd; + + usec_t mtime; + + PullJobCompression compressed; + lzma_stream xz; + z_stream gzip; + bz_stream bzip2; + + unsigned progress_percent; + usec_t start_usec; + usec_t last_status_usec; + + bool allow_sparse; + + bool calc_checksum; + gcry_md_hd_t checksum_context; + + char *checksum; + + bool grow_machine_directory; + uint64_t 
written_since_last_grow; +}; + +int pull_job_new(PullJob **job, const char *url, CurlGlue *glue, void *userdata); +PullJob* pull_job_unref(PullJob *job); + +int pull_job_begin(PullJob *j); + +void pull_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result); + +DEFINE_TRIVIAL_CLEANUP_FUNC(PullJob*, pull_job_unref); diff --git a/src/import/pull-raw.c b/src/import/pull-raw.c new file mode 100644 index 0000000000..4029c7ed17 --- /dev/null +++ b/src/import/pull-raw.c @@ -0,0 +1,518 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +/*** + This file is part of systemd. + + Copyright 2014 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . +***/ + +#include +#include +#include + +#include "sd-daemon.h" +#include "utf8.h" +#include "strv.h" +#include "copy.h" +#include "btrfs-util.h" +#include "util.h" +#include "macro.h" +#include "mkdir.h" +#include "path-util.h" +#include "import-util.h" +#include "curl-util.h" +#include "qcow2-util.h" +#include "pull-job.h" +#include "pull-common.h" +#include "pull-raw.h" + +typedef enum RawProgress { + RAW_DOWNLOADING, + RAW_VERIFYING, + RAW_UNPACKING, + RAW_FINALIZING, + RAW_COPYING, +} RawProgress; + +struct RawPull { + sd_event *event; + CurlGlue *glue; + + char *image_root; + + PullJob *raw_job; + PullJob *checksum_job; + PullJob *signature_job; + + RawPullFinished on_finished; + void *userdata; + + char *local; + bool force_local; + bool grow_machine_directory; + + char *temp_path; + char *final_path; + + ImportVerify verify; +}; + +RawPull* raw_pull_unref(RawPull *i) { + if (!i) + return NULL; + + pull_job_unref(i->raw_job); + pull_job_unref(i->checksum_job); + pull_job_unref(i->signature_job); + + curl_glue_unref(i->glue); + sd_event_unref(i->event); + + if (i->temp_path) { + (void) unlink(i->temp_path); + free(i->temp_path); + } + + free(i->final_path); + free(i->image_root); + free(i->local); + free(i); + + return NULL; +} + +int raw_pull_new( + RawPull **ret, + sd_event *event, + const char *image_root, + RawPullFinished on_finished, + void *userdata) { + + _cleanup_(raw_pull_unrefp) RawPull *i = NULL; + int r; + + assert(ret); + + i = new0(RawPull, 1); + if (!i) + return -ENOMEM; + + i->on_finished = on_finished; + i->userdata = userdata; + + i->image_root = strdup(image_root ?: "/var/lib/machines"); + if (!i->image_root) + return -ENOMEM; + + i->grow_machine_directory = path_startswith(i->image_root, "/var/lib/machines"); + + if (event) + i->event = sd_event_ref(event); + else { + r = sd_event_default(&i->event); + if (r < 0) + return r; + } + + r = curl_glue_new(&i->glue, i->event); + if (r < 0) + return r; + + i->glue->on_finished = pull_job_curl_on_finished; + i->glue->userdata = i; + + *ret = i; + i = NULL; + + return 0; +} + +static void raw_pull_report_progress(RawPull *i, RawProgress p) { + unsigned percent; + + assert(i); + + switch (p) { + + case RAW_DOWNLOADING: { + unsigned remain = 80; + + percent = 0; + + if (i->checksum_job) { + percent += 
i->checksum_job->progress_percent * 5 / 100; + remain -= 5; + } + + if (i->signature_job) { + percent += i->signature_job->progress_percent * 5 / 100; + remain -= 5; + } + + if (i->raw_job) + percent += i->raw_job->progress_percent * remain / 100; + break; + } + + case RAW_VERIFYING: + percent = 80; + break; + + case RAW_UNPACKING: + percent = 85; + break; + + case RAW_FINALIZING: + percent = 90; + break; + + case RAW_COPYING: + percent = 95; + break; + + default: + assert_not_reached("Unknown progress state"); + } + + sd_notifyf(false, "X_IMPORT_PROGRESS=%u", percent); + log_debug("Combined progress %u%%", percent); +} + +static int raw_pull_maybe_convert_qcow2(RawPull *i) { + _cleanup_close_ int converted_fd = -1; + _cleanup_free_ char *t = NULL; + int r; + + assert(i); + assert(i->raw_job); + + r = qcow2_detect(i->raw_job->disk_fd); + if (r < 0) + return log_error_errno(r, "Failed to detect whether this is a QCOW2 image: %m"); + if (r == 0) + return 0; + + /* This is a QCOW2 image, let's convert it */ + r = tempfn_random(i->final_path, &t); + if (r < 0) + return log_oom(); + + converted_fd = open(t, O_RDWR|O_CREAT|O_EXCL|O_NOCTTY|O_CLOEXEC, 0644); + if (converted_fd < 0) + return log_error_errno(errno, "Failed to create %s: %m", t); + + r = chattr_fd(converted_fd, true, FS_NOCOW_FL); + if (r < 0) + log_warning_errno(errno, "Failed to set file attributes on %s: %m", t); + + log_info("Unpacking QCOW2 file."); + + r = qcow2_convert(i->raw_job->disk_fd, converted_fd); + if (r < 0) { + unlink(t); + return log_error_errno(r, "Failed to convert qcow2 image: %m"); + } + + unlink(i->temp_path); + free(i->temp_path); + + i->temp_path = t; + t = NULL; + + safe_close(i->raw_job->disk_fd); + i->raw_job->disk_fd = converted_fd; + converted_fd = -1; + + return 1; +} + +static int raw_pull_make_local_copy(RawPull *i) { + _cleanup_free_ char *tp = NULL; + _cleanup_close_ int dfd = -1; + const char *p; + int r; + + assert(i); + assert(i->raw_job); + + if (!i->local) + return 0; + + if (i->raw_job->etag_exists) { + /* We have downloaded this one previously, reopen it */ + + assert(i->raw_job->disk_fd < 0); + + if (!i->final_path) { + r = pull_make_path(i->raw_job->url, i->raw_job->etag, i->image_root, ".raw-", ".raw", &i->final_path); + if (r < 0) + return log_oom(); + } + + i->raw_job->disk_fd = open(i->final_path, O_RDONLY|O_NOCTTY|O_CLOEXEC); + if (i->raw_job->disk_fd < 0) + return log_error_errno(errno, "Failed to open vendor image: %m"); + } else { + /* We freshly downloaded the image, use it */ + + assert(i->raw_job->disk_fd >= 0); + + if (lseek(i->raw_job->disk_fd, SEEK_SET, 0) == (off_t) -1) + return log_error_errno(errno, "Failed to seek to beginning of vendor image: %m"); + } + + p = strjoina(i->image_root, "/", i->local, ".raw"); + + if (i->force_local) { + (void) btrfs_subvol_remove(p); + (void) rm_rf_dangerous(p, false, true, false); + } + + r = tempfn_random(p, &tp); + if (r < 0) + return log_oom(); + + dfd = open(tp, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY|O_CLOEXEC, 0664); + if (dfd < 0) + return log_error_errno(errno, "Failed to create writable copy of image: %m"); + + /* Turn off COW writing. This should greatly improve + * performance on COW file systems like btrfs, since it + * reduces fragmentation caused by not allowing in-place + * writes. 
*/ + r = chattr_fd(dfd, true, FS_NOCOW_FL); + if (r < 0) + log_warning_errno(errno, "Failed to set file attributes on %s: %m", tp); + + r = copy_bytes(i->raw_job->disk_fd, dfd, (off_t) -1, true); + if (r < 0) { + unlink(tp); + return log_error_errno(r, "Failed to make writable copy of image: %m"); + } + + (void) copy_times(i->raw_job->disk_fd, dfd); + (void) copy_xattr(i->raw_job->disk_fd, dfd); + + dfd = safe_close(dfd); + + r = rename(tp, p); + if (r < 0) { + unlink(tp); + return log_error_errno(errno, "Failed to move writable image into place: %m"); + } + + log_info("Created new local image '%s'.", i->local); + return 0; +} + +static bool raw_pull_is_done(RawPull *i) { + assert(i); + assert(i->raw_job); + + if (i->raw_job->state != PULL_JOB_DONE) + return false; + if (i->checksum_job && i->checksum_job->state != PULL_JOB_DONE) + return false; + if (i->signature_job && i->signature_job->state != PULL_JOB_DONE) + return false; + + return true; +} + +static void raw_pull_job_on_finished(PullJob *j) { + RawPull *i; + int r; + + assert(j); + assert(j->userdata); + + i = j->userdata; + if (j->error != 0) { + if (j == i->checksum_job) + log_error_errno(j->error, "Failed to retrieve SHA256 checksum, cannot verify. (Try --verify=no?)"); + else if (j == i->signature_job) + log_error_errno(j->error, "Failed to retrieve signature file, cannot verify. (Try --verify=no?)"); + else + log_error_errno(j->error, "Failed to retrieve image file. (Wrong URL?)"); + + r = j->error; + goto finish; + } + + /* This is invoked if either the download completed + * successfully, or the download was skipped because we + * already have the etag. In this case ->etag_exists is + * true. + * + * We only do something when we got all three files */ + + if (!raw_pull_is_done(i)) + return; + + if (!i->raw_job->etag_exists) { + /* This is a new download, verify it, and move it into place */ + assert(i->raw_job->disk_fd >= 0); + + raw_pull_report_progress(i, RAW_VERIFYING); + + r = pull_verify(i->raw_job, i->checksum_job, i->signature_job); + if (r < 0) + goto finish; + + raw_pull_report_progress(i, RAW_UNPACKING); + + r = raw_pull_maybe_convert_qcow2(i); + if (r < 0) + goto finish; + + raw_pull_report_progress(i, RAW_FINALIZING); + + r = pull_make_read_only_fd(i->raw_job->disk_fd); + if (r < 0) + goto finish; + + r = rename(i->temp_path, i->final_path); + if (r < 0) { + r = log_error_errno(errno, "Failed to move RAW file into place: %m"); + goto finish; + } + + free(i->temp_path); + i->temp_path = NULL; + } + + raw_pull_report_progress(i, RAW_COPYING); + + r = raw_pull_make_local_copy(i); + if (r < 0) + goto finish; + + r = 0; + +finish: + if (i->on_finished) + i->on_finished(i, r, i->userdata); + else + sd_event_exit(i->event, r); +} + +static int raw_pull_job_on_open_disk(PullJob *j) { + RawPull *i; + int r; + + assert(j); + assert(j->userdata); + + i = j->userdata; + assert(i->raw_job == j); + assert(!i->final_path); + assert(!i->temp_path); + + r = pull_make_path(j->url, j->etag, i->image_root, ".raw-", ".raw", &i->final_path); + if (r < 0) + return log_oom(); + + r = tempfn_random(i->final_path, &i->temp_path); + if (r <0) + return log_oom(); + + mkdir_parents_label(i->temp_path, 0700); + + j->disk_fd = open(i->temp_path, O_RDWR|O_CREAT|O_EXCL|O_NOCTTY|O_CLOEXEC, 0644); + if (j->disk_fd < 0) + return log_error_errno(errno, "Failed to create %s: %m", i->temp_path); + + r = chattr_fd(j->disk_fd, true, FS_NOCOW_FL); + if (r < 0) + log_warning_errno(errno, "Failed to set file attributes on %s: %m", i->temp_path); + + return 
0; +} + +static void raw_pull_job_on_progress(PullJob *j) { + RawPull *i; + + assert(j); + assert(j->userdata); + + i = j->userdata; + + raw_pull_report_progress(i, RAW_DOWNLOADING); +} + +int raw_pull_start(RawPull *i, const char *url, const char *local, bool force_local, ImportVerify verify) { + int r; + + assert(i); + assert(verify < _IMPORT_VERIFY_MAX); + assert(verify >= 0); + + if (!http_url_is_valid(url)) + return -EINVAL; + + if (local && !machine_name_is_valid(local)) + return -EINVAL; + + if (i->raw_job) + return -EBUSY; + + r = free_and_strdup(&i->local, local); + if (r < 0) + return r; + i->force_local = force_local; + i->verify = verify; + + /* Queue job for the image itself */ + r = pull_job_new(&i->raw_job, url, i->glue, i); + if (r < 0) + return r; + + i->raw_job->on_finished = raw_pull_job_on_finished; + i->raw_job->on_open_disk = raw_pull_job_on_open_disk; + i->raw_job->on_progress = raw_pull_job_on_progress; + i->raw_job->calc_checksum = verify != IMPORT_VERIFY_NO; + i->raw_job->grow_machine_directory = i->grow_machine_directory; + + r = pull_find_old_etags(url, i->image_root, DT_REG, ".raw-", ".raw", &i->raw_job->old_etags); + if (r < 0) + return r; + + r = pull_make_verification_jobs(&i->checksum_job, &i->signature_job, verify, url, i->glue, raw_pull_job_on_finished, i); + if (r < 0) + return r; + + r = pull_job_begin(i->raw_job); + if (r < 0) + return r; + + if (i->checksum_job) { + i->checksum_job->on_progress = raw_pull_job_on_progress; + + r = pull_job_begin(i->checksum_job); + if (r < 0) + return r; + } + + if (i->signature_job) { + i->signature_job->on_progress = raw_pull_job_on_progress; + + r = pull_job_begin(i->signature_job); + if (r < 0) + return r; + } + + return 0; +} diff --git a/src/import/pull-raw.h b/src/import/pull-raw.h new file mode 100644 index 0000000000..808f7be818 --- /dev/null +++ b/src/import/pull-raw.h @@ -0,0 +1,37 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +#pragma once + +/*** + This file is part of systemd. + + Copyright 2014 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . +***/ + +#include "sd-event.h" +#include "macro.h" +#include "import-util.h" + +typedef struct RawPull RawPull; + +typedef void (*RawPullFinished)(RawPull *pull, int error, void *userdata); + +int raw_pull_new(RawPull **pull, sd_event *event, const char *image_root, RawPullFinished on_finished, void *userdata); +RawPull* raw_pull_unref(RawPull *pull); + +DEFINE_TRIVIAL_CLEANUP_FUNC(RawPull*, raw_pull_unref); + +int raw_pull_start(RawPull *pull, const char *url, const char *local, bool force_local, ImportVerify verify); diff --git a/src/import/pull-tar.c b/src/import/pull-tar.c new file mode 100644 index 0000000000..de653a88f9 --- /dev/null +++ b/src/import/pull-tar.c @@ -0,0 +1,414 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +/*** + This file is part of systemd. 
+ + Copyright 2015 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . +***/ + +#include +#include + +#include "sd-daemon.h" +#include "utf8.h" +#include "strv.h" +#include "copy.h" +#include "btrfs-util.h" +#include "util.h" +#include "macro.h" +#include "mkdir.h" +#include "path-util.h" +#include "import-util.h" +#include "curl-util.h" +#include "pull-job.h" +#include "pull-common.h" +#include "pull-tar.h" + +typedef enum TarProgress { + TAR_DOWNLOADING, + TAR_VERIFYING, + TAR_FINALIZING, + TAR_COPYING, +} TarProgress; + +struct TarPull { + sd_event *event; + CurlGlue *glue; + + char *image_root; + + PullJob *tar_job; + PullJob *checksum_job; + PullJob *signature_job; + + TarPullFinished on_finished; + void *userdata; + + char *local; + bool force_local; + bool grow_machine_directory; + + pid_t tar_pid; + + char *temp_path; + char *final_path; + + ImportVerify verify; +}; + +TarPull* tar_pull_unref(TarPull *i) { + if (!i) + return NULL; + + if (i->tar_pid > 1) { + (void) kill_and_sigcont(i->tar_pid, SIGKILL); + (void) wait_for_terminate(i->tar_pid, NULL); + } + + pull_job_unref(i->tar_job); + pull_job_unref(i->checksum_job); + pull_job_unref(i->signature_job); + + curl_glue_unref(i->glue); + sd_event_unref(i->event); + + if (i->temp_path) { + (void) btrfs_subvol_remove(i->temp_path); + (void) rm_rf_dangerous(i->temp_path, false, true, false); + free(i->temp_path); + } + + free(i->final_path); + free(i->image_root); + free(i->local); + free(i); + + return NULL; +} + +int tar_pull_new( + TarPull **ret, + sd_event *event, + const char *image_root, + TarPullFinished on_finished, + void *userdata) { + + _cleanup_(tar_pull_unrefp) TarPull *i = NULL; + int r; + + assert(ret); + assert(event); + + i = new0(TarPull, 1); + if (!i) + return -ENOMEM; + + i->on_finished = on_finished; + i->userdata = userdata; + + i->image_root = strdup(image_root ?: "/var/lib/machines"); + if (!i->image_root) + return -ENOMEM; + + i->grow_machine_directory = path_startswith(i->image_root, "/var/lib/machines"); + + if (event) + i->event = sd_event_ref(event); + else { + r = sd_event_default(&i->event); + if (r < 0) + return r; + } + + r = curl_glue_new(&i->glue, i->event); + if (r < 0) + return r; + + i->glue->on_finished = pull_job_curl_on_finished; + i->glue->userdata = i; + + *ret = i; + i = NULL; + + return 0; +} + +static void tar_pull_report_progress(TarPull *i, TarProgress p) { + unsigned percent; + + assert(i); + + switch (p) { + + case TAR_DOWNLOADING: { + unsigned remain = 85; + + percent = 0; + + if (i->checksum_job) { + percent += i->checksum_job->progress_percent * 5 / 100; + remain -= 5; + } + + if (i->signature_job) { + percent += i->signature_job->progress_percent * 5 / 100; + remain -= 5; + } + + if (i->tar_job) + percent += i->tar_job->progress_percent * remain / 100; + break; + } + + case TAR_VERIFYING: + percent = 85; + break; + + case TAR_FINALIZING: + percent = 90; + break; + + case TAR_COPYING: + percent = 95; + break; + + 
default: + assert_not_reached("Unknown progress state"); + } + + sd_notifyf(false, "X_IMPORT_PROGRESS=%u", percent); + log_debug("Combined progress %u%%", percent); +} + +static int tar_pull_make_local_copy(TarPull *i) { + int r; + + assert(i); + assert(i->tar_job); + + if (!i->local) + return 0; + + if (!i->final_path) { + r = pull_make_path(i->tar_job->url, i->tar_job->etag, i->image_root, ".tar-", NULL, &i->final_path); + if (r < 0) + return log_oom(); + } + + r = pull_make_local_copy(i->final_path, i->image_root, i->local, i->force_local); + if (r < 0) + return r; + + return 0; +} + +static bool tar_pull_is_done(TarPull *i) { + assert(i); + assert(i->tar_job); + + if (i->tar_job->state != PULL_JOB_DONE) + return false; + if (i->checksum_job && i->checksum_job->state != PULL_JOB_DONE) + return false; + if (i->signature_job && i->signature_job->state != PULL_JOB_DONE) + return false; + + return true; +} + +static void tar_pull_job_on_finished(PullJob *j) { + TarPull *i; + int r; + + assert(j); + assert(j->userdata); + + i = j->userdata; + if (j->error != 0) { + if (j == i->checksum_job) + log_error_errno(j->error, "Failed to retrieve SHA256 checksum, cannot verify. (Try --verify=no?)"); + else if (j == i->signature_job) + log_error_errno(j->error, "Failed to retrieve signature file, cannot verify. (Try --verify=no?)"); + else + log_error_errno(j->error, "Failed to retrieve image file. (Wrong URL?)"); + + r = j->error; + goto finish; + } + + /* This is invoked if either the download completed + * successfully, or the download was skipped because we + * already have the etag. */ + + if (!tar_pull_is_done(i)) + return; + + j->disk_fd = safe_close(i->tar_job->disk_fd); + + if (i->tar_pid > 0) { + r = wait_for_terminate_and_warn("tar", i->tar_pid, true); + i->tar_pid = 0; + if (r < 0) + goto finish; + } + + if (!i->tar_job->etag_exists) { + /* This is a new download, verify it, and move it into place */ + + tar_pull_report_progress(i, TAR_VERIFYING); + + r = pull_verify(i->tar_job, i->checksum_job, i->signature_job); + if (r < 0) + goto finish; + + tar_pull_report_progress(i, TAR_FINALIZING); + + r = pull_make_read_only(i->temp_path); + if (r < 0) + goto finish; + + if (rename(i->temp_path, i->final_path) < 0) { + r = log_error_errno(errno, "Failed to rename to final image name: %m"); + goto finish; + } + + free(i->temp_path); + i->temp_path = NULL; + } + + tar_pull_report_progress(i, TAR_COPYING); + + r = tar_pull_make_local_copy(i); + if (r < 0) + goto finish; + + r = 0; + +finish: + if (i->on_finished) + i->on_finished(i, r, i->userdata); + else + sd_event_exit(i->event, r); +} + +static int tar_pull_job_on_open_disk(PullJob *j) { + TarPull *i; + int r; + + assert(j); + assert(j->userdata); + + i = j->userdata; + assert(i->tar_job == j); + assert(!i->final_path); + assert(!i->temp_path); + assert(i->tar_pid <= 0); + + r = pull_make_path(j->url, j->etag, i->image_root, ".tar-", NULL, &i->final_path); + if (r < 0) + return log_oom(); + + r = tempfn_random(i->final_path, &i->temp_path); + if (r < 0) + return log_oom(); + + mkdir_parents_label(i->temp_path, 0700); + + r = btrfs_subvol_make(i->temp_path); + if (r == -ENOTTY) { + if (mkdir(i->temp_path, 0755) < 0) + return log_error_errno(errno, "Failed to create directory %s: %m", i->temp_path); + } else if (r < 0) + return log_error_errno(errno, "Failed to create subvolume %s: %m", i->temp_path); + + j->disk_fd = pull_fork_tar(i->temp_path, &i->tar_pid); + if (j->disk_fd < 0) + return j->disk_fd; + + return 0; +} + +static void 
tar_pull_job_on_progress(PullJob *j) { + TarPull *i; + + assert(j); + assert(j->userdata); + + i = j->userdata; + + tar_pull_report_progress(i, TAR_DOWNLOADING); +} + +int tar_pull_start(TarPull *i, const char *url, const char *local, bool force_local, ImportVerify verify) { + int r; + + assert(i); + + if (!http_url_is_valid(url)) + return -EINVAL; + + if (local && !machine_name_is_valid(local)) + return -EINVAL; + + if (i->tar_job) + return -EBUSY; + + r = free_and_strdup(&i->local, local); + if (r < 0) + return r; + i->force_local = force_local; + i->verify = verify; + + r = pull_job_new(&i->tar_job, url, i->glue, i); + if (r < 0) + return r; + + i->tar_job->on_finished = tar_pull_job_on_finished; + i->tar_job->on_open_disk = tar_pull_job_on_open_disk; + i->tar_job->on_progress = tar_pull_job_on_progress; + i->tar_job->calc_checksum = verify != IMPORT_VERIFY_NO; + i->tar_job->grow_machine_directory = i->grow_machine_directory; + + r = pull_find_old_etags(url, i->image_root, DT_DIR, ".tar-", NULL, &i->tar_job->old_etags); + if (r < 0) + return r; + + r = pull_make_verification_jobs(&i->checksum_job, &i->signature_job, verify, url, i->glue, tar_pull_job_on_finished, i); + if (r < 0) + return r; + + r = pull_job_begin(i->tar_job); + if (r < 0) + return r; + + if (i->checksum_job) { + i->checksum_job->on_progress = tar_pull_job_on_progress; + + r = pull_job_begin(i->checksum_job); + if (r < 0) + return r; + } + + if (i->signature_job) { + i->signature_job->on_progress = tar_pull_job_on_progress; + + r = pull_job_begin(i->signature_job); + if (r < 0) + return r; + } + + return 0; +} diff --git a/src/import/pull-tar.h b/src/import/pull-tar.h new file mode 100644 index 0000000000..0ed507748c --- /dev/null +++ b/src/import/pull-tar.h @@ -0,0 +1,37 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + +#pragma once + +/*** + This file is part of systemd. + + Copyright 2015 Lennart Poettering + + systemd is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + systemd is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with systemd; If not, see . 
+***/ + +#include "sd-event.h" +#include "macro.h" +#include "import-util.h" + +typedef struct TarPull TarPull; + +typedef void (*TarPullFinished)(TarPull *pull, int error, void *userdata); + +int tar_pull_new(TarPull **pull, sd_event *event, const char *image_root, TarPullFinished on_finished, void *userdata); +TarPull* tar_pull_unref(TarPull *pull); + +DEFINE_TRIVIAL_CLEANUP_FUNC(TarPull*, tar_pull_unref); + +int tar_pull_start(TarPull *pull, const char *url, const char *local, bool force_local, ImportVerify verify); diff --git a/src/import/pull.c b/src/import/pull.c index ee3ff68036..b6283b54aa 100644 --- a/src/import/pull.c +++ b/src/import/pull.c @@ -26,10 +26,10 @@ #include "verbs.h" #include "build.h" #include "machine-image.h" -#include "import-tar.h" -#include "import-raw.h" -#include "import-dkr.h" #include "import-util.h" +#include "pull-tar.h" +#include "pull-raw.h" +#include "pull-dkr.h" static bool arg_force = false; static const char *arg_image_root = "/var/lib/machines"; @@ -42,9 +42,9 @@ static int interrupt_signal_handler(sd_event_source *s, const struct signalfd_si return 0; } -static void on_tar_finished(TarImport *import, int error, void *userdata) { +static void on_tar_finished(TarPull *pull, int error, void *userdata) { sd_event *event = userdata; - assert(import); + assert(pull); if (error == 0) log_info("Operation completed successfully."); @@ -53,7 +53,7 @@ static void on_tar_finished(TarImport *import, int error, void *userdata) { } static int pull_tar(int argc, char *argv[], void *userdata) { - _cleanup_(tar_import_unrefp) TarImport *import = NULL; + _cleanup_(tar_pull_unrefp) TarPull *pull = NULL; _cleanup_event_unref_ sd_event *event = NULL; const char *url, *local; _cleanup_free_ char *l = NULL, *ll = NULL; @@ -112,11 +112,11 @@ static int pull_tar(int argc, char *argv[], void *userdata) { sd_event_add_signal(event, NULL, SIGTERM, interrupt_signal_handler, NULL); sd_event_add_signal(event, NULL, SIGINT, interrupt_signal_handler, NULL); - r = tar_import_new(&import, event, arg_image_root, on_tar_finished, event); + r = tar_pull_new(&pull, event, arg_image_root, on_tar_finished, event); if (r < 0) - return log_error_errno(r, "Failed to allocate importer: %m"); + return log_error_errno(r, "Failed to allocate puller: %m"); - r = tar_import_pull(import, url, local, arg_force, arg_verify); + r = tar_pull_start(pull, url, local, arg_force, arg_verify); if (r < 0) return log_error_errno(r, "Failed to pull image: %m"); @@ -128,9 +128,9 @@ static int pull_tar(int argc, char *argv[], void *userdata) { return -r; } -static void on_raw_finished(RawImport *import, int error, void *userdata) { +static void on_raw_finished(RawPull *pull, int error, void *userdata) { sd_event *event = userdata; - assert(import); + assert(pull); if (error == 0) log_info("Operation completed successfully."); @@ -139,7 +139,7 @@ static void on_raw_finished(RawImport *import, int error, void *userdata) { } static int pull_raw(int argc, char *argv[], void *userdata) { - _cleanup_(raw_import_unrefp) RawImport *import = NULL; + _cleanup_(raw_pull_unrefp) RawPull *pull = NULL; _cleanup_event_unref_ sd_event *event = NULL; const char *url, *local; _cleanup_free_ char *l = NULL, *ll = NULL; @@ -198,11 +198,11 @@ static int pull_raw(int argc, char *argv[], void *userdata) { sd_event_add_signal(event, NULL, SIGTERM, interrupt_signal_handler, NULL); sd_event_add_signal(event, NULL, SIGINT, interrupt_signal_handler, NULL); - r = raw_import_new(&import, event, arg_image_root, on_raw_finished, event); + r = 
raw_pull_new(&pull, event, arg_image_root, on_raw_finished, event); if (r < 0) - return log_error_errno(r, "Failed to allocate importer: %m"); + return log_error_errno(r, "Failed to allocate puller: %m"); - r = raw_import_pull(import, url, local, arg_force, arg_verify); + r = raw_pull_start(pull, url, local, arg_force, arg_verify); if (r < 0) return log_error_errno(r, "Failed to pull image: %m"); @@ -214,9 +214,9 @@ static int pull_raw(int argc, char *argv[], void *userdata) { return -r; } -static void on_dkr_finished(DkrImport *import, int error, void *userdata) { +static void on_dkr_finished(DkrPull *pull, int error, void *userdata) { sd_event *event = userdata; - assert(import); + assert(pull); if (error == 0) log_info("Operation completed successfully."); @@ -225,7 +225,7 @@ static void on_dkr_finished(DkrImport *import, int error, void *userdata) { } static int pull_dkr(int argc, char *argv[], void *userdata) { - _cleanup_(dkr_import_unrefp) DkrImport *import = NULL; + _cleanup_(dkr_pull_unrefp) DkrPull *pull = NULL; _cleanup_event_unref_ sd_event *event = NULL; const char *name, *tag, *local; int r; @@ -236,7 +236,7 @@ static int pull_dkr(int argc, char *argv[], void *userdata) { } if (arg_verify != IMPORT_VERIFY_NO) { - log_error("Imports from dkr do not support image verification, please pass --verify=no."); + log_error("Pulls from dkr do not support image verification, please pass --verify=no."); return -EINVAL; } @@ -300,11 +300,11 @@ static int pull_dkr(int argc, char *argv[], void *userdata) { sd_event_add_signal(event, NULL, SIGTERM, interrupt_signal_handler, NULL); sd_event_add_signal(event, NULL, SIGINT, interrupt_signal_handler, NULL); - r = dkr_import_new(&import, event, arg_dkr_index_url, arg_image_root, on_dkr_finished, event); + r = dkr_pull_new(&pull, event, arg_dkr_index_url, arg_image_root, on_dkr_finished, event); if (r < 0) - return log_error_errno(r, "Failed to allocate importer: %m"); + return log_error_errno(r, "Failed to allocate puller: %m"); - r = dkr_import_pull(import, name, tag, local, arg_force); + r = dkr_pull_start(pull, name, tag, local, arg_force); if (r < 0) return log_error_errno(r, "Failed to pull image: %m"); @@ -319,7 +319,7 @@ static int pull_dkr(int argc, char *argv[], void *userdata) { static int help(int argc, char *argv[], void *userdata) { printf("%s [OPTIONS...] {COMMAND} ...\n\n" - "Import container or virtual machine image.\n\n" + "Download container or virtual machine image.\n\n" " -h --help Show this help\n" " --version Show package version\n" " --force Force creation of image\n" @@ -409,7 +409,7 @@ static int parse_argv(int argc, char *argv[]) { return 1; } -static int import_main(int argc, char *argv[]) { +static int pull_main(int argc, char *argv[]) { static const Verb verbs[] = { { "help", VERB_ANY, VERB_ANY, 0, help }, @@ -433,7 +433,7 @@ int main(int argc, char *argv[]) { if (r <= 0) goto finish; - r = import_main(argc, argv); + r = pull_main(argc, argv); finish: return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS; -- cgit v1.2.3-54-g00ecf
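
For reference, the renamed entry points compose the same way the old import ones did: a puller object is bound to an sd_event loop and driven until its finished callback fires. The sketch below is not part of the patch; it only restates, under the assumption that the headers added above (pull-tar.h, import-util.h) are available in the build, how a caller such as pull.c uses tar_pull_new() and tar_pull_start() after the rename. The function name pull_tar_example is hypothetical; verification is disabled here purely for brevity.

/* Hypothetical caller, modelled on pull_tar() in src/import/pull.c above.
 * Assumes the renamed headers from this patch are in the include path. */
#include "sd-event.h"
#include "util.h"
#include "import-util.h"
#include "pull-tar.h"

static void on_finished(TarPull *pull, int error, void *userdata) {
        sd_event *event = userdata;

        if (error == 0)
                log_info("Operation completed successfully.");

        /* Stop the event loop, propagating the (absolute) error code. */
        sd_event_exit(event, abs(error));
}

static int pull_tar_example(const char *url, const char *local) {
        _cleanup_(tar_pull_unrefp) TarPull *pull = NULL;
        _cleanup_event_unref_ sd_event *event = NULL;
        int r;

        r = sd_event_default(&event);
        if (r < 0)
                return r;

        /* Bind the puller to the loop and the default image root. */
        r = tar_pull_new(&pull, event, "/var/lib/machines", on_finished, event);
        if (r < 0)
                return r;

        /* Queue the download; force_local=false, verification off for brevity. */
        r = tar_pull_start(pull, url, local, false, IMPORT_VERIFY_NO);
        if (r < 0)
                return r;

        /* Run until on_finished() calls sd_event_exit(). */
        return sd_event_loop(event);
}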