Diffstat (limited to 'src')
-rw-r--r--  src/journal/journal-internal.h    21
-rw-r--r--  src/journal/journalctl.c          34
-rw-r--r--  src/journal/sd-journal.c         172
-rw-r--r--  src/libsystemd/libsystemd.sym      8
-rw-r--r--  src/systemd/sd-journal.h          13
-rw-r--r--  src/tmpfiles/tmpfiles.c           23
6 files changed, 238 insertions, 33 deletions
diff --git a/src/journal/journal-internal.h b/src/journal/journal-internal.h
index fa5ca11636..a55d1bcc47 100644
--- a/src/journal/journal-internal.h
+++ b/src/journal/journal-internal.h
@@ -103,18 +103,27 @@ struct sd_journal {
unsigned current_invalidate_counter, last_invalidate_counter;
usec_t last_process_usec;
+ /* Iterating through unique fields and their data values */
char *unique_field;
JournalFile *unique_file;
uint64_t unique_offset;
+ /* Iterating through known fields */
+ JournalFile *fields_file;
+ uint64_t fields_offset;
+ uint64_t fields_hash_table_index;
+ char *fields_buffer;
+ size_t fields_buffer_allocated;
+
int flags;
- bool on_network;
- bool no_new_files;
- bool unique_file_lost; /* File we were iterating over got
- removed, and there were no more
- files, so sd_j_enumerate_unique
- will return a value equal to 0. */
+ bool on_network:1;
+ bool no_new_files:1;
+ bool unique_file_lost:1; /* File we were iterating over got
+ removed, and there were no more
+ files, so sd_j_enumerate_unique
+ will return a value equal to 0. */
+ bool fields_file_lost:1;
bool has_runtime_files:1;
bool has_persistent_files:1;
diff --git a/src/journal/journalctl.c b/src/journal/journalctl.c
index cf359d20ca..20f7082175 100644
--- a/src/journal/journalctl.c
+++ b/src/journal/journalctl.c
@@ -138,6 +138,8 @@ static enum {
ACTION_SYNC,
ACTION_ROTATE,
ACTION_VACUUM,
+ ACTION_LIST_FIELDS,
+ ACTION_LIST_FIELD_NAMES,
} arg_action = ACTION_SHOW;
typedef struct BootId {
@@ -218,6 +220,10 @@ static int add_matches_for_device(sd_journal *j, const char *devpath) {
d = udev_device_get_parent(d);
}
+ r = add_match_this_boot(j, arg_machine);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add match for the current boot: %m");
+
return 0;
}
@@ -320,6 +326,7 @@ static void help(void) {
"\nCommands:\n"
" -h --help Show this help text\n"
" --version Show package version\n"
+ " -N --fields List all field names currently used\n"
" -F --field=FIELD List all values that a specified field takes\n"
" --disk-usage Show total disk usage of all journal files\n"
" --vacuum-size=BYTES Reduce disk usage below specified size\n"
@@ -416,6 +423,7 @@ static int parse_argv(int argc, char *argv[]) {
{ "unit", required_argument, NULL, 'u' },
{ "user-unit", required_argument, NULL, ARG_USER_UNIT },
{ "field", required_argument, NULL, 'F' },
+ { "fields", no_argument, NULL, 'N' },
{ "catalog", no_argument, NULL, 'x' },
{ "list-catalog", no_argument, NULL, ARG_LIST_CATALOG },
{ "dump-catalog", no_argument, NULL, ARG_DUMP_CATALOG },
@@ -437,7 +445,7 @@ static int parse_argv(int argc, char *argv[]) {
assert(argc >= 0);
assert(argv);
- while ((c = getopt_long(argc, argv, "hefo:aln::qmb::kD:p:c:S:U:t:u:F:xrM:", options, NULL)) >= 0)
+ while ((c = getopt_long(argc, argv, "hefo:aln::qmb::kD:p:c:S:U:t:u:NF:xrM:", options, NULL)) >= 0)
switch (c) {
@@ -774,9 +782,14 @@ static int parse_argv(int argc, char *argv[]) {
break;
case 'F':
+ arg_action = ACTION_LIST_FIELDS;
arg_field = optarg;
break;
+ case 'N':
+ arg_action = ACTION_LIST_FIELD_NAMES;
+ break;
+
case 'x':
arg_catalog = true;
break;
@@ -2081,6 +2094,8 @@ int main(int argc, char *argv[]) {
case ACTION_DISK_USAGE:
case ACTION_LIST_BOOTS:
case ACTION_VACUUM:
+ case ACTION_LIST_FIELDS:
+ case ACTION_LIST_FIELD_NAMES:
/* These ones require access to the journal files, continue below. */
break;
@@ -2163,7 +2178,20 @@ int main(int argc, char *argv[]) {
goto finish;
}
+ case ACTION_LIST_FIELD_NAMES: {
+ const char *field;
+
+ SD_JOURNAL_FOREACH_FIELD(j, field) {
+ printf("%s\n", field);
+ n_shown ++;
+ }
+
+ r = 0;
+ goto finish;
+ }
+
case ACTION_SHOW:
+ case ACTION_LIST_FIELDS:
break;
default:
@@ -2217,10 +2245,12 @@ int main(int argc, char *argv[]) {
log_debug("Journal filter: %s", filter);
}
- if (arg_field) {
+ if (arg_action == ACTION_LIST_FIELDS) {
const void *data;
size_t size;
+ assert(arg_field);
+
r = sd_journal_set_data_threshold(j, 0);
if (r < 0) {
log_error_errno(r, "Failed to unset data size threshold: %m");
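
For reference, here is a minimal sketch of what the new --fields / -N code path (ACTION_LIST_FIELD_NAMES) boils down to when written directly against the public API. The file name, the build command and the choice of SD_JOURNAL_LOCAL_ONLY are assumptions rather than part of the patch, and error handling is trimmed.

#include <stdio.h>
#include <string.h>
#include <systemd/sd-journal.h>

/* Roughly what "journalctl --fields" does: open the journal and walk all
 * known field names with the new SD_JOURNAL_FOREACH_FIELD macro.
 * Build (assumed): cc list-fields.c $(pkg-config --cflags --libs libsystemd) */
int main(void) {
        sd_journal *j;
        const char *field;
        int r;

        r = sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY);
        if (r < 0) {
                fprintf(stderr, "Failed to open journal: %s\n", strerror(-r));
                return 1;
        }

        SD_JOURNAL_FOREACH_FIELD(j, field)
                printf("%s\n", field);

        sd_journal_close(j);
        return 0;
}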
diff --git a/src/journal/sd-journal.c b/src/journal/sd-journal.c
index 74a5e262f8..7a3aaf0cab 100644
--- a/src/journal/sd-journal.c
+++ b/src/journal/sd-journal.c
@@ -1338,6 +1338,13 @@ static void remove_file_real(sd_journal *j, JournalFile *f) {
j->unique_file_lost = true;
}
+ if (j->fields_file == f) {
+ j->fields_file = ordered_hashmap_next(j->files, j->fields_file->path);
+ j->fields_offset = 0;
+ if (!j->fields_file)
+ j->fields_file_lost = true;
+ }
+
journal_file_close(f);
j->current_invalidate_counter ++;
@@ -1806,6 +1813,7 @@ _public_ void sd_journal_close(sd_journal *j) {
free(j->path);
free(j->prefix);
free(j->unique_field);
+ free(j->fields_buffer);
free(j);
}
@@ -2512,24 +2520,20 @@ _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_
* traversed files. */
found = false;
ORDERED_HASHMAP_FOREACH(of, j->files, i) {
- Object *oo;
- uint64_t op;
-
if (of == j->unique_file)
break;
- /* Skip this file it didn't have any fields
- * indexed */
- if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) &&
- le64toh(of->header->n_fields) <= 0)
+ /* Skip this file if it didn't have any fields indexed */
+ if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) && le64toh(of->header->n_fields) <= 0)
continue;
- r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), &oo, &op);
+ r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
if (r < 0)
return r;
-
- if (r > 0)
+ if (r > 0) {
found = true;
+ break;
+ }
}
if (found)
@@ -2552,6 +2556,154 @@ _public_ void sd_journal_restart_unique(sd_journal *j) {
j->unique_file_lost = false;
}
+_public_ int sd_journal_enumerate_fields(sd_journal *j, const char **field) {
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(field, -EINVAL);
+
+ if (!j->fields_file) {
+ if (j->fields_file_lost)
+ return 0;
+
+ j->fields_file = ordered_hashmap_first(j->files);
+ if (!j->fields_file)
+ return 0;
+
+ j->fields_hash_table_index = 0;
+ j->fields_offset = 0;
+ }
+
+ for (;;) {
+ JournalFile *f, *of;
+ Iterator i;
+ uint64_t m;
+ Object *o;
+ size_t sz;
+ bool found;
+
+ f = j->fields_file;
+
+ if (j->fields_offset == 0) {
+ bool eof = false;
+
+ /* We are not yet positioned at any field. Let's pick the first one */
+ r = journal_file_map_field_hash_table(f);
+ if (r < 0)
+ return r;
+
+ m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
+ for (;;) {
+ if (j->fields_hash_table_index >= m) {
+ /* Reached the end of the hash table, go to the next file. */
+ eof = true;
+ break;
+ }
+
+ j->fields_offset = le64toh(f->field_hash_table[j->fields_hash_table_index].head_hash_offset);
+
+ if (j->fields_offset != 0)
+ break;
+
+ /* Empty hash table bucket, go to next one */
+ j->fields_hash_table_index++;
+ }
+
+ if (eof) {
+ /* Proceed with next file */
+ j->fields_file = ordered_hashmap_next(j->files, f->path);
+ if (!j->fields_file) {
+ *field = NULL;
+ return 0;
+ }
+
+ j->fields_offset = 0;
+ j->fields_hash_table_index = 0;
+ continue;
+ }
+
+ } else {
+ /* We are already positioned at a field. Let's figure out the next field from it */
+
+ r = journal_file_move_to_object(f, OBJECT_FIELD, j->fields_offset, &o);
+ if (r < 0)
+ return r;
+
+ j->fields_offset = le64toh(o->field.next_hash_offset);
+ if (j->fields_offset == 0) {
+ /* Reached the end of the hash table chain */
+ j->fields_hash_table_index++;
+ continue;
+ }
+ }
+
+ /* We use OBJECT_UNUSED here, so that the iterator below doesn't remove our mmap window */
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, j->fields_offset, &o);
+ if (r < 0)
+ return r;
+
+ /* Because we used OBJECT_UNUSED above, we need to do our type check manually */
+ if (o->object.type != OBJECT_FIELD) {
+ log_debug("%s:offset " OFSfmt ": object has type %i, expected %i", f->path, j->fields_offset, o->object.type, OBJECT_FIELD);
+ return -EBADMSG;
+ }
+
+ sz = le64toh(o->object.size) - offsetof(Object, field.payload);
+
+ /* Let's see if we already returned this field name before. */
+ found = false;
+ ORDERED_HASHMAP_FOREACH(of, j->files, i) {
+ if (of == f)
+ break;
+
+ /* Skip this file if it didn't have any fields indexed */
+ if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) && le64toh(of->header->n_fields) <= 0)
+ continue;
+
+ r = journal_file_find_field_object_with_hash(of, o->field.payload, sz, le64toh(o->field.hash), NULL, NULL);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ /* Check if this is really a valid string containing no NUL byte */
+ if (memchr(o->field.payload, 0, sz))
+ return -EBADMSG;
+
+ if (sz > j->data_threshold)
+ sz = j->data_threshold;
+
+ if (!GREEDY_REALLOC(j->fields_buffer, j->fields_buffer_allocated, sz + 1))
+ return -ENOMEM;
+
+ memcpy(j->fields_buffer, o->field.payload, sz);
+ j->fields_buffer[sz] = 0;
+
+ if (!field_is_valid(j->fields_buffer))
+ return -EBADMSG;
+
+ *field = j->fields_buffer;
+ return 1;
+ }
+}
+
+_public_ void sd_journal_restart_fields(sd_journal *j) {
+ if (!j)
+ return;
+
+ j->fields_file = NULL;
+ j->fields_hash_table_index = 0;
+ j->fields_offset = 0;
+ j->fields_file_lost = false;
+}
+
_public_ int sd_journal_reliable_fd(sd_journal *j) {
assert_return(j, -EINVAL);
assert_return(!journal_pid_changed(j), -ECHILD);
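
The SD_JOURNAL_FOREACH_FIELD macro added in sd-journal.h hides the return-value convention of the new enumerator; the following is a sketch of driving it by hand, assuming an already-open sd_journal handle (the helper name is hypothetical, not part of this change).

#include <stdio.h>
#include <systemd/sd-journal.h>

/* Hypothetical helper: enumerate field names without the convenience macro,
 * making the return-value convention explicit:
 *   > 0  a field name was stored in *field (valid until the next call)
 *   == 0 all journal files have been exhausted
 *   < 0  an error, e.g. -EBADMSG for a corrupted field object */
int list_field_names(sd_journal *j) {
        const char *field;
        int r;

        sd_journal_restart_fields(j);

        for (;;) {
                r = sd_journal_enumerate_fields(j, &field);
                if (r < 0)
                        return r;
                if (r == 0)
                        return 0;

                puts(field);
        }
}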
diff --git a/src/libsystemd/libsystemd.sym b/src/libsystemd/libsystemd.sym
index 043ff13e6f..4ab637b686 100644
--- a/src/libsystemd/libsystemd.sym
+++ b/src/libsystemd/libsystemd.sym
@@ -481,3 +481,11 @@ global:
sd_bus_path_encode_many;
sd_listen_fds_with_names;
} LIBSYSTEMD_226;
+
+LIBSYSTEMD_229 {
+global:
+ sd_journal_has_runtime_files;
+ sd_journal_has_persistent_files;
+ sd_journal_enumerate_fields;
+ sd_journal_restart_fields;
+} LIBSYSTEMD_227;
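
Since the two new journal entry points are only exported from the LIBSYSTEMD_229 version node onward, a program that must also run against older libsystemd builds could probe for them at runtime. A sketch using dlsym follows; the soname and the overall approach are assumptions, not part of this change.

#include <dlfcn.h>
#include <stdio.h>

/* Probe whether the installed libsystemd exports the new enumerator.
 * Link with -ldl where that is still required. */
int main(void) {
        void *h = dlopen("libsystemd.so.0", RTLD_NOW);
        if (!h) {
                fprintf(stderr, "dlopen: %s\n", dlerror());
                return 1;
        }

        int have = dlsym(h, "sd_journal_enumerate_fields") != NULL;
        printf("sd_journal_enumerate_fields: %s\n", have ? "available" : "not available");

        dlclose(h);
        return 0;
}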
diff --git a/src/systemd/sd-journal.h b/src/systemd/sd-journal.h
index 7f16c69ce5..caf322f062 100644
--- a/src/systemd/sd-journal.h
+++ b/src/systemd/sd-journal.h
@@ -129,6 +129,9 @@ int sd_journal_query_unique(sd_journal *j, const char *field);
int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_t *l);
void sd_journal_restart_unique(sd_journal *j);
+int sd_journal_enumerate_fields(sd_journal *j, const char **field);
+void sd_journal_restart_fields(sd_journal *j);
+
int sd_journal_get_fd(sd_journal *j);
int sd_journal_get_events(sd_journal *j);
int sd_journal_get_timeout(sd_journal *j, uint64_t *timeout_usec);
@@ -142,22 +145,28 @@ int sd_journal_get_catalog_for_message_id(sd_id128_t id, char **text);
int sd_journal_has_runtime_files(sd_journal *j);
int sd_journal_has_persistent_files(sd_journal *j);
-/* the inverse condition avoids ambiguity of danling 'else' after the macro */
+/* The inverse condition avoids ambiguity of dangling 'else' after the macro */
#define SD_JOURNAL_FOREACH(j) \
if (sd_journal_seek_head(j) < 0) { } \
else while (sd_journal_next(j) > 0)
-/* the inverse condition avoids ambiguity of danling 'else' after the macro */
+/* The inverse condition avoids ambiguity of dangling 'else' after the macro */
#define SD_JOURNAL_FOREACH_BACKWARDS(j) \
if (sd_journal_seek_tail(j) < 0) { } \
else while (sd_journal_previous(j) > 0)
+/* Iterate through the data fields of the current journal entry */
#define SD_JOURNAL_FOREACH_DATA(j, data, l) \
for (sd_journal_restart_data(j); sd_journal_enumerate_data((j), &(data), &(l)) > 0; )
+/* Iterate through all known values of a specific field */
#define SD_JOURNAL_FOREACH_UNIQUE(j, data, l) \
for (sd_journal_restart_unique(j); sd_journal_enumerate_unique((j), &(data), &(l)) > 0; )
+/* Iterate through all known field names */
+#define SD_JOURNAL_FOREACH_FIELD(j, field) \
+ for (sd_journal_restart_fields(j); sd_journal_enumerate_fields((j), &(field)) > 0; )
+
_SD_DEFINE_POINTER_CLEANUP_FUNC(sd_journal, sd_journal_close);
_SD_END_DECLARATIONS;
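
The new field enumeration keeps its own cursor (fields_file, fields_offset, fields_hash_table_index), separate from the unique-value cursor, so the two iterations can be interleaved. The sketch below prints every known field followed by its FIELD=value pairs; opening only the local journal and skipping data-threshold tuning are simplifications, not part of the patch.

#include <stdio.h>
#include <systemd/sd-journal.h>

int main(void) {
        sd_journal *j;
        const char *field;

        if (sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY) < 0)
                return 1;

        SD_JOURNAL_FOREACH_FIELD(j, field) {
                const void *data;
                size_t l;

                printf("%s:\n", field);

                if (sd_journal_query_unique(j, field) < 0)
                        continue;

                /* Each entry is the full "FIELD=value" pair; values may be
                 * binary, printed verbatim here for brevity. */
                SD_JOURNAL_FOREACH_UNIQUE(j, data, l)
                        printf("  %.*s\n", (int) l, (const char *) data);
        }

        sd_journal_close(j);
        return 0;
}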
diff --git a/src/tmpfiles/tmpfiles.c b/src/tmpfiles/tmpfiles.c
index bb81ff5e3a..59ef940a4d 100644
--- a/src/tmpfiles/tmpfiles.c
+++ b/src/tmpfiles/tmpfiles.c
@@ -1153,6 +1153,7 @@ static int create_item(Item *i) {
_cleanup_free_ char *resolved = NULL;
struct stat st;
int r = 0;
+ int q = 0;
CreationMode creation;
assert(i);
@@ -1279,27 +1280,23 @@ static int create_item(Item *i) {
if (IN_SET(i->type, CREATE_SUBVOLUME_NEW_QUOTA, CREATE_SUBVOLUME_INHERIT_QUOTA)) {
r = btrfs_subvol_auto_qgroup(i->path, 0, i->type == CREATE_SUBVOLUME_NEW_QUOTA);
- if (r == -ENOTTY) {
+ if (r == -ENOTTY)
log_debug_errno(r, "Couldn't adjust quota for subvolume \"%s\" because of unsupported file system or because directory is not a subvolume: %m", i->path);
- return 0;
- }
- if (r == -EROFS) {
+ else if (r == -EROFS)
log_debug_errno(r, "Couldn't adjust quota for subvolume \"%s\" because of read-only file system: %m", i->path);
- return 0;
- }
- if (r == -ENOPROTOOPT) {
+ else if (r == -ENOPROTOOPT)
log_debug_errno(r, "Couldn't adjust quota for subvolume \"%s\" because quota support is disabled: %m", i->path);
- return 0;
- }
- if (r < 0)
- return log_error_errno(r, "Failed to adjust quota for subvolume \"%s\": %m", i->path);
- if (r > 0)
+ else if (r < 0)
+ q = log_error_errno(r, "Failed to adjust quota for subvolume \"%s\": %m", i->path);
+ else if (r > 0)
log_debug("Adjusted quota for subvolume \"%s\".", i->path);
- if (r == 0)
+ else if (r == 0)
log_debug("Quota for subvolume \"%s\" already in place, no change made.", i->path);
}
r = path_set_perms(i, i->path);
+ if (q < 0)
+ return q;
if (r < 0)
return r;
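
The tmpfiles.c change replaces the early returns with a remembered error code (q) so that path_set_perms() still runs even when the quota adjustment failed hard. A stripped-down illustration of that pattern, with hypothetical stand-in functions rather than the real tmpfiles.c helpers:

#include <errno.h>

/* Hypothetical stand-ins for btrfs_subvol_auto_qgroup() and path_set_perms(). */
static int optional_step(void) { return -ENOTTY; }
static int required_step(void) { return 0; }

int create_item_sketch(void) {
        int r, q = 0;

        r = optional_step();
        if (r == -ENOTTY || r == -EROFS || r == -ENOPROTOOPT)
                ; /* soft failure: worth a debug log, but not fatal */
        else if (r < 0)
                q = r; /* hard failure: remember it, but keep going */

        r = required_step();

        if (q < 0)
                return q; /* the earlier hard failure takes precedence */
        return r;
}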