From 45c047b227d96e98e7076c15ae774ff6390dc403 Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Fri, 24 Jul 2015 01:40:44 +0200 Subject: journal-verify: don't hit SIGFPE when determining progress If we determine the progress based on the number of objects available, don't blindly divide by the number of objects, given that it might be 0. --- src/journal/journal-verify.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/journal/journal-verify.c b/src/journal/journal-verify.c index ce734d8df7..cf5be1f86a 100644 --- a/src/journal/journal-verify.c +++ b/src/journal/journal-verify.c @@ -69,6 +69,16 @@ static void draw_progress(uint64_t p, usec_t *last_usec) { fflush(stdout); } +static uint64_t scale_progress(uint64_t scale, uint64_t p, uint64_t m) { + + /* Calculates scale * p / m, but handles m == 0 safely, and saturates */ + + if (p >= m || m == 0) + return scale; + + return scale * p / m; +} + static void flush_progress(void) { unsigned n, i; @@ -584,7 +594,7 @@ static int verify_hash_table( uint64_t last = 0, p; if (show_progress) - draw_progress(0xC000 + (0x3FFF * i / n), last_usec); + draw_progress(0xC000 + scale_progress(0x3FFF, i, n), last_usec); p = le64toh(f->data_hash_table[i].head_hash_offset); while (p != 0) { @@ -726,7 +736,7 @@ static int verify_entry_array( Object *o; if (show_progress) - draw_progress(0x8000 + (0x3FFF * i / n), last_usec); + draw_progress(0x8000 + scale_progress(0x3FFF, i, n), last_usec); if (a == 0) { error(a, "array chain too short at %"PRIu64" of %"PRIu64, i, n); @@ -863,7 +873,7 @@ int journal_file_verify( p = le64toh(f->header->header_size); while (p != 0) { if (show_progress) - draw_progress(0x7FFF * p / le64toh(f->header->tail_object_offset), &last_usec); + draw_progress(scale_progress(0x7FFF, p, le64toh(f->header->tail_object_offset)), &last_usec); r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o); if (r < 0) { -- cgit v1.2.3-54-g00ecf From dade37d403f1b8c1d7bb2efbe2361f2a3e999613 Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Fri, 24 Jul 2015 01:55:45 +0200 Subject: journal: avoid mapping empty data and field hash tables When a new journal file is created we write the header first, then sync, and only then create the data and field hash tables in it. That means that to other processes the file might appear to have a valid header but no data and field hash tables yet. Our reader code should be able to deal with this. With this change we no longer map the two hash tables right away after opening a file for reading (because that would of course fail if the objects are missing), but delay this until the first time we access them. On top of that, when we want to look something up in the hash tables and notice they aren't initialized yet, we consider them empty. This improves handling of some journal files reported in #487.
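The change boils down to ordinary lazy initialization: the mapping helpers become idempotent and are called from every lookup path instead of once at open time, and lookups treat a hash table whose advertised size is still zero as empty. A minimal standalone sketch of that pattern follows; Table, table_map() and table_lookup() are hypothetical simplified stand-ins rather than the real JournalFile API, and calloc() stands in for the mmap-backed mapping the hunks below actually perform.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for an mmap-backed hash table. */
typedef struct Table {
        void *mapped;   /* NULL until first use, like f->data_hash_table */
        size_t size;    /* size advertised by the file header */
} Table;

/* Idempotent mapper: cheap no-op if the table is already mapped,
 * mirroring the new "if (f->data_hash_table) return 0;" early exit. */
static int table_map(Table *t) {
        if (t->mapped)
                return 0;

        t->mapped = calloc(1, t->size);  /* stands in for the real mapping call */
        if (!t->mapped)
                return -ENOMEM;

        return 0;
}

/* Lookup treats a not-yet-created table as empty instead of failing. */
static int table_lookup(Table *t) {
        int r;

        if (t->size == 0)
                return 0;        /* no hash table yet, hence nothing to find */

        r = table_map(t);
        if (r < 0)
                return r;

        /* ... a real implementation would now walk the hash chain ... */
        return 1;
}

int main(void) {
        Table t = { .mapped = NULL, .size = 0 };

        /* Header already written, hash tables not yet created: "not found". */
        printf("lookup before table exists: %d\n", table_lookup(&t));

        /* Tables appear later; the first lookup maps them on demand. */
        t.size = 4096;
        printf("lookup after table exists: %d\n", table_lookup(&t));

        free(t.mapped);
        return 0;
}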
--- src/journal/journal-file.c | 37 ++++++++++++++++++++++++++----------- src/journal/journal-file.h | 3 +++ src/journal/journal-verify.c | 14 ++++++++++++++ 3 files changed, 43 insertions(+), 11 deletions(-) diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c index be6a5522fa..f7815b2796 100644 --- a/src/journal/journal-file.c +++ b/src/journal/journal-file.c @@ -656,13 +656,16 @@ static int journal_file_setup_field_hash_table(JournalFile *f) { return 0; } -static int journal_file_map_data_hash_table(JournalFile *f) { +int journal_file_map_data_hash_table(JournalFile *f) { uint64_t s, p; void *t; int r; assert(f); + if (f->data_hash_table) + return 0; + p = le64toh(f->header->data_hash_table_offset); s = le64toh(f->header->data_hash_table_size); @@ -678,13 +681,16 @@ static int journal_file_map_data_hash_table(JournalFile *f) { return 0; } -static int journal_file_map_field_hash_table(JournalFile *f) { +int journal_file_map_field_hash_table(JournalFile *f) { uint64_t s, p; void *t; int r; assert(f); + if (f->field_hash_table) + return 0; + p = le64toh(f->header->field_hash_table_offset); s = le64toh(f->header->field_hash_table_size); @@ -803,10 +809,18 @@ int journal_file_find_field_object_with_hash( assert(f); assert(field && size > 0); + /* If the field hash table is empty, we can't find anything */ + if (le64toh(f->header->field_hash_table_size) <= 0) + return 0; + + /* Map the field hash table, if it isn't mapped yet. */ + r = journal_file_map_field_hash_table(f); + if (r < 0) + return r; + osize = offsetof(Object, field.payload) + size; m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem); - if (m <= 0) return -EBADMSG; @@ -866,6 +880,15 @@ int journal_file_find_data_object_with_hash( assert(f); assert(data || size == 0); + /* If there's no data hash table, then there's no entry. */ + if (le64toh(f->header->data_hash_table_size) <= 0) + return 0; + + /* Map the data hash table, if it isn't mapped yet. 
*/ + r = journal_file_map_data_hash_table(f); + if (r < 0) + return r; + osize = offsetof(Object, data.payload) + size; m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem); @@ -2731,14 +2754,6 @@ int journal_file_open( #endif } - r = journal_file_map_field_hash_table(f); - if (r < 0) - goto fail; - - r = journal_file_map_data_hash_table(f); - if (r < 0) - goto fail; - if (mmap_cache_got_sigbus(f->mmap, f->fd)) { r = -EIO; goto fail; diff --git a/src/journal/journal-file.h b/src/journal/journal-file.h index 403c8f760c..e92b75eabe 100644 --- a/src/journal/journal-file.h +++ b/src/journal/journal-file.h @@ -229,3 +229,6 @@ int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t * int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot, usec_t *from, usec_t *to); bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec); + +int journal_file_map_data_hash_table(JournalFile *f); +int journal_file_map_field_hash_table(JournalFile *f); diff --git a/src/journal/journal-verify.c b/src/journal/journal-verify.c index cf5be1f86a..7d68149cd4 100644 --- a/src/journal/journal-verify.c +++ b/src/journal/journal-verify.c @@ -590,6 +590,13 @@ static int verify_hash_table( assert(last_usec); n = le64toh(f->header->data_hash_table_size) / sizeof(HashItem); + if (n <= 0) + return 0; + + r = journal_file_map_data_hash_table(f); + if (r < 0) + return log_error_errno(r, "Failed to map data hash table: %m"); + for (i = 0; i < n; i++) { uint64_t last = 0, p; @@ -647,6 +654,13 @@ static int data_object_in_hash_table(JournalFile *f, uint64_t hash, uint64_t p) assert(f); n = le64toh(f->header->data_hash_table_size) / sizeof(HashItem); + if (n <= 0) + return 0; + + r = journal_file_map_data_hash_table(f); + if (r < 0) + return log_error_errno(r, "Failed to map data hash table: %m"); + h = hash % n; q = le64toh(f->data_hash_table[h].head_hash_offset); -- cgit v1.2.3-54-g00ecf From 8dc37a85255f68d62f7af66696cbf6a66401fb2a Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Fri, 24 Jul 2015 02:00:43 +0200 Subject: journal: when verifying journal files, handle empty ones nicely A journal file that carries no objects should be considered valid. --- src/journal/journal-verify.c | 37 ++++++++++++++----------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/src/journal/journal-verify.c b/src/journal/journal-verify.c index 7d68149cd4..637162e305 100644 --- a/src/journal/journal-verify.c +++ b/src/journal/journal-verify.c @@ -885,7 +885,11 @@ int journal_file_verify( * superficial structure, headers, hashes. 
*/ p = le64toh(f->header->header_size); - while (p != 0) { + for (;;) { + /* Early exit if there are no objects in the file, at all */ + if (le64toh(f->header->tail_object_offset) == 0) + break; + if (show_progress) draw_progress(scale_progress(0x7FFF, p, le64toh(f->header->tail_object_offset)), &last_usec); @@ -901,9 +905,6 @@ int journal_file_verify( goto fail; } - if (p == le64toh(f->header->tail_object_offset)) - found_last = true; - n_objects ++; r = journal_file_object_verify(f, p, o); @@ -1148,13 +1149,15 @@ int journal_file_verify( n_weird ++; } - if (p == le64toh(f->header->tail_object_offset)) - p = 0; - else - p = p + ALIGN64(le64toh(o->object.size)); - } + if (p == le64toh(f->header->tail_object_offset)) { + found_last = true; + break; + } - if (!found_last) { + p = p + ALIGN64(le64toh(o->object.size)); + }; + + if (!found_last && le64toh(f->header->tail_object_offset) != 0) { error(le64toh(f->header->tail_object_offset), "tail object pointer dead"); r = -EBADMSG; goto fail; @@ -1200,19 +1203,7 @@ int journal_file_verify( goto fail; } - if (n_data_hash_tables != 1) { - error(0, "missing data hash table"); - r = -EBADMSG; - goto fail; - } - - if (n_field_hash_tables != 1) { - error(0, "missing field hash table"); - r = -EBADMSG; - goto fail; - } - - if (!found_main_entry_array) { + if (!found_main_entry_array && le64toh(f->header->entry_array_offset) != 0) { error(0, "missing entry array"); r = -EBADMSG; goto fail; -- cgit v1.2.3-54-g00ecf From bca9e39dfadaefc4b02c0dd378adc3d6221071de Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Fri, 24 Jul 2015 02:02:07 +0200 Subject: journal: explain the error when we find a non-DATA object that is compressed Only objects of type DATA may be compressed; generate a message about that, like we do for all other errors. --- src/journal/journal-verify.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/journal/journal-verify.c b/src/journal/journal-verify.c index 637162e305..0a8f30ceca 100644 --- a/src/journal/journal-verify.c +++ b/src/journal/journal-verify.c @@ -123,8 +123,10 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o * other objects. */ if ((o->object.flags & OBJECT_COMPRESSED_XZ) && - o->object.type != OBJECT_DATA) + o->object.type != OBJECT_DATA) { + error(offset, "Found compressed object that isn't of type DATA, which is not allowed."); return -EBADMSG; + } switch (o->object.type) { -- cgit v1.2.3-54-g00ecf From 02ab86c732576a71179ce12e97d44c289833236d Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Fri, 24 Jul 2015 02:10:32 +0200 Subject: journalctl: properly detect empty journal files When we encounter a journal file with exactly zero entries, print a nice message and exit, and don't print a weird error message.
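The distinction the fix relies on is part of the public sd-journal API: sd_journal_next() returns a negative errno-style value on real errors, but 0 when there is simply no entry to advance to. A minimal reader making the same check as the hunk below; this is an illustrative sketch (build with -lsystemd), not part of the patch, and the message text is only an example.

#include <stdio.h>
#include <string.h>
#include <systemd/sd-journal.h>

int main(void) {
        sd_journal *j = NULL;
        int r;

        r = sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY);
        if (r < 0) {
                fprintf(stderr, "Failed to open journal: %s\n", strerror(-r));
                return 1;
        }

        r = sd_journal_next(j);
        if (r < 0) {
                /* A genuine error while iterating. */
                fprintf(stderr, "Failed to iterate through journal: %s\n", strerror(-r));
                sd_journal_close(j);
                return 1;
        }
        if (r == 0) {
                /* Not an error: the journal is valid but holds zero entries. */
                printf("-- No entries --\n");
                sd_journal_close(j);
                return 0;
        }

        printf("Journal contains at least one entry.\n");
        sd_journal_close(j);
        return 0;
}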
--- src/journal/journalctl.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/journal/journalctl.c b/src/journal/journalctl.c index 2d6ecfb750..073cc77711 100644 --- a/src/journal/journalctl.c +++ b/src/journal/journalctl.c @@ -2066,6 +2066,10 @@ int main(int argc, char *argv[]) { log_error_errno(r, "Failed to iterate through journal: %m"); goto finish; } + if (r == 0) { + printf("-- No entries --\n"); + goto finish; + } if (!arg_follow) pager_open_if_enabled(); -- cgit v1.2.3-54-g00ecf From e80acc51aeaf2d74cb4d6cecbcb6e18f74c22c05 Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Fri, 24 Jul 2015 02:18:13 +0200 Subject: journal: uppercase first character in verify error messages In the English language the first character of a sentence is supposed to be uppercase. Let's make sure this also applies to the journal verification error messages. --- src/journal/journal-verify.c | 162 ++++++++++++++++++++----------------- 1 file changed, 75 insertions(+), 87 deletions(-) diff --git a/src/journal/journal-verify.c b/src/journal/journal-verify.c index 0a8f30ceca..eaf006db7a 100644 --- a/src/journal/journal-verify.c +++ b/src/journal/journal-verify.c @@ -135,15 +135,15 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o int compression, r; if (le64toh(o->data.entry_offset) == 0) - warning(offset, "unused data (entry_offset==0)"); + warning(offset, "Unused data (entry_offset==0)"); if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0)) { - error(offset, "bad n_entries: %"PRIu64, o->data.n_entries); + error(offset, "Bad n_entries: %"PRIu64, o->data.n_entries); return -EBADMSG; } if (le64toh(o->object.size) - offsetof(DataObject, payload) <= 0) { - error(offset, "bad object size (<= %zu): %"PRIu64, + error(offset, "Bad object size (<= %zu): %"PRIu64, offsetof(DataObject, payload), le64toh(o->object.size)); return -EBADMSG; @@ -171,7 +171,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o h2 = hash64(o->data.payload, le64toh(o->object.size) - offsetof(Object, data.payload)); if (h1 != h2) { - error(offset, "invalid hash (%08"PRIx64" vs. %08"PRIx64, h1, h2); + error(offset, "Invalid hash (%08"PRIx64" vs.
%08"PRIx64, h1, h2); return -EBADMSG; } @@ -179,7 +179,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o !VALID64(o->data.next_field_offset) || !VALID64(o->data.entry_offset) || !VALID64(o->data.entry_array_offset)) { - error(offset, "invalid offset (next_hash_offset="OFSfmt", next_field_offset="OFSfmt", entry_offset="OFSfmt", entry_array_offset="OFSfmt, + error(offset, "Invalid offset (next_hash_offset="OFSfmt", next_field_offset="OFSfmt", entry_offset="OFSfmt", entry_array_offset="OFSfmt, o->data.next_hash_offset, o->data.next_field_offset, o->data.entry_offset, @@ -193,7 +193,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o case OBJECT_FIELD: if (le64toh(o->object.size) - offsetof(FieldObject, payload) <= 0) { error(offset, - "bad field size (<= %zu): %"PRIu64, + "Bad field size (<= %zu): %"PRIu64, offsetof(FieldObject, payload), le64toh(o->object.size)); return -EBADMSG; @@ -202,7 +202,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o if (!VALID64(o->field.next_hash_offset) || !VALID64(o->field.head_data_offset)) { error(offset, - "invalid offset (next_hash_offset="OFSfmt", head_data_offset="OFSfmt, + "Invalid offset (next_hash_offset="OFSfmt", head_data_offset="OFSfmt, o->field.next_hash_offset, o->field.head_data_offset); return -EBADMSG; @@ -212,7 +212,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o case OBJECT_ENTRY: if ((le64toh(o->object.size) - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0) { error(offset, - "bad entry size (<= %zu): %"PRIu64, + "Bad entry size (<= %zu): %"PRIu64, offsetof(EntryObject, items), le64toh(o->object.size)); return -EBADMSG; @@ -220,28 +220,28 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o if ((le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0) { error(offset, - "invalid number items in entry: %"PRIu64, + "Invalid number items in entry: %"PRIu64, (le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem)); return -EBADMSG; } if (le64toh(o->entry.seqnum) <= 0) { error(offset, - "invalid entry seqnum: %"PRIx64, + "Invalid entry seqnum: %"PRIx64, le64toh(o->entry.seqnum)); return -EBADMSG; } if (!VALID_REALTIME(le64toh(o->entry.realtime))) { error(offset, - "invalid entry realtime timestamp: %"PRIu64, + "Invalid entry realtime timestamp: %"PRIu64, le64toh(o->entry.realtime)); return -EBADMSG; } if (!VALID_MONOTONIC(le64toh(o->entry.monotonic))) { error(offset, - "invalid entry monotonic timestamp: %"PRIu64, + "Invalid entry monotonic timestamp: %"PRIu64, le64toh(o->entry.monotonic)); return -EBADMSG; } @@ -250,7 +250,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o if (o->entry.items[i].object_offset == 0 || !VALID64(o->entry.items[i].object_offset)) { error(offset, - "invalid entry item (%"PRIu64"/%"PRIu64" offset: "OFSfmt, + "Invalid entry item (%"PRIu64"/%"PRIu64" offset: "OFSfmt, i, journal_file_entry_n_items(o), o->entry.items[i].object_offset); return -EBADMSG; @@ -264,7 +264,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o if ((le64toh(o->object.size) - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 || (le64toh(o->object.size) - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0) { error(offset, - "invalid %s hash table size: %"PRIu64, + "Invalid %s hash table size: %"PRIu64, o->object.type == OBJECT_DATA_HASH_TABLE ? 
"data" : "field", le64toh(o->object.size)); return -EBADMSG; @@ -274,7 +274,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o if (o->hash_table.items[i].head_hash_offset != 0 && !VALID64(le64toh(o->hash_table.items[i].head_hash_offset))) { error(offset, - "invalid %s hash table item (%"PRIu64"/%"PRIu64") head_hash_offset: "OFSfmt, + "Invalid %s hash table item (%"PRIu64"/%"PRIu64") head_hash_offset: "OFSfmt, o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field", i, journal_file_hash_table_n_items(o), le64toh(o->hash_table.items[i].head_hash_offset)); @@ -283,7 +283,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o if (o->hash_table.items[i].tail_hash_offset != 0 && !VALID64(le64toh(o->hash_table.items[i].tail_hash_offset))) { error(offset, - "invalid %s hash table item (%"PRIu64"/%"PRIu64") tail_hash_offset: "OFSfmt, + "Invalid %s hash table item (%"PRIu64"/%"PRIu64") tail_hash_offset: "OFSfmt, o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field", i, journal_file_hash_table_n_items(o), le64toh(o->hash_table.items[i].tail_hash_offset)); @@ -293,7 +293,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o if ((o->hash_table.items[i].head_hash_offset != 0) != (o->hash_table.items[i].tail_hash_offset != 0)) { error(offset, - "invalid %s hash table item (%"PRIu64"/%"PRIu64"): head_hash_offset="OFSfmt" tail_hash_offset="OFSfmt, + "Invalid %s hash table item (%"PRIu64"/%"PRIu64"): head_hash_offset="OFSfmt" tail_hash_offset="OFSfmt, o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field", i, journal_file_hash_table_n_items(o), le64toh(o->hash_table.items[i].head_hash_offset), @@ -308,14 +308,14 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o if ((le64toh(o->object.size) - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 || (le64toh(o->object.size) - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0) { error(offset, - "invalid object entry array size: %"PRIu64, + "Invalid object entry array size: %"PRIu64, le64toh(o->object.size)); return -EBADMSG; } if (!VALID64(o->entry_array.next_entry_array_offset)) { error(offset, - "invalid object entry array next_entry_array_offset: "OFSfmt, + "Invalid object entry array next_entry_array_offset: "OFSfmt, o->entry_array.next_entry_array_offset); return -EBADMSG; } @@ -324,7 +324,7 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o if (le64toh(o->entry_array.items[i]) != 0 && !VALID64(le64toh(o->entry_array.items[i]))) { error(offset, - "invalid object entry array item (%"PRIu64"/%"PRIu64"): "OFSfmt, + "Invalid object entry array item (%"PRIu64"/%"PRIu64"): "OFSfmt, i, journal_file_entry_array_n_items(o), le64toh(o->entry_array.items[i])); return -EBADMSG; @@ -335,14 +335,14 @@ static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o case OBJECT_TAG: if (le64toh(o->object.size) != sizeof(TagObject)) { error(offset, - "invalid object tag size: %"PRIu64, + "Invalid object tag size: %"PRIu64, le64toh(o->object.size)); return -EBADMSG; } if (!VALID_EPOCH(o->tag.epoch)) { error(offset, - "invalid object tag epoch: %"PRIu64, + "Invalid object tag epoch: %"PRIu64, o->tag.epoch); return -EBADMSG; } @@ -415,8 +415,7 @@ static int entry_points_to_data( assert(entry_fd >= 0); if (!contains_uint64(f->mmap, entry_fd, n_entries, entry_p)) { - error(data_p, - "data object references invalid entry at "OFSfmt, entry_p); + error(data_p, "Data object 
references invalid entry at "OFSfmt, entry_p); return -EBADMSG; } @@ -432,8 +431,7 @@ static int entry_points_to_data( } if (!found) { - error(entry_p, - "data object at "OFSfmt" not referenced by linked entry", data_p); + error(entry_p, "Data object at "OFSfmt" not referenced by linked entry", data_p); return -EBADMSG; } @@ -476,7 +474,7 @@ static int entry_points_to_data( x = z; } - error(entry_p, "entry object doesn't exist in main entry array"); + error(entry_p, "Entry object doesn't exist in main entry array"); return -EBADMSG; } @@ -506,9 +504,7 @@ static int verify_data( /* Entry array means at least two objects */ if (a && n < 2) { - error(p, - "entry array present (entry_array_offset="OFSfmt", but n_entries=%"PRIu64")", - a, n); + error(p, "Entry array present (entry_array_offset="OFSfmt", but n_entries=%"PRIu64")", a, n); return -EBADMSG; } @@ -528,12 +524,12 @@ static int verify_data( uint64_t next, m, j; if (a == 0) { - error(p, "array chain too short"); + error(p, "Array chain too short"); return -EBADMSG; } if (!contains_uint64(f->mmap, entry_array_fd, n_entry_arrays, a)) { - error(p, "invalid array offset "OFSfmt, a); + error(p, "Invalid array offset "OFSfmt, a); return -EBADMSG; } @@ -543,8 +539,7 @@ static int verify_data( next = le64toh(o->entry_array.next_entry_array_offset); if (next != 0 && next <= a) { - error(p, "array chain has cycle (jumps back from "OFSfmt" to "OFSfmt")", - a, next); + error(p, "Array chain has cycle (jumps back from "OFSfmt" to "OFSfmt")", a, next); return -EBADMSG; } @@ -553,7 +548,7 @@ static int verify_data( q = le64toh(o->entry_array.items[j]); if (q <= last) { - error(p, "data object's entry array not sorted"); + error(p, "Data object's entry array not sorted"); return -EBADMSG; } last = q; @@ -611,8 +606,7 @@ static int verify_hash_table( uint64_t next; if (!contains_uint64(f->mmap, data_fd, n_data, p)) { - error(p, "invalid data object at hash entry %"PRIu64" of %"PRIu64, - i, n); + error(p, "Invalid data object at hash entry %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } @@ -622,14 +616,12 @@ static int verify_hash_table( next = le64toh(o->data.next_hash_offset); if (next != 0 && next <= p) { - error(p, "hash chain has a cycle in hash entry %"PRIu64" of %"PRIu64, - i, n); + error(p, "Hash chain has a cycle in hash entry %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } if (le64toh(o->data.hash) % n != i) { - error(p, "hash value mismatch in hash entry %"PRIu64" of %"PRIu64, - i, n); + error(p, "Hash value mismatch in hash entry %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } @@ -642,7 +634,7 @@ static int verify_hash_table( } if (last != le64toh(f->data_hash_table[i].tail_hash_offset)) { - error(p, "tail hash pointer mismatch in hash table"); + error(p, "Tail hash pointer mismatch in hash table"); return -EBADMSG; } } @@ -703,16 +695,16 @@ static int verify_entry( h = le64toh(o->entry.items[i].hash); if (!contains_uint64(f->mmap, data_fd, n_data, q)) { - error(p, "invalid data object of entry"); - return -EBADMSG; - } + error(p, "Invalid data object of entry"); + return -EBADMSG; + } r = journal_file_move_to_object(f, OBJECT_DATA, q, &u); if (r < 0) return r; if (le64toh(u->data.hash) != h) { - error(p, "hash mismatch for data object of entry"); + error(p, "Hash mismatch for data object of entry"); return -EBADMSG; } @@ -720,7 +712,7 @@ static int verify_entry( if (r < 0) return r; if (r == 0) { - error(p, "data object missing from hash table"); + error(p, "Data object missing from hash table"); return -EBADMSG; } } @@ -755,12 +747,12 
@@ static int verify_entry_array( draw_progress(0x8000 + scale_progress(0x3FFF, i, n), last_usec); if (a == 0) { - error(a, "array chain too short at %"PRIu64" of %"PRIu64, i, n); + error(a, "Array chain too short at %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } if (!contains_uint64(f->mmap, entry_array_fd, n_entry_arrays, a)) { - error(a, "invalid array %"PRIu64" of %"PRIu64, i, n); + error(a, "Invalid array %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } @@ -770,9 +762,7 @@ static int verify_entry_array( next = le64toh(o->entry_array.next_entry_array_offset); if (next != 0 && next <= a) { - error(a, - "array chain has cycle at %"PRIu64" of %"PRIu64" (jumps back from to "OFSfmt")", - i, n, next); + error(a, "Array chain has cycle at %"PRIu64" of %"PRIu64" (jumps back from to "OFSfmt")", i, n, next); return -EBADMSG; } @@ -782,15 +772,13 @@ static int verify_entry_array( p = le64toh(o->entry_array.items[j]); if (p <= last) { - error(a, "entry array not sorted at %"PRIu64" of %"PRIu64, - i, n); + error(a, "Entry array not sorted at %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } last = p; if (!contains_uint64(f->mmap, entry_fd, n_entries, p)) { - error(a, "invalid array entry at %"PRIu64" of %"PRIu64, - i, n); + error(a, "Invalid array entry at %"PRIu64" of %"PRIu64, i, n); return -EBADMSG; } @@ -878,7 +866,7 @@ int journal_file_verify( for (i = 0; i < sizeof(f->header->reserved); i++) if (f->header->reserved[i] != 0) { - error(offsetof(Header, reserved[i]), "reserved field is non-zero"); + error(offsetof(Header, reserved[i]), "Reserved field is non-zero"); r = -EBADMSG; goto fail; } @@ -897,12 +885,12 @@ int journal_file_verify( r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o); if (r < 0) { - error(p, "invalid object"); + error(p, "Invalid object"); goto fail; } if (p > le64toh(f->header->tail_object_offset)) { - error(offsetof(Header, tail_object_offset), "invalid tail object pointer"); + error(offsetof(Header, tail_object_offset), "Invalid tail object pointer"); r = -EBADMSG; goto fail; } @@ -911,13 +899,13 @@ int journal_file_verify( r = journal_file_object_verify(f, p, o); if (r < 0) { - error(p, "invalid object contents: %s", strerror(-r)); + error(p, "Invalid object contents: %s", strerror(-r)); goto fail; } if ((o->object.flags & OBJECT_COMPRESSED_XZ) && (o->object.flags & OBJECT_COMPRESSED_LZ4)) { - error(p, "objected with double compression"); + error(p, "Objected with double compression"); r = -EINVAL; goto fail; } @@ -950,7 +938,7 @@ int journal_file_verify( case OBJECT_ENTRY: if (JOURNAL_HEADER_SEALED(f->header) && n_tags <= 0) { - error(p, "first entry before first tag"); + error(p, "First entry before first tag"); r = -EBADMSG; goto fail; } @@ -960,21 +948,21 @@ int journal_file_verify( goto fail; if (le64toh(o->entry.realtime) < last_tag_realtime) { - error(p, "older entry after newer tag"); + error(p, "Older entry after newer tag"); r = -EBADMSG; goto fail; } if (!entry_seqnum_set && le64toh(o->entry.seqnum) != le64toh(f->header->head_entry_seqnum)) { - error(p, "head entry sequence number incorrect"); + error(p, "Head entry sequence number incorrect"); r = -EBADMSG; goto fail; } if (entry_seqnum_set && entry_seqnum >= le64toh(o->entry.seqnum)) { - error(p, "entry sequence number out of synchronization"); + error(p, "Entry sequence number out of synchronization"); r = -EBADMSG; goto fail; } @@ -985,7 +973,7 @@ int journal_file_verify( if (entry_monotonic_set && sd_id128_equal(entry_boot_id, o->entry.boot_id) && entry_monotonic > le64toh(o->entry.monotonic)) { -
error(p, "entry timestamp out of synchronization"); + error(p, "Entry timestamp out of synchronization"); r = -EBADMSG; goto fail; } @@ -996,7 +984,7 @@ int journal_file_verify( if (!entry_realtime_set && le64toh(o->entry.realtime) != le64toh(f->header->head_entry_realtime)) { - error(p, "head entry realtime timestamp incorrect"); + error(p, "Head entry realtime timestamp incorrect"); r = -EBADMSG; goto fail; } @@ -1009,7 +997,7 @@ int journal_file_verify( case OBJECT_DATA_HASH_TABLE: if (n_data_hash_tables > 1) { - error(p, "more than one data hash table"); + error(p, "More than one data hash table"); r = -EBADMSG; goto fail; } @@ -1026,14 +1014,14 @@ int journal_file_verify( case OBJECT_FIELD_HASH_TABLE: if (n_field_hash_tables > 1) { - error(p, "more than one field hash table"); + error(p, "More than one field hash table"); r = -EBADMSG; goto fail; } if (le64toh(f->header->field_hash_table_offset) != p + offsetof(HashTableObject, items) || le64toh(f->header->field_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) { - error(p, "header fields for field hash table invalid"); + error(p, "Header fields for field hash table invalid"); r = -EBADMSG; goto fail; } @@ -1048,7 +1036,7 @@ int journal_file_verify( if (p == le64toh(f->header->entry_array_offset)) { if (found_main_entry_array) { - error(p, "more than one main entry array"); + error(p, "More than one main entry array"); r = -EBADMSG; goto fail; } @@ -1061,19 +1049,19 @@ int journal_file_verify( case OBJECT_TAG: if (!JOURNAL_HEADER_SEALED(f->header)) { - error(p, "tag object in file without sealing"); + error(p, "Tag object in file without sealing"); r = -EBADMSG; goto fail; } if (le64toh(o->tag.seqnum) != n_tags + 1) { - error(p, "tag sequence number out of synchronization"); + error(p, "Tag sequence number out of synchronization"); r = -EBADMSG; goto fail; } if (le64toh(o->tag.epoch) < last_epoch) { - error(p, "epoch sequence out of synchronization"); + error(p, "Epoch sequence out of synchronization"); r = -EBADMSG; goto fail; } @@ -1082,7 +1070,7 @@ int journal_file_verify( if (f->seal) { uint64_t q, rt; - debug(p, "checking tag %"PRIu64"...", le64toh(o->tag.seqnum)); + debug(p, "Checking tag %"PRIu64"...", le64toh(o->tag.seqnum)); rt = f->fss_start_usec + o->tag.epoch * f->fss_interval_usec; if (entry_realtime_set && entry_realtime >= rt + f->fss_interval_usec) { @@ -1129,7 +1117,7 @@ int journal_file_verify( goto fail; if (memcmp(o->tag.tag, gcry_md_read(f->hmac, 0), TAG_LENGTH) != 0) { - error(p, "tag failed verification"); + error(p, "Tag failed verification"); r = -EBADMSG; goto fail; } @@ -1160,60 +1148,60 @@ int journal_file_verify( }; if (!found_last && le64toh(f->header->tail_object_offset) != 0) { - error(le64toh(f->header->tail_object_offset), "tail object pointer dead"); + error(le64toh(f->header->tail_object_offset), "Tail object pointer dead"); r = -EBADMSG; goto fail; } if (n_objects != le64toh(f->header->n_objects)) { - error(offsetof(Header, n_objects), "object number mismatch"); + error(offsetof(Header, n_objects), "Object number mismatch"); r = -EBADMSG; goto fail; } if (n_entries != le64toh(f->header->n_entries)) { - error(offsetof(Header, n_entries), "entry number mismatch"); + error(offsetof(Header, n_entries), "Entry number mismatch"); r = -EBADMSG; goto fail; } if (JOURNAL_HEADER_CONTAINS(f->header, n_data) && n_data != le64toh(f->header->n_data)) { - error(offsetof(Header, n_data), "data number mismatch"); + error(offsetof(Header, n_data), "Data number mismatch"); r = -EBADMSG; 
goto fail; } if (JOURNAL_HEADER_CONTAINS(f->header, n_fields) && n_fields != le64toh(f->header->n_fields)) { - error(offsetof(Header, n_fields), "field number mismatch"); + error(offsetof(Header, n_fields), "Field number mismatch"); r = -EBADMSG; goto fail; } if (JOURNAL_HEADER_CONTAINS(f->header, n_tags) && n_tags != le64toh(f->header->n_tags)) { - error(offsetof(Header, n_tags), "tag number mismatch"); + error(offsetof(Header, n_tags), "Tag number mismatch"); r = -EBADMSG; goto fail; } if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays) && n_entry_arrays != le64toh(f->header->n_entry_arrays)) { - error(offsetof(Header, n_entry_arrays), "entry array number mismatch"); + error(offsetof(Header, n_entry_arrays), "Entry array number mismatch"); r = -EBADMSG; goto fail; } if (!found_main_entry_array && le64toh(f->header->entry_array_offset) != 0) { - error(0, "missing entry array"); + error(0, "Missing entry array"); r = -EBADMSG; goto fail; } if (entry_seqnum_set && entry_seqnum != le64toh(f->header->tail_entry_seqnum)) { - error(offsetof(Header, tail_entry_seqnum), "invalid tail seqnum"); + error(offsetof(Header, tail_entry_seqnum), "Invalid tail seqnum"); r = -EBADMSG; goto fail; } @@ -1221,13 +1209,13 @@ int journal_file_verify( if (entry_monotonic_set && (!sd_id128_equal(entry_boot_id, f->header->boot_id) || entry_monotonic != le64toh(f->header->tail_entry_monotonic))) { - error(0, "invalid tail monotonic timestamp"); + error(0, "Invalid tail monotonic timestamp"); r = -EBADMSG; goto fail; } if (entry_realtime_set && entry_realtime != le64toh(f->header->tail_entry_realtime)) { - error(0, "invalid tail realtime timestamp"); + error(0, "Invalid tail realtime timestamp"); r = -EBADMSG; goto fail; } -- cgit v1.2.3-54-g00ecf
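As a closing illustration, the saturating helper introduced in the first patch of this series is easy to exercise in isolation. The scale_progress() body below is copied from that patch; the main() harness and its expected values are an illustrative addition, not part of the patches.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copied from the first patch: scale * p / m, but m == 0 is handled
 * safely and the result saturates at 'scale'. */
static uint64_t scale_progress(uint64_t scale, uint64_t p, uint64_t m) {

        if (p >= m || m == 0)
                return scale;

        return scale * p / m;
}

int main(void) {
        /* Halfway through 1000 objects maps to about half of the 0x3FFF range. */
        assert(scale_progress(0x3FFF, 500, 1000) == 8191);

        /* m == 0 previously meant a division by zero (SIGFPE); now it saturates. */
        assert(scale_progress(0x3FFF, 0, 0) == 0x3FFF);

        /* p past m also saturates, so the progress value never overshoots. */
        assert(scale_progress(0x3FFF, 2000, 1000) == 0x3FFF);

        printf("scale_progress() behaves as described\n");
        return 0;
}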