path: root/src/journal/sd-journal.c
author     Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>  2014-10-09 22:44:29 -0400
committer  Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>  2014-10-09 22:44:29 -0400
commit     360af4cf6f18469df97c11af4cd5696e0ca8b3ef (patch)
tree       4664de7fc976ca0075573a9ecd9297a752c13d57 /src/journal/sd-journal.c
parent     f280bcfb21aacce03abf17b3ad732c1351df42b9 (diff)
sd-journal: do not reset sd_j_enumerate_unique position on error
systemctl would call sd_j_enumerate_unique() interleaved with sd_j_next(). But the latter can remove a file if it detects an error in it. In those circumstances sd_j_enumerate_unique would restart with the first file in the hashmap. With many corrupted files sd_j_enumerate_unique might iterate over the list multiple times. Avoid this by jumping to the next file in the unique list if possible, or, if there is none, by setting a flag that tells sd_j_enumerate_unique that it is done.
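For context, here is a rough sketch of how a consumer typically drives the unique-entry API that this patch fixes. It uses only the public sd-journal interface (sd_journal_open, sd_journal_query_unique and the SD_JOURNAL_FOREACH_UNIQUE convenience macro); the "_SYSTEMD_UNIT" field is just an example choice, and the program is illustrative rather than code from this commit:

/* Illustrative sketch, not part of this commit: enumerate all distinct
 * values of one journal field. Error handling is minimal for brevity. */
#include <stdio.h>
#include <systemd/sd-journal.h>

int main(void) {
        sd_journal *j;
        const void *data;
        size_t length;
        int r;

        r = sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY);
        if (r < 0)
                return 1;

        /* Restrict unique enumeration to a single field. */
        r = sd_journal_query_unique(j, "_SYSTEMD_UNIT");
        if (r < 0) {
                sd_journal_close(j);
                return 1;
        }

        /* Each iteration yields one "FIELD=value" payload. With the fix
         * below, losing a corrupted file mid-iteration no longer restarts
         * the walk from the first file in the hashmap. */
        SD_JOURNAL_FOREACH_UNIQUE(j, data, length)
                printf("%.*s\n", (int) length, (const char*) data);

        sd_journal_close(j);
        return 0;
}

The problem fixed here showed up when such an enumeration ran interleaved with sd_journal_next(), which may drop a corrupted file from j->files: before the patch, losing the file that unique_file pointed to reset the walk, so heavily corrupted journals could be iterated over several times. Build the sketch against libsystemd (e.g. via pkg-config --cflags --libs libsystemd).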
Diffstat (limited to 'src/journal/sd-journal.c')
-rw-r--r--  src/journal/sd-journal.c  18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/src/journal/sd-journal.c b/src/journal/sd-journal.c
index 1fc9f01d0a..b72a0867e7 100644
--- a/src/journal/sd-journal.c
+++ b/src/journal/sd-journal.c
@@ -1375,8 +1375,11 @@ static void remove_file_real(sd_journal *j, JournalFile *f) {
         }
 
         if (j->unique_file == f) {
-                j->unique_file = NULL;
+                /* Jump to the next unique_file or NULL if that one was last */
+                j->unique_file = hashmap_next(j->files, j->unique_file->path);
                 j->unique_offset = 0;
+                if (!j->unique_file)
+                        j->unique_file_lost = true;
         }
 
         journal_file_close(f);
@@ -2490,6 +2493,7 @@ _public_ int sd_journal_query_unique(sd_journal *j, const char *field) {
         j->unique_field = f;
         j->unique_file = NULL;
         j->unique_offset = 0;
+        j->unique_file_lost = false;
 
         return 0;
 }
@@ -2506,9 +2510,13 @@ _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_
         k = strlen(j->unique_field);
 
         if (!j->unique_file) {
+                if (j->unique_file_lost)
+                        return 0;
+
                 j->unique_file = hashmap_first(j->files);
                 if (!j->unique_file)
                         return 0;
+
                 j->unique_offset = 0;
         }
@@ -2538,13 +2546,10 @@ _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_
 
                 /* We reached the end of the list? Then start again, with the next file */
                 if (j->unique_offset == 0) {
-                        JournalFile *n;
-
-                        n = hashmap_next(j->files, j->unique_file->path);
-                        if (!n)
+                        j->unique_file = hashmap_next(j->files, j->unique_file->path);
+                        if (!j->unique_file)
                                 return 0;
 
-                        j->unique_file = n;
                         continue;
                 }
@@ -2632,6 +2637,7 @@ _public_ void sd_journal_restart_unique(sd_journal *j) {
 
         j->unique_file = NULL;
         j->unique_offset = 0;
+        j->unique_file_lost = false;
 }
 
 _public_ int sd_journal_reliable_fd(sd_journal *j) {