author    Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>  2015-10-24 23:28:07 -0400
committer Lennart Poettering <lennart@poettering.net>      2015-11-03 00:23:16 +0100
commit    2c1a55cf3fe78bcf728f73487813ddd3ee020a98 (patch)
tree      66b8bb774c344799884ac6a65195a5f7636895d5 /src/journal-remote
parent    6ad623a3f77e087e308f334525fd4046811f2a9a (diff)
log-generator: add option to generate easily compressible data
This is useful to check that compression actually works, and to see how compression influences file size in the best-case scenario for compression. (The answer: not as much as one would hope. There is still significant indexing overhead, and since every field is compressed separately, even fields that compress very well contribute to the file size. This overhead becomes negligible only for very large fields.)
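A minimal sketch (not part of the commit) of why the zero-padded counter blobs are a best case for compression, using the same '{:0{}}' pattern as the patch below. zlib here is only a stand-in for whatever compressor the journal is actually configured with:

    import os
    import zlib

    SIZE = 4000  # mirrors the --data-size default added in the patch

    # 'simple' data: a counter zero-padded to SIZE digits, as in the patch
    simple = '{:0{}}'.format(1234, SIZE).encode()
    # 'random' data: incompressible bytes, as read from /dev/urandom
    random_blob = os.urandom(SIZE)

    print(len(zlib.compress(simple)))       # a few dozen bytes
    print(len(zlib.compress(random_blob)))  # roughly SIZE bytes

The blobs compress almost perfectly, yet each one differs from the last, so identical-entry deduplication cannot mask the effect being measured.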
Diffstat (limited to 'src/journal-remote')
-rwxr-xr-x  src/journal-remote/log-generator.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/src/journal-remote/log-generator.py b/src/journal-remote/log-generator.py
index 9a8fb07c7f..fd6964e758 100755
--- a/src/journal-remote/log-generator.py
+++ b/src/journal-remote/log-generator.py
@@ -6,6 +6,8 @@ import argparse
 PARSER = argparse.ArgumentParser()
 PARSER.add_argument('n', type=int)
 PARSER.add_argument('--dots', action='store_true')
+PARSER.add_argument('--data-size', type=int, default=4000)
+PARSER.add_argument('--data-type', choices={'random', 'simple'})
 OPTIONS = PARSER.parse_args()
 
 template = """\
@@ -38,10 +40,16 @@ facility = 6
 src = open('/dev/urandom', 'rb')
 bytes = 0
+counter = 0
 for i in range(OPTIONS.n):
     message = repr(src.read(2000))
-    data = repr(src.read(4000))
+    if OPTIONS.data_type == 'random':
+        data = repr(src.read(OPTIONS.data_size))
+    else:
+        # keep the pattern non-repeating so we get a different blob every time
+        data = '{:0{}}'.format(counter, OPTIONS.data_size)
+        counter += 1
     entry = template.format(m=m,
                             realtime_ts=realtime_ts,
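For reference, a hypothetical invocation exercising the new options (the flag names come from the argparse definitions above; the entry count and output redirection are illustrative, not from the commit):

    ./log-generator.py 10000 --data-type simple --data-size 4000 > compressible.export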