path: root/src/shared/hashmap.c
author     Lennart Poettering <lennart@poettering.net>    2012-10-26 03:24:03 +0200
committer  Lennart Poettering <lennart@poettering.net>    2012-10-26 03:24:03 +0200
commit     a4bcff5ba36115495994e9f9ba66074471de76ab (patch)
tree       16929d0178f57cfe21b6da81ae3e8488ed289b50 /src/shared/hashmap.c
parent     7fb4d896e1ff018d91498f0e83b02e2534644907 (diff)
journal: introduce entry array chain cache
When traversing entry array chains for a bisection or for retrieving an item by index we previously always started at the beginning of the chain. Since we tend to look at the same chains repeatedly, let's cache where we have been the last time, and maybe we can skip ahead with this the next time.

This turns most bisections and index lookups from O(log(n)*log(n)) operations into O(log(n)) operations.

More importantly however, we seek around on disk much less, which is good to reduce buffer cache pressure and seek times on rotational disks.
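As a rough illustration of the caching idea (a minimal sketch; ChainCacheItem, chain_cache_start and the field names below are illustrative, not necessarily the journal's actual structures):

#include <stdint.h>

/* Remembers, for one entry array chain (identified by the offset of
 * its head array object), where the last traversal ended up. */
typedef struct ChainCacheItem {
        uint64_t first;   /* offset of the first array object in the chain */
        uint64_t array;   /* offset of the array object we stopped at last time */
        uint64_t begin;   /* absolute index of the first entry in that object */
} ChainCacheItem;

/* Pick a starting point for looking up entry index *i in the chain
 * rooted at 'first'. On a cache hit we resume mid-chain and make *i
 * relative to the cached array object; on a miss we start at the head. */
uint64_t chain_cache_start(const ChainCacheItem *c, uint64_t first, uint64_t *i) {
        if (c && c->first == first && *i >= c->begin) {
                *i -= c->begin;
                return c->array;
        }
        return first;
}

On a hit, repeated lookups over the same chain no longer rewind to the head, which is where the O(log(n)*log(n)) to O(log(n)) improvement comes from.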
Diffstat (limited to 'src/shared/hashmap.c')
-rw-r--r--  src/shared/hashmap.c | 19 +++++++++++++++++++
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/src/shared/hashmap.c b/src/shared/hashmap.c
index ef78070f4c..dcfbb67228 100644
--- a/src/shared/hashmap.c
+++ b/src/shared/hashmap.c
@@ -147,6 +147,25 @@ int trivial_compare_func(const void *a, const void *b) {
         return a < b ? -1 : (a > b ? 1 : 0);
 }
 
+unsigned uint64_hash_func(const void *p) {
+        uint64_t u;
+
+        assert_cc(sizeof(uint64_t) == 2*sizeof(unsigned));
+
+        u = *(const uint64_t*) p;
+
+        return (unsigned) ((u >> 32) ^ u);
+}
+
+int uint64_compare_func(const void *_a, const void *_b) {
+        uint64_t a, b;
+
+        a = *(const uint64_t*) _a;
+        b = *(const uint64_t*) _b;
+
+        return a < b ? -1 : (a > b ? 1 : 0);
+}
+
 Hashmap *hashmap_new(hash_func_t hash_func, compare_func_t compare_func) {
         bool b;
         Hashmap *h;
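For context, the new helpers are meant to plug into the hashmap API visible above, giving a map keyed directly on 64-bit values such as file offsets. A minimal sketch, assuming the era's API in which hashmap_new() returns NULL on allocation failure; the caller offset_map_new() is hypothetical and not part of this commit:

#include <errno.h>

#include "hashmap.h"

/* Hypothetical caller: build a hashmap keyed on uint64_t values,
 * using the hash and compare helpers introduced above. */
int offset_map_new(Hashmap **ret) {
        Hashmap *h;

        h = hashmap_new(uint64_hash_func, uint64_compare_func);
        if (!h)
                return -ENOMEM;

        *ret = h;
        return 0;
}

Note the design of uint64_hash_func: it folds the upper 32 bits of the key into the lower ones with an XOR before truncating, and the assert_cc() guards the assumption that unsigned is exactly half the width of uint64_t on the target.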