author    Michal Schmidt <mschmidt@redhat.com>  2014-10-14 23:35:24 +0200
committer Michal Schmidt <mschmidt@redhat.com>  2014-10-23 17:38:02 +0200
commit    e4c691b59db60ef2e6d8e64766d6ae02cd0dd457 (patch)
tree      f6af93bdc6599e642099d3f9fc3a3b3e4ca2aee1 /src/shared/hashmap.c
parent    9700d6980f7c212b10a69399e6430b82a6f45587 (diff)
hashmap: introduce hashmap_reserve()
With the current hashmap implementation that uses chaining, placing a
reservation can serve two purposes (a usage sketch follows below):

- To optimize putting of entries if the number of entries to put is
  known. The reservation allocates buckets, so later resizing can be
  avoided.

- To avoid having very long bucket chains after using
  hashmap_move(_one).

In an alternative hashmap implementation it will serve an additional
purpose:

- To guarantee a subsequent hashmap_move(_one) will not fail with
  -ENOMEM (this never happens in the current implementation).
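A minimal usage sketch of the first purpose, assuming the systemd-internal
Hashmap API from hashmap.h (hashmap_reserve() is added by this patch,
hashmap_put() already exists; the put_all() helper is hypothetical):

#include "hashmap.h"

/* Put n known entries with a single up-front reservation, so the
 * bucket array is sized once instead of being grown incrementally
 * during the loop. Sketch only; put_all() is not part of the patch. */
static int put_all(Hashmap *h, char **keys, void **values, unsigned n) {
        unsigned i;
        int r;

        r = hashmap_reserve(h, n);
        if (r < 0)
                return r;

        for (i = 0; i < n; i++) {
                r = hashmap_put(h, keys[i], values[i]);
                if (r < 0)
                        return r;
        }

        return 0;
}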
Diffstat (limited to 'src/shared/hashmap.c')
-rw-r--r--  src/shared/hashmap.c | 32
1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/src/shared/hashmap.c b/src/shared/hashmap.c
index 0b456411d5..ebc2ef19bb 100644
--- a/src/shared/hashmap.c
+++ b/src/shared/hashmap.c
@@ -369,18 +369,26 @@ static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key
return NULL;
}
-static int resize_buckets(Hashmap *h) {
+static int resize_buckets(Hashmap *h, unsigned entries_add) {
struct hashmap_entry **n, *i;
- unsigned m;
+ unsigned m, new_n_entries, new_n_buckets;
uint8_t nkey[HASH_KEY_SIZE];
assert(h);
- if (_likely_(h->n_entries*4 < h->n_buckets*3))
+ new_n_entries = h->n_entries + entries_add;
+
+ /* overflow? */
+ if (_unlikely_(new_n_entries < entries_add || new_n_entries > UINT_MAX / 4))
+ return -ENOMEM;
+
+ new_n_buckets = new_n_entries * 4 / 3;
+
+ if (_likely_(new_n_buckets <= h->n_buckets))
return 0;
- /* Increase by four */
- m = (h->n_entries+1)*4-1;
+ /* Increase by four at least */
+ m = MAX((h->n_entries+1)*4-1, new_n_buckets);
/* If we hit OOM we simply risk packed hashmaps... */
n = new0(struct hashmap_entry*, m);
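The overflow guard in this hunk can be read in isolation. A standalone
sketch of the same arithmetic (needed_buckets() is a hypothetical name,
not part of the patch): unsigned addition wraps on overflow, so a sum
smaller than one addend means the addition overflowed, and the
UINT_MAX/4 bound keeps the subsequent multiplication by 4 safe.

#include <errno.h>
#include <limits.h>
#include <stdio.h>

/* Buckets needed to hold n_entries + entries_add entries at a load
 * factor of at most 3/4, i.e. buckets >= entries * 4/3. */
static int needed_buckets(unsigned n_entries, unsigned entries_add, unsigned *ret) {
        unsigned new_n_entries = n_entries + entries_add;

        /* Wrapped sum or a value whose *4 would overflow: give up. */
        if (new_n_entries < entries_add || new_n_entries > UINT_MAX / 4)
                return -ENOMEM;

        *ret = new_n_entries * 4 / 3;
        return 0;
}

int main(void) {
        unsigned b;

        if (needed_buckets(1000, 500, &b) == 0)
                printf("need %u buckets for 1500 entries\n", b); /* 2000 */
        return 0;
}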
@@ -432,7 +440,7 @@ static int __hashmap_put(Hashmap *h, const void *key, void *value, unsigned hash
struct hashmap_entry *e;
- if (resize_buckets(h) > 0)
+ if (resize_buckets(h, 1) > 0)
hash = bucket_hash(h, key);
if (h->from_pool)
@@ -795,6 +803,18 @@ int hashmap_merge(Hashmap *h, Hashmap *other) {
return 0;
}
+int hashmap_reserve(Hashmap *h, unsigned entries_add) {
+ int r;
+
+ assert(h);
+
+ r = resize_buckets(h, entries_add);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
void hashmap_move(Hashmap *h, Hashmap *other) {
struct hashmap_entry *e, *n;
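Tying the new call to the move case, a sketch of reserving ahead of a
move (merge_with_reservation() is hypothetical; hashmap_size() and
hashmap_move() are existing API). In the current chaining
implementation the reservation only avoids long bucket chains after
the move; in the alternative implementation mentioned in the commit
message it would guarantee the move cannot fail with -ENOMEM.

static int merge_with_reservation(Hashmap *dest, Hashmap *src) {
        int r;

        /* Make room for everything in src before moving it over. */
        r = hashmap_reserve(dest, hashmap_size(src));
        if (r < 0)
                return r;

        hashmap_move(dest, src);
        return 0;
}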