[Sheepdog] [PATCH 1/2] sheep: add object cache implemented by hashtable

Li Wenpeng levin108 at gmail.com
Thu Mar 1 12:16:24 CET 2012


From: levin li <xingke.lwp at taobao.com>

Add an object list cache implemented with a hash table that expands
itself automatically once the number of cached objects exceeds twice
the number of hash buckets.

Signed-off-by: levin li <xingke.lwp at taobao.com>
---
 sheep/store.c |  158 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 158 insertions(+), 0 deletions(-)
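
For reference, here is a minimal stand-alone sketch of the same policy the
patch implements (grow the table once the number of cached oids exceeds
twice the bucket count). It is illustration only, not part of the patch: it
uses a toy multiplicative hash and plain singly linked lists instead of
sheep's hash_64()/hlist helpers, and all names (toy_cache, toy_insert, ...)
are invented for the example. Error handling and teardown are trimmed.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_entry {
	uint64_t oid;
	struct toy_entry *next;
};

struct toy_cache {
	uint64_t count;			/* number of cached oids */
	uint64_t nr_buckets;		/* always a power of two */
	struct toy_entry **buckets;
};

static uint64_t toy_hash(uint64_t oid, uint64_t nr_buckets)
{
	/* nr_buckets is a power of two, so masking selects a bucket */
	return (oid * 0x9e3779b97f4a7c15ULL) & (nr_buckets - 1);
}

/* Double the bucket array and rehash every entry into it. */
static int toy_expand(struct toy_cache *c)
{
	uint64_t i, h, nr = c->nr_buckets * 2;
	struct toy_entry **nb = calloc(nr, sizeof(*nb)), *e, *next;

	if (!nb)
		return -1;
	for (i = 0; i < c->nr_buckets; i++)
		for (e = c->buckets[i]; e; e = next) {
			next = e->next;
			h = toy_hash(e->oid, nr);
			e->next = nb[h];
			nb[h] = e;
		}
	free(c->buckets);
	c->buckets = nb;
	c->nr_buckets = nr;
	return 0;
}

/* Insert oid unless it is already cached; grow first if too full. */
static int toy_insert(struct toy_cache *c, uint64_t oid)
{
	struct toy_entry *e;
	uint64_t h;

	if (c->count > c->nr_buckets * 2)
		toy_expand(c);		/* same trigger as the patch */

	h = toy_hash(oid, c->nr_buckets);
	for (e = c->buckets[h]; e; e = e->next)
		if (e->oid == oid)
			return 0;	/* already cached */

	e = calloc(1, sizeof(*e));
	if (!e)
		return -1;
	e->oid = oid;
	e->next = c->buckets[h];
	c->buckets[h] = e;
	c->count++;
	return 0;
}

int main(void)
{
	struct toy_cache c = { 0, 8, calloc(8, sizeof(struct toy_entry *)) };
	uint64_t oid;

	if (!c.buckets)
		return 1;
	for (oid = 1; oid <= 100; oid++)
		toy_insert(&c, oid);
	/* prints "100 entries in 64 buckets" */
	printf("%" PRIu64 " entries in %" PRIu64 " buckets\n",
	       c.count, c.nr_buckets);
	return 0;
}
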

diff --git a/sheep/store.c b/sheep/store.c
index 4d90923..e90bf96 100644
--- a/sheep/store.c
+++ b/sheep/store.c
@@ -22,6 +22,7 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <time.h>
+#include <pthread.h>
 
 #include "sheep_priv.h"
 #include "strbuf.h"
@@ -41,12 +42,123 @@ static char *mnt_path;
 static char *jrnl_path;
 static char *config_path;
 
+struct objlist_cache {
+	uint64_t cache_size;
+	uint64_t hash_size;
+	uint8_t hash_bits;
+	struct hlist_head *hashtable;
+	pthread_rwlock_t lock;
+};
+
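+/* One cached oid, linked into its hash bucket. */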
+struct objlist_cache_entry {
+	uint64_t oid;
+	struct hlist_node list;
+};
+
+static struct objlist_cache obj_list_cache;
+
 mode_t def_dmode = S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP | S_IWGRP | S_IXGRP;
 mode_t def_fmode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
 
 struct store_driver *sd_store;
 LIST_HEAD(store_drivers);
 
+/* Return 1 if oid is already in the object list cache, 0 otherwise. */
+static int lookup_obj_cache(uint64_t oid)
+{
+	int h;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct objlist_cache_entry *cache_entry;
+
+	pthread_rwlock_rdlock(&obj_list_cache.lock);
+	/* Compute the bucket under the lock so that a concurrent
+	 * expansion cannot change hash_bits underneath us. */
+	h = hash_64(oid, obj_list_cache.hash_bits);
+	head = obj_list_cache.hashtable + h;
+
+	hlist_for_each_entry(cache_entry, node, head, list) {
+		if (cache_entry->oid == oid) {
+			pthread_rwlock_unlock(&obj_list_cache.lock);
+			return 1;
+		}
+	}
+
+	pthread_rwlock_unlock(&obj_list_cache.lock);
+
+	return 0;
+}
+
+/* Double the number of hash buckets and rehash every cached entry.
+ * The caller must hold obj_list_cache.lock for writing. */
+static int expand_cache_hashtable(void)
+{
+	int i, h;
+	struct hlist_head *head;
+	struct hlist_node *node, *n;
+	struct objlist_cache_entry *entry;
+	uint64_t old_hash_size;
+	struct hlist_head *old_hash_table;
+
+	old_hash_size = obj_list_cache.hash_size;
+	old_hash_table = obj_list_cache.hashtable;
+
+	dprintf("expanding cache hashtable.\n");
+
+	obj_list_cache.hash_bits++;
+	obj_list_cache.hash_size = 1 << obj_list_cache.hash_bits;
+	obj_list_cache.hashtable = zalloc(sizeof(struct hlist_head) *
+					  obj_list_cache.hash_size);
+	if (!obj_list_cache.hashtable) {
+		eprintf("failed to expand cache hashtable, out of memory.\n");
+		/* roll back */
+		obj_list_cache.hash_bits--;
+		obj_list_cache.hash_size = old_hash_size;
+		obj_list_cache.hashtable = old_hash_table;
+		return -1;
+	}
+
+	for (i = 0; i < obj_list_cache.hash_size; i++)
+		INIT_HLIST_HEAD(obj_list_cache.hashtable + i);
+
+	for (i = 0; i < old_hash_size; i++) {
+		head = old_hash_table + i;
+		hlist_for_each_entry_safe(entry, node, n, head, list) {
+			hlist_del(&entry->list);
+			h = hash_64(entry->oid, obj_list_cache.hash_bits);
+			/* No duplicate check is needed here: the old hash
+			 * table cannot contain duplicate oids. */
+			hlist_add_head(&entry->list,
+				       obj_list_cache.hashtable + h);
+		}
+	}
+
+	free(old_hash_table);
+
+	return 0;
+}
+
+/* Add oid to the object list cache if it is not already there. */
+static int check_and_insert_cache(uint64_t oid)
+{
+	struct objlist_cache_entry *entry;
+	int h;
+
+	/* The unlocked read of cache_size is only a heuristic for
+	 * deciding when to grow the table. */
+	if (obj_list_cache.cache_size > obj_list_cache.hash_size * 2) {
+		pthread_rwlock_wrlock(&obj_list_cache.lock);
+		expand_cache_hashtable();
+		pthread_rwlock_unlock(&obj_list_cache.lock);
+	}
+
+	if (lookup_obj_cache(oid))
+		return 0;
+
+	entry = zalloc(sizeof(*entry));
+	if (!entry) {
+		eprintf("no memory to allocate cache entry.\n");
+		return -1;
+	}
+	entry->oid = oid;
+
+	pthread_rwlock_wrlock(&obj_list_cache.lock);
+	/* Recompute the bucket under the write lock, as hash_bits may
+	 * have changed since the lookup. */
+	h = hash_64(oid, obj_list_cache.hash_bits);
+	obj_list_cache.cache_size++;
+	hlist_add_head(&entry->list, obj_list_cache.hashtable + h);
+	pthread_rwlock_unlock(&obj_list_cache.lock);
+
+	return 0;
+}
+
 static int obj_cmp(const void *oid1, const void *oid2)
 {
 	const uint64_t hval1 = fnv_64a_buf((void *)oid1, sizeof(uint64_t), FNV1A_64_INIT);
@@ -673,6 +785,8 @@ int store_create_and_write_obj(const struct sd_req *req, struct sd_rsp *rsp, voi
 			goto out;
 	}
 	ret = do_write_obj(&iocb, hdr, epoch, request->data);
+	if (ret == SD_RES_SUCCESS)
+		check_and_insert_cache(hdr->oid);
 out:
 	free(buf);
 	sd_store->close(hdr->oid, &iocb);
@@ -1953,6 +2067,45 @@ static int init_config_path(const char *base_path)
 	return 0;
 }
 
+/* Initialize the object list cache and preload it with the oids of
+ * the objects already present in the local store. */
+static int init_obj_cache(void)
+{
+	int i;
+	struct siocb iocb = { 0 };
+	uint64_t *buf;
+
+	pthread_rwlock_init(&obj_list_cache.lock, NULL);
+	obj_list_cache.hash_bits = 10;
+	obj_list_cache.hash_size = 1 << obj_list_cache.hash_bits;
+	obj_list_cache.hashtable = zalloc(obj_list_cache.hash_size *
+					  sizeof(struct hlist_head));
+	if (!obj_list_cache.hashtable) {
+		eprintf("no memory to allocate obj cache.\n");
+		return -1;
+	}
+
+	for (i = 0; i < obj_list_cache.hash_size; i++)
+		INIT_HLIST_HEAD(obj_list_cache.hashtable + i);
+
+	if (sd_store) {
+		buf = zalloc(1 << 22);
+		if (!buf) {
+			eprintf("no memory to allocate.\n");
+			return -1;
+		}
+
+		iocb.length = 0;
+		iocb.buf = buf;
+		sd_store->get_objlist(&iocb);
+
+		for (i = 0; i < iocb.length; i++)
+			check_and_insert_cache(buf[i]);
+
+		free(buf);
+	}
+
+	return 0;
+}
+
 int init_store(const char *d)
 {
 	int ret;
@@ -1991,6 +2144,11 @@ int init_store(const char *d)
 			return ret;
 	} else
 		dprintf("no store found\n");
+
+	ret = init_obj_cache();
+	if (ret)
+		return ret;
+
 	return ret;
 }
 
-- 
1.7.1



