[sheepdog] [PATCH v2] obj list cache: add a buffer to object list cache as a second level cache

levin li levin108 at gmail.com
Wed May 23 09:47:57 CEST 2012


From: levin li <xingke.lwp at taobao.com>

It's inefficient for get_obj_list() to traverse the whole object
list rbtree on every call, so I add a buffer that caches the
flattened list as a second level cache. The cache keeps two version
numbers: tree_version, which is bumped whenever the rbtree changes,
and buf_version, which records the tree version the buffer was last
built from. In get_obj_list(), if the two versions differ the rbtree
has changed, so we rebuild the buffer from the tree; otherwise the
object list is unchanged and we can serve it with a single memcpy()
from the buffer instead of traversing the full rbtree.
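
To make the scheme concrete, here is a minimal, self-contained sketch of the
same two-version idea. It is illustrative only: the struct and function names
are hypothetical, a plain array stands in for the rbtree, and the reader
retakes the lock in write mode before touching the shared buffer, which is one
conventional way to rebuild state guarded by a pthread rwlock.

/*
 * Illustrative sketch of a two-version object list cache.
 * Hypothetical names; an array replaces the rbtree; bounds checks
 * are omitted for brevity. Not the actual sheep code.
 */
#include <pthread.h>
#include <stdint.h>
#include <string.h>

#define MAX_OIDS 1024

struct oid_cache {
	int tree_version;          /* bumped on every insert/remove */
	int buf_version;           /* tree version the buffer was built from */
	int nr_oids;
	uint64_t oids[MAX_OIDS];   /* stands in for the rbtree */
	uint64_t buffer[MAX_OIDS]; /* flat second level cache */
	pthread_rwlock_t lock;
};

/* Any change to the object set invalidates the buffer by bumping
 * tree_version past buf_version. */
static void oid_cache_insert(struct oid_cache *c, uint64_t oid)
{
	pthread_rwlock_wrlock(&c->lock);
	c->oids[c->nr_oids++] = oid;
	c->tree_version++;
	pthread_rwlock_unlock(&c->lock);
}

/* Readers serve the list from the flat buffer; only when the
 * versions differ do they rebuild it under the write lock. */
static int oid_cache_get_list(struct oid_cache *c, uint64_t *out)
{
	int nr;

	pthread_rwlock_rdlock(&c->lock);
	if (c->tree_version != c->buf_version) {
		pthread_rwlock_unlock(&c->lock);
		pthread_rwlock_wrlock(&c->lock);
		/* Rebuild unconditionally: the set may have changed again
		 * between the unlock and the wrlock, so snapshot its
		 * current state and sync the versions. */
		memcpy(c->buffer, c->oids, c->nr_oids * sizeof(uint64_t));
		c->buf_version = c->tree_version;
	}
	nr = c->nr_oids;
	memcpy(out, c->buffer, nr * sizeof(uint64_t));
	pthread_rwlock_unlock(&c->lock);

	return nr;
}

In the common case this costs a single memcpy(); the full walk is only paid
once after each change to the object set.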

Signed-off-by: levin li <xingke.lwp at taobao.com>
---
 sheep/object_list_cache.c |   35 ++++++++++++++++++++++++-----------
 sheep/ops.c               |    4 +++-
 sheep/sheep_priv.h        |    6 +++++-
 3 files changed, 32 insertions(+), 13 deletions(-)

diff --git a/sheep/object_list_cache.c b/sheep/object_list_cache.c
index 28cdbbc..0eb5223 100644
--- a/sheep/object_list_cache.c
+++ b/sheep/object_list_cache.c
@@ -26,7 +26,13 @@ struct objlist_cache_entry {
 	struct rb_node node;
 };
 
-struct objlist_cache obj_list_cache;
+struct objlist_cache obj_list_cache = {
+	.tree_version = 1,
+	.buf_version = 0,
+	.cache_size = 0,
+	.buffer = STRBUF_INIT,
+	.root = RB_ROOT,
+};
 
 int init_objlist_cache(void)
 {
@@ -35,8 +41,6 @@ int init_objlist_cache(void)
 	uint64_t *buf;
 
 	pthread_rwlock_init(&obj_list_cache.lock, NULL);
-	obj_list_cache.root = RB_ROOT;
-	obj_list_cache.cache_size = 0;
 
 	if (sd_store) {
 		buf = zalloc(1 << 22);
@@ -123,8 +127,10 @@ int check_and_insert_objlist_cache(uint64_t oid)
 	p = objlist_cache_rb_insert(&obj_list_cache.root, entry);
 	if (p)
 		free(entry);
-	else
+	else {
 		obj_list_cache.cache_size++;
+		obj_list_cache.tree_version++;
+	}
 	pthread_rwlock_unlock(&obj_list_cache.lock);
 
 	return 0;
@@ -134,18 +140,25 @@ int get_obj_list(const struct sd_list_req *hdr, struct sd_list_rsp *rsp, void *d
 {
 	uint64_t *list = (uint64_t *)data;
 	int nr = 0;
-	int res = SD_RES_SUCCESS;
 	struct objlist_cache_entry *entry;
 	struct rb_node *p;
 
 	pthread_rwlock_rdlock(&obj_list_cache.lock);
+	rsp->data_length = obj_list_cache.cache_size * sizeof(uint64_t);
+
-	for (p = rb_first(&obj_list_cache.root); p; p = rb_next(p)) {
-		entry = rb_entry(p, struct objlist_cache_entry, node);
-		list[nr++] = entry->oid;
+	if (obj_list_cache.tree_version == obj_list_cache.buf_version)
+		memcpy(list, obj_list_cache.buffer.buf, rsp->data_length);
+	else {
+		for (p = rb_first(&obj_list_cache.root); p; p = rb_next(p)) {
+			entry = rb_entry(p, struct objlist_cache_entry, node);
+			list[nr++] = entry->oid;
+		}
+
+		strbuf_reset(&obj_list_cache.buffer);
+		strbuf_add(&obj_list_cache.buffer, list, nr * sizeof(uint64_t));
+		obj_list_cache.buf_version = obj_list_cache.tree_version;
 	}
 	pthread_rwlock_unlock(&obj_list_cache.lock);
 
-	rsp->data_length = nr * sizeof(uint64_t);
-
-	return res;
+	return SD_RES_SUCCESS;
 }
diff --git a/sheep/ops.c b/sheep/ops.c
index b4df70f..582ed24 100644
--- a/sheep/ops.c
+++ b/sheep/ops.c
@@ -712,8 +712,10 @@ static int store_remove_obj(struct request *req)
 		ret =  SD_RES_EIO;
 	}
 	pthread_rwlock_wrlock(&obj_list_cache.lock);
-	if (!objlist_cache_rb_remove(&obj_list_cache.root, oid))
+	if (!objlist_cache_rb_remove(&obj_list_cache.root, oid)) {
 		obj_list_cache.cache_size--;
+		obj_list_cache.tree_version++;
+	}
 	pthread_rwlock_unlock(&obj_list_cache.lock);
  out:
 	strbuf_release(&buf);
diff --git a/sheep/sheep_priv.h b/sheep/sheep_priv.h
index 21ee282..424c015 100644
--- a/sheep/sheep_priv.h
+++ b/sheep/sheep_priv.h
@@ -22,6 +22,7 @@
 #include "sheep.h"
 #include "cluster.h"
 #include "rbtree.h"
+#include "strbuf.h"
 
 #define SD_OP_GET_OBJ_LIST   0xA1
 #define SD_OP_GET_EPOCH      0XA2
@@ -203,8 +204,11 @@ static inline struct store_driver *find_store_driver(const char *name)
 }
 
 struct objlist_cache {
-	struct rb_root root;
+	int tree_version;
+	int buf_version;
 	int cache_size;
+	struct strbuf buffer;
+	struct rb_root root;
 	pthread_rwlock_t lock;
 };
 
-- 
1.7.10