[sheepdog] [PATCH 3/8] object cache: rename object_cache_entry to dirty_cache_entry

levin li levin108 at gmail.com
Mon Jul 9 08:29:17 CEST 2012


From: levin li <xingke.lwp at taobao.com>

object_cache_entry is in fact an entry of the dirty object cache, so we
should rename it to dirty_cache_entry; this frees up the name
object_cache_entry for use by all cached objects.

Signed-off-by: levin li <xingke.lwp at taobao.com>
---
 sheep/object_cache.c |   36 ++++++++++++++++++------------------
 1 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/sheep/object_cache.c b/sheep/object_cache.c
index 124650d..de45bf1 100644
--- a/sheep/object_cache.c
+++ b/sheep/object_cache.c
@@ -51,7 +51,7 @@ struct object_cache {
 	pthread_mutex_t lock;
 };
 
-struct object_cache_entry {
+struct dirty_cache_entry {
 	uint32_t idx;
 	uint64_t bmap; /* each bit represents one dirty
 			* block which should be flushed */
@@ -105,16 +105,16 @@ static uint64_t calc_object_bmap(size_t len, off_t offset)
 	return bmap;
 }
 
-static struct object_cache_entry *
-dirty_tree_insert(struct rb_root *root, struct object_cache_entry *new)
+static struct dirty_cache_entry *
+dirty_tree_insert(struct rb_root *root, struct dirty_cache_entry *new)
 {
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
-	struct object_cache_entry *entry;
+	struct dirty_cache_entry *entry;
 
 	while (*p) {
 		parent = *p;
-		entry = rb_entry(parent, struct object_cache_entry, rb);
+		entry = rb_entry(parent, struct dirty_cache_entry, rb);
 
 		if (new->idx < entry->idx)
 			p = &(*p)->rb_left;
@@ -132,14 +132,14 @@ dirty_tree_insert(struct rb_root *root, struct object_cache_entry *new)
 	return NULL; /* insert successfully */
 }
 
-static struct object_cache_entry *dirty_tree_search(struct rb_root *root,
+static struct dirty_cache_entry *dirty_tree_search(struct rb_root *root,
 						    uint32_t idx)
 {
 	struct rb_node *n = root->rb_node;
-	struct object_cache_entry *t;
+	struct dirty_cache_entry *t;
 
 	while (n) {
-		t = rb_entry(n, struct object_cache_entry, rb);
+		t = rb_entry(n, struct dirty_cache_entry, rb);
 
 		if (idx < t->idx)
 			n = n->rb_left;
@@ -209,7 +209,7 @@ out:
 }
 
 static inline void
-del_from_dirty_tree_and_list(struct object_cache_entry *entry,
+del_from_dirty_tree_and_list(struct dirty_cache_entry *entry,
 			     struct rb_root *dirty_tree)
 {
 	rb_erase(&entry->rb, dirty_tree);
@@ -239,7 +239,7 @@ static void switch_dirty_tree_and_list(struct object_cache *oc,
 /* Caller should hold the oc->lock */
 static inline void
 add_to_dirty_tree_and_list(struct object_cache *oc,
-			   struct object_cache_entry *entry)
+			   struct dirty_cache_entry *entry)
 {
 	if (!dirty_tree_insert(oc->active_dirty_tree, entry))
 		list_add(&entry->list, oc->active_dirty_list);
@@ -251,7 +251,7 @@ static void merge_dirty_tree_and_list(struct object_cache *oc,
 				      struct rb_root *inactive_dirty_tree,
 				      struct list_head *inactive_dirty_list)
 {
-	struct object_cache_entry *entry, *t;
+	struct dirty_cache_entry *entry, *t;
 
 	pthread_mutex_lock(&oc->lock);
 
@@ -263,10 +263,10 @@ static void merge_dirty_tree_and_list(struct object_cache *oc,
 	pthread_mutex_unlock(&oc->lock);
 }
 
-static inline struct object_cache_entry *
+static inline struct dirty_cache_entry *
 alloc_cache_entry(uint32_t idx, uint64_t bmap, int create)
 {
-	struct object_cache_entry *entry = xzalloc(sizeof(*entry));
+	struct dirty_cache_entry *entry = xzalloc(sizeof(*entry));
 
 	entry->idx = idx;
 	entry->bmap = bmap;
@@ -295,7 +295,7 @@ static int object_cache_lookup(struct object_cache *oc, uint32_t idx,
 	}
 
 	if (create) {
-		struct object_cache_entry *entry;
+		struct dirty_cache_entry *entry;
 		unsigned data_length;
 
 		if (idx_has_vdi_bit(idx))
@@ -423,7 +423,7 @@ static int object_cache_rw(struct object_cache *oc, uint32_t idx,
 		hdr->data_length, hdr->obj.offset);
 
 	if (hdr->flags & SD_FLAG_CMD_WRITE) {
-		struct object_cache_entry *entry;
+		struct dirty_cache_entry *entry;
 
 		ret = write_cache_object(oc->vid, idx, req->data,
 					 hdr->data_length, hdr->obj.offset);
@@ -600,7 +600,7 @@ out:
 /* Push back all the dirty objects to sheep cluster storage */
 static int object_cache_push(struct object_cache *oc)
 {
-	struct object_cache_entry *entry, *t;
+	struct dirty_cache_entry *entry, *t;
 	struct rb_root *inactive_dirty_tree;
 	struct list_head *inactive_dirty_list;
 	int ret = SD_RES_SUCCESS;
@@ -654,7 +654,7 @@ void object_cache_delete(uint32_t vid)
 	cache = find_object_cache(vid, 0);
 	if (cache) {
 		int h = hash(vid);
-		struct object_cache_entry *entry, *t;
+		struct dirty_cache_entry *entry, *t;
 		struct strbuf buf = STRBUF_INIT;
 
 		/* Firstly we free memeory */
@@ -867,7 +867,7 @@ void object_cache_remove(uint64_t oid)
 	uint32_t vid = oid_to_vid(oid);
 	uint32_t idx = object_cache_oid_to_idx(oid);
 	struct object_cache *oc;
-	struct object_cache_entry *entry;
+	struct dirty_cache_entry *entry;
 	int tree_id = 0;
 
 	oc = find_object_cache(vid, 0);
-- 
1.7.1




More information about the sheepdog mailing list