[sheepdog] [PATCH 8/8] object cache: use rwlock to replace the mutex lock in object_cache

levin li levin108 at gmail.com
Mon Jul 9 08:29:22 CEST 2012


From: levin li <xingke.lwp at taobao.com>

Replace the per-cache pthread mutex in struct object_cache with a
pthread rwlock, so that read-only paths (the sanity checks in
object_cache_lookup and object_cache_rw) can proceed concurrently
under the read lock, while paths that mutate the dirty trees/lists or
the object tree (switch/merge of the dirty lists, reclaim, insert,
remove, flush begin/end) take the write lock.

Signed-off-by: levin li <xingke.lwp at taobao.com>
---
 sheep/object_cache.c |   54 +++++++++++++++++++++++++-------------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/sheep/object_cache.c b/sheep/object_cache.c
index 335cb1e..4228696 100644
--- a/sheep/object_cache.c
+++ b/sheep/object_cache.c
@@ -83,7 +83,7 @@ struct object_cache {
 
 	struct rb_root object_tree;
 
-	pthread_mutex_t lock;
+	pthread_rwlock_t lock;
 };
 
 struct dirty_cache_entry {
@@ -333,7 +333,7 @@ not_found:
 		INIT_LIST_HEAD(&cache->dirty_lists[1]);
 		cache->active_dirty_list = &cache->dirty_lists[0];
 
-		pthread_mutex_init(&cache->lock, NULL);
+		pthread_rwlock_init(&cache->lock, NULL);
 		hlist_add_head(&cache->hash, head);
 	} else
 		cache = NULL;
@@ -362,7 +362,7 @@ static void switch_dirty_tree_and_list(struct object_cache *oc,
 				       struct rb_root **inactive_dirty_tree,
 				       struct list_head **inactive_dirty_list)
 {
-	pthread_mutex_lock(&oc->lock);
+	pthread_rwlock_wrlock(&oc->lock);
 
 	*inactive_dirty_list = oc->active_dirty_list;
 	*inactive_dirty_tree = oc->active_dirty_tree;
@@ -375,7 +375,7 @@ static void switch_dirty_tree_and_list(struct object_cache *oc,
 		oc->active_dirty_tree = &oc->dirty_trees[0];
 	}
 
-	pthread_mutex_unlock(&oc->lock);
+	pthread_rwlock_unlock(&oc->lock);
 }
 
 /* Caller should hold the oc->lock */
@@ -407,14 +407,14 @@ static void merge_dirty_tree_and_list(struct object_cache *oc,
 {
 	struct dirty_cache_entry *entry, *t;
 
-	pthread_mutex_lock(&oc->lock);
+	pthread_rwlock_wrlock(&oc->lock);
 
 	list_for_each_entry_safe(entry, t, inactive_dirty_list, list) {
 		del_from_dirty_tree_and_list(entry, inactive_dirty_tree);
 		add_to_dirty_tree_and_list(oc, entry, 1);
 	}
 
-	pthread_mutex_unlock(&oc->lock);
+	pthread_rwlock_unlock(&oc->lock);
 }
 
 static inline struct dirty_cache_entry *
@@ -481,7 +481,7 @@ static int reclaim_object(struct object_cache *oc,
 	uint32_t idx = entry->idx;
 	int ret = SD_RES_SUCCESS;
 
-	pthread_mutex_lock(&oc->lock);
+	pthread_rwlock_wrlock(&oc->lock);
 	dprintf("reclaiming /%06"PRIx32"/%08"PRIx32", cache_size: %ld\n",
 		oc->vid, idx, uatomic_read(&sys_cache.cache_size));
 
@@ -499,12 +499,12 @@ static int reclaim_object(struct object_cache *oc,
 		del_from_dirty_tree_and_list(dirty_entry,
 					     oc->active_dirty_tree);
 		entry_clr_dirty(entry);
-		pthread_mutex_unlock(&oc->lock);
+		pthread_rwlock_unlock(&oc->lock);
 
 		ret = push_cache_object(oc->vid, dirty_entry->idx,
 					dirty_entry->bmap, dirty_entry->create);
 
-		pthread_mutex_lock(&oc->lock);
+		pthread_rwlock_wrlock(&oc->lock);
 		free(dirty_entry);
 		if (ret != SD_RES_SUCCESS) {
 			/* Rollback to the dirty state. */
@@ -533,7 +533,7 @@ static int reclaim_object(struct object_cache *oc,
 	if (ret == SD_RES_SUCCESS)
 		del_from_object_tree_and_list(entry, &oc->object_tree);
 out:
-	pthread_mutex_unlock(&oc->lock);
+	pthread_rwlock_unlock(&oc->lock);
 	return ret;
 }
 
@@ -596,7 +596,7 @@ add_to_object_cache(struct object_cache *oc, uint32_t idx)
 	dprintf("cache object for vdi %" PRIx32 ", idx %08" PRIx32 "added\n",
 		oc->vid, idx);
 
-	pthread_mutex_lock(&oc->lock);
+	pthread_rwlock_wrlock(&oc->lock);
 	old = object_cache_insert(&oc->object_tree, entry);
 	if (!old) {
 		uatomic_add(&sys_cache.cache_size, data_length);
@@ -605,7 +605,7 @@ add_to_object_cache(struct object_cache *oc, uint32_t idx)
 		free(entry);
 		entry = old;
 	}
-	pthread_mutex_unlock(&oc->lock);
+	pthread_rwlock_unlock(&oc->lock);
 
 	if (sys->cache_size &&
 	    uatomic_read(&sys_cache.cache_size) > sys->cache_size &&
@@ -649,9 +649,9 @@ static int object_cache_lookup(struct object_cache *oc, uint32_t idx,
 	unsigned data_length;
 
 	if (!create) {
-		pthread_mutex_lock(&oc->lock);
+		pthread_rwlock_rdlock(&oc->lock);
 		ret = cache_sanity_check(oc, idx, NULL);
-		pthread_mutex_unlock(&oc->lock);
+		pthread_rwlock_unlock(&oc->lock);
 		return ret;
 	}
 
@@ -681,9 +681,9 @@ static int object_cache_lookup(struct object_cache *oc, uint32_t idx,
 		entry = add_to_object_cache(oc, idx);
 
 		dirty_entry = alloc_cache_entry(entry, idx, bmap, 1);
-		pthread_mutex_lock(&oc->lock);
+		pthread_rwlock_wrlock(&oc->lock);
 		add_to_dirty_tree_and_list(oc, dirty_entry, 0);
-		pthread_mutex_unlock(&oc->lock);
+		pthread_rwlock_unlock(&oc->lock);
 	}
 	close(fd);
 out:
@@ -793,16 +793,16 @@ static int object_cache_rw(struct object_cache *oc, uint32_t idx,
 	dprintf("%08"PRIx32", len %"PRIu32", off %"PRIu64"\n", idx,
 		hdr->data_length, hdr->obj.offset);
 
-	pthread_mutex_lock(&oc->lock);
+	pthread_rwlock_rdlock(&oc->lock);
 	ret = cache_sanity_check(oc, idx, &entry);
 	if (ret != SD_RES_SUCCESS) {
 		ret = SD_RES_NO_CACHE;
-		pthread_mutex_unlock(&oc->lock);
+		pthread_rwlock_unlock(&oc->lock);
 		goto out;
 	}
 
 	uatomic_inc(&entry->refcnt);
-	pthread_mutex_unlock(&oc->lock);
+	pthread_rwlock_unlock(&oc->lock);
 
 	if (hdr->flags & SD_FLAG_CMD_WRITE) {
 		struct dirty_cache_entry *dirty_entry;
@@ -813,10 +813,10 @@ static int object_cache_rw(struct object_cache *oc, uint32_t idx,
 			goto err;
 		bmap = calc_object_bmap(hdr->data_length, hdr->obj.offset);
 		dirty_entry = alloc_cache_entry(entry, idx, bmap, 0);
-		pthread_mutex_lock(&oc->lock);
+		pthread_rwlock_wrlock(&oc->lock);
 		add_to_dirty_tree_and_list(oc, dirty_entry, 0);
 		entry_set_dirty(entry);
-		pthread_mutex_unlock(&oc->lock);
+		pthread_rwlock_unlock(&oc->lock);
 	} else {
 		ret = read_cache_object(oc->vid, idx, req->data,
 					hdr->data_length, hdr->obj.offset);
@@ -1074,16 +1074,16 @@ void object_cache_delete(uint32_t vid)
 
 static void object_cache_flush_begin(struct object_cache *oc)
 {
-	pthread_mutex_lock(&oc->lock);
+	pthread_rwlock_wrlock(&oc->lock);
 	oc->flushing = 1;
-	pthread_mutex_unlock(&oc->lock);
+	pthread_rwlock_unlock(&oc->lock);
 }
 
 static void object_cache_flush_end(struct object_cache *oc)
 {
-	pthread_mutex_lock(&oc->lock);
+	pthread_rwlock_wrlock(&oc->lock);
 	oc->flushing = 0;
-	pthread_mutex_unlock(&oc->lock);
+	pthread_rwlock_unlock(&oc->lock);
 }
 
 static int object_cache_flush_and_delete(struct object_cache *oc)
@@ -1299,7 +1299,7 @@ void object_cache_remove(uint64_t oid)
 	if (!oc)
 		return;
 
-	pthread_mutex_lock(&oc->lock);
+	pthread_rwlock_wrlock(&oc->lock);
 
 	dirty_entry = dirty_tree_search(&oc->dirty_trees[tree_id], idx);
 	if (!dirty_entry) {
@@ -1314,7 +1314,7 @@ void object_cache_remove(uint64_t oid)
 		entry = object_tree_search(&oc->object_tree, idx);
 	if (entry)
 		del_from_object_tree_and_list(entry, &oc->object_tree);
-	pthread_mutex_unlock(&oc->lock);
+	pthread_rwlock_unlock(&oc->lock);
 
 	return;
 }
-- 
1.7.1




More information about the sheepdog mailing list