[sheepdog] [PATCH stable-0.7 1/9] lib: rename wrappers of pthread_rwlock

Hitoshi Mitake mitake.hitoshi at lab.ntt.co.jp
Fri Jan 31 06:46:36 CET 2014


From: Hitoshi Mitake <mitake.hitoshi at gmail.com>

Currently, include/util.h wraps functionality related to pthread_rwlock using
names based on the prefix "sd_lock".

This is confusing because we also need to wrap pthread_mutex. This patch
changes the naming convention from sd_lock to sd_rw_lock.

List of renamed identifiers:
- struct sd_lock -> struct sd_rw_lock
- SD_LOCK_INITIALIZER -> SD_RW_LOCK_INITIALIZER
- sd_init_lock() -> sd_init_rw_lock()
- sd_destroy_lock() -> sd_destroy_rw_lock()
- sd_unlock() -> sd_rw_unlock()

Signed-off-by: Hitoshi Mitake <mitake.hitoshi at lab.ntt.co.jp>
Signed-off-by: Liu Yuan <namei.unix at gmail.com>

Conflicts:
	lib/event.c
	sheep/cluster/local.c
	sheep/cluster/zookeeper.c
	sheep/md.c
	sheep/object_cache.c
	sheep/object_list_cache.c
	sheep/vdi.c

Conflicts were resolved by Hitoshi Mitake.
Signed-off-by: Hitoshi Mitake <mitake.hitoshi at lab.ntt.co.jp>
---
 dog/farm/farm.c           |    4 ++--
 include/util.h            |   14 +++++++-------
 lib/sockfd_cache.c        |   22 +++++++++++-----------
 sheep/cluster/zookeeper.c |   19 ++++++++++---------
 sheep/md.c                |   22 +++++++++++-----------
 sheep/object_cache.c      |   30 +++++++++++++++---------------
 sheep/object_list_cache.c |   16 ++++++++--------
 sheep/vdi.c               |   12 ++++++------
 sheepfs/volume.c          |   16 ++++++++--------
 9 files changed, 78 insertions(+), 77 deletions(-)

diff --git a/dog/farm/farm.c b/dog/farm/farm.c
index 9d0f1f6..e153987 100644
--- a/dog/farm/farm.c
+++ b/dog/farm/farm.c
@@ -21,7 +21,7 @@
 static char farm_object_dir[PATH_MAX];
 static char farm_dir[PATH_MAX];
 
-static struct sd_lock vdi_list_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock vdi_list_lock = SD_RW_LOCK_INITIALIZER;
 struct vdi_entry {
 	char name[SD_MAX_VDI_LEN];
 	uint64_t vdi_size;
@@ -356,7 +356,7 @@ static void do_load_object(struct work *work)
 
 		sd_write_lock(&vdi_list_lock);
 		insert_vdi(buffer);
-		sd_unlock(&vdi_list_lock);
+		sd_rw_unlock(&vdi_list_lock);
 	}
 
 	farm_show_progress(uatomic_add_return(&loaded, 1), trunk_get_count());
diff --git a/include/util.h b/include/util.h
index a9b228e..848b55c 100644
--- a/include/util.h
+++ b/include/util.h
@@ -261,13 +261,13 @@ static inline int refcount_dec(refcnt_t *rc)
 
 /* wrapper for pthread_rwlock */
 
-#define SD_LOCK_INITIALIZER { .rwlock = PTHREAD_RWLOCK_INITIALIZER }
+#define SD_RW_LOCK_INITIALIZER { .rwlock = PTHREAD_RWLOCK_INITIALIZER }
 
-struct sd_lock {
+struct sd_rw_lock {
 	pthread_rwlock_t rwlock;
 };
 
-static inline void sd_init_lock(struct sd_lock *lock)
+static inline void sd_init_rw_lock(struct sd_rw_lock *lock)
 {
 	int ret;
 
@@ -279,7 +279,7 @@ static inline void sd_init_lock(struct sd_lock *lock)
 		panic("failed to initialize a lock, %s", strerror(ret));
 }
 
-static inline void sd_destroy_lock(struct sd_lock *lock)
+static inline void sd_destroy_rw_lock(struct sd_rw_lock *lock)
 {
 	int ret;
 
@@ -291,7 +291,7 @@ static inline void sd_destroy_lock(struct sd_lock *lock)
 		panic("failed to destroy a lock, %s", strerror(ret));
 }
 
-static inline void sd_read_lock(struct sd_lock *lock)
+static inline void sd_read_lock(struct sd_rw_lock *lock)
 {
 	int ret;
 
@@ -307,7 +307,7 @@ static inline void sd_read_lock(struct sd_lock *lock)
  * Even though POSIX manual it doesn't return EAGAIN, we indeed have met the
  * case that it returned EAGAIN
  */
-static inline void sd_write_lock(struct sd_lock *lock)
+static inline void sd_write_lock(struct sd_rw_lock *lock)
 {
 	int ret;
 
@@ -319,7 +319,7 @@ static inline void sd_write_lock(struct sd_lock *lock)
 		panic("failed to lock for writing, %s", strerror(ret));
 }
 
-static inline void sd_unlock(struct sd_lock *lock)
+static inline void sd_rw_unlock(struct sd_rw_lock *lock)
 {
 	int ret;
 
diff --git a/lib/sockfd_cache.c b/lib/sockfd_cache.c
index c9404ea..ef88bcc 100644
--- a/lib/sockfd_cache.c
+++ b/lib/sockfd_cache.c
@@ -37,13 +37,13 @@
 
 struct sockfd_cache {
 	struct rb_root root;
-	struct sd_lock lock;
+	struct sd_rw_lock lock;
 	int count;
 };
 
 static struct sockfd_cache sockfd_cache = {
 	.root = RB_ROOT,
-	.lock = SD_LOCK_INITIALIZER,
+	.lock = SD_RW_LOCK_INITIALIZER,
 };
 
 /*
@@ -155,7 +155,7 @@ static struct sockfd_cache_entry *sockfd_cache_grab(const struct node_id *nid,
 	if (*ret_idx == -1)
 		entry = NULL;
 out:
-	sd_unlock(&sockfd_cache.lock);
+	sd_rw_unlock(&sockfd_cache.lock);
 	return entry;
 }
 
@@ -206,14 +206,14 @@ static bool sockfd_cache_destroy(const struct node_id *nid)
 	}
 
 	rb_erase(&entry->rb, &sockfd_cache.root);
-	sd_unlock(&sockfd_cache.lock);
+	sd_rw_unlock(&sockfd_cache.lock);
 
 	destroy_all_slots(entry);
 	free_cache_entry(entry);
 
 	return true;
 false_out:
-	sd_unlock(&sockfd_cache.lock);
+	sd_rw_unlock(&sockfd_cache.lock);
 	return false;
 }
 
@@ -245,7 +245,7 @@ void sockfd_cache_add_group(const struct sd_node *nodes, int nr)
 		p = nodes + nr;
 		sockfd_cache_add_nolock(&p->nid);
 	}
-	sd_unlock(&sockfd_cache.lock);
+	sd_rw_unlock(&sockfd_cache.lock);
 }
 
 /* Add one node to the cache means we can do caching tricks on this node */
@@ -263,10 +263,10 @@ void sockfd_cache_add(const struct node_id *nid)
 	memcpy(&new->nid, nid, sizeof(struct node_id));
 	if (sockfd_cache_insert(new)) {
 		free_cache_entry(new);
-		sd_unlock(&sockfd_cache.lock);
+		sd_rw_unlock(&sockfd_cache.lock);
 		return;
 	}
-	sd_unlock(&sockfd_cache.lock);
+	sd_rw_unlock(&sockfd_cache.lock);
 	n = uatomic_add_return(&sockfd_cache.count, 1);
 	sd_debug("%s, count %d", addr_to_str(nid->addr, nid->port), n);
 }
@@ -298,7 +298,7 @@ static void do_grow_fds(struct work *work)
 
 	fds_count *= 2;
 	fds_high_watermark = FDS_WATERMARK(fds_count);
-	sd_unlock(&sockfd_cache.lock);
+	sd_rw_unlock(&sockfd_cache.lock);
 }
 
 static void grow_fds_done(struct work *work)
@@ -409,7 +409,7 @@ static void sockfd_cache_put_long(const struct node_id *nid, int idx)
 	entry = sockfd_cache_search(nid);
 	if (entry)
 		uatomic_set_false(&entry->fds[idx].in_use);
-	sd_unlock(&sockfd_cache.lock);
+	sd_rw_unlock(&sockfd_cache.lock);
 }
 
 static void sockfd_cache_close(const struct node_id *nid, int idx)
@@ -428,7 +428,7 @@ static void sockfd_cache_close(const struct node_id *nid, int idx)
 		entry->fds[idx].fd = -1;
 		uatomic_set_false(&entry->fds[idx].in_use);
 	}
-	sd_unlock(&sockfd_cache.lock);
+	sd_rw_unlock(&sockfd_cache.lock);
 }
 
 /*
diff --git a/sheep/cluster/zookeeper.c b/sheep/cluster/zookeeper.c
index 5088968..28b936c 100644
--- a/sheep/cluster/zookeeper.c
+++ b/sheep/cluster/zookeeper.c
@@ -71,8 +71,8 @@ struct zk_event {
 static struct sd_node sd_nodes[SD_MAX_NODES];
 static size_t nr_sd_nodes;
 static struct rb_root zk_node_root = RB_ROOT;
-static struct sd_lock zk_tree_lock = SD_LOCK_INITIALIZER;
-static struct sd_lock zk_compete_master_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock zk_tree_lock = SD_RW_LOCK_INITIALIZER;
+static struct sd_rw_lock zk_compete_master_lock = SD_RW_LOCK_INITIALIZER;
 static LIST_HEAD(zk_block_list);
 static uatomic_bool is_master;
 static uatomic_bool stop;
@@ -134,7 +134,7 @@ static inline struct zk_node *zk_tree_search(const struct node_id *nid)
 
 	sd_read_lock(&zk_tree_lock);
 	n = zk_tree_search_nolock(nid);
-	sd_unlock(&zk_tree_lock);
+	sd_rw_unlock(&zk_tree_lock);
 	return n;
 }
 
@@ -450,7 +450,7 @@ static inline void zk_tree_add(struct zk_node *node)
 	 */
 	sd_nodes[nr_sd_nodes++] = zk->node;
 out:
-	sd_unlock(&zk_tree_lock);
+	sd_rw_unlock(&zk_tree_lock);
 }
 
 static inline void zk_tree_del_nolock(struct zk_node *node)
@@ -463,7 +463,7 @@ static inline void zk_tree_del(struct zk_node *node)
 {
 	sd_write_lock(&zk_tree_lock);
 	zk_tree_del_nolock(node);
-	sd_unlock(&zk_tree_lock);
+	sd_rw_unlock(&zk_tree_lock);
 }
 
 static inline void zk_tree_destroy(void)
@@ -477,7 +477,8 @@ static inline void zk_tree_destroy(void)
 		if (zk)
 			zk_tree_del_nolock(zk);
 	}
-	sd_unlock(&zk_tree_lock);
+	rb_destroy(&zk_node_root, struct zk_node, rb);
+	sd_rw_unlock(&zk_tree_lock);
 }
 
 static inline void build_node_list(void)
@@ -579,7 +580,7 @@ static void zk_watcher(zhandle_t *zh, int type, int state, const char *path,
 		n = zk_tree_search_nolock(&znode.node.nid);
 		if (n)
 			n->gone = true;
-		sd_unlock(&zk_tree_lock);
+		sd_rw_unlock(&zk_tree_lock);
 		if (n)
 			add_event(EVENT_LEAVE, &znode, NULL, 0);
 	}
@@ -782,7 +783,7 @@ success:
 	uatomic_set_true(&is_master);
 	sd_debug("success");
 out_unlock:
-	sd_unlock(&zk_compete_master_lock);
+	sd_rw_unlock(&zk_compete_master_lock);
 }
 
 static int zk_join(const struct sd_node *myself,
@@ -996,7 +997,7 @@ static void zk_handle_update_node(struct zk_event *ev)
 	assert(t);
 	t->node = *snode;
 	build_node_list();
-	sd_unlock(&zk_tree_lock);
+	sd_rw_unlock(&zk_tree_lock);
 	sd_update_node_handler(snode);
 }
 
diff --git a/sheep/md.c b/sheep/md.c
index 1c22b72..92d019f 100644
--- a/sheep/md.c
+++ b/sheep/md.c
@@ -30,7 +30,7 @@ struct vdisk {
 static struct disk md_disks[MD_MAX_DISK];
 static struct vdisk md_vds[MD_MAX_VDISK];
 
-static struct sd_lock md_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock md_lock = SD_RW_LOCK_INITIALIZER;
 static int md_nr_disks; /* Protected by md_lock */
 static int md_nr_vds;
 
@@ -40,7 +40,7 @@ static inline int nr_online_disks(void)
 
 	sd_read_lock(&md_lock);
 	nr = md_nr_disks;
-	sd_unlock(&md_lock);
+	sd_rw_unlock(&md_lock);
 
 	return nr;
 }
@@ -340,7 +340,7 @@ char *md_get_object_path(uint64_t oid)
 	sd_read_lock(&md_lock);
 	vd = oid_to_vdisk(oid);
 	p = md_disks[vd->idx].path;
-	sd_unlock(&md_lock);
+	sd_rw_unlock(&md_lock);
 	sd_debug("%d, %s", vd->idx, p);
 
 	return p;
@@ -367,7 +367,7 @@ int for_each_object_in_wd(int (*func)(uint64_t oid, char *path, uint32_t epoch,
 		if (ret != SD_RES_SUCCESS)
 			break;
 	}
-	sd_unlock(&md_lock);
+	sd_rw_unlock(&md_lock);
 	return ret;
 }
 
@@ -385,7 +385,7 @@ int for_each_object_in_stale(int (*func)(uint64_t oid, char *path,
 		if (ret != SD_RES_SUCCESS)
 			break;
 	}
-	sd_unlock(&md_lock);
+	sd_rw_unlock(&md_lock);
 	return ret;
 }
 
@@ -400,7 +400,7 @@ int for_each_obj_path(int (*func)(char *path))
 		if (ret != SD_RES_SUCCESS)
 			break;
 	}
-	sd_unlock(&md_lock);
+	sd_rw_unlock(&md_lock);
 	return ret;
 }
 
@@ -431,7 +431,7 @@ static void md_do_recover(struct work *work)
 	md_init_space();
 	nr = md_nr_disks;
 out:
-	sd_unlock(&md_lock);
+	sd_rw_unlock(&md_lock);
 
 	if (nr > 0)
 		kick_recover();
@@ -554,7 +554,7 @@ static int scan_wd(uint64_t oid, uint32_t epoch)
 		if (ret == SD_RES_SUCCESS)
 			break;
 	}
-	sd_unlock(&md_lock);
+	sd_rw_unlock(&md_lock);
 	return ret;
 }
 
@@ -606,7 +606,7 @@ uint32_t md_get_info(struct sd_md_info *info)
 							&info->disk[i].used);
 	}
 	info->nr = md_nr_disks;
-	sd_unlock(&md_lock);
+	sd_rw_unlock(&md_lock);
 	return ret;
 }
 
@@ -647,7 +647,7 @@ static int do_plug_unplug(char *disks, bool plug)
 
 	ret = SD_RES_SUCCESS;
 out:
-	sd_unlock(&md_lock);
+	sd_rw_unlock(&md_lock);
 
 	/*
 	 * We have to kick recover aggressively because there is possibility
@@ -678,7 +678,7 @@ uint64_t md_get_size(uint64_t *used)
 	sd_read_lock(&md_lock);
 	for (int i = 0; i < md_nr_disks; i++)
 		fsize += get_path_free_size(md_disks[i].path, used);
-	sd_unlock(&md_lock);
+	sd_rw_unlock(&md_lock);
 
 	return fsize + *used;
 }
diff --git a/sheep/object_cache.c b/sheep/object_cache.c
index 0282135..f07003d 100644
--- a/sheep/object_cache.c
+++ b/sheep/object_cache.c
@@ -47,7 +47,7 @@ struct object_cache_entry {
 	struct list_head dirty_list; /* For dirty list of object cache */
 	struct list_head lru_list; /* For lru list of object cache */
 
-	struct sd_lock lock; /* Entry lock */
+	struct sd_rw_lock lock; /* Entry lock */
 };
 
 struct object_cache {
@@ -62,7 +62,7 @@ struct object_cache {
 	int push_efd; /* Used to synchronize between pusher and push threads */
 	uatomic_bool in_push; /* Whether if pusher is running */
 
-	struct sd_lock lock; /* Cache lock */
+	struct sd_rw_lock lock; /* Cache lock */
 };
 
 struct push_work {
@@ -78,8 +78,8 @@ static int def_open_flags = O_RDWR;
 #define HASH_BITS	5
 #define HASH_SIZE	(1 << HASH_BITS)
 
-static struct sd_lock hashtable_lock[HASH_SIZE] = {
-	[0 ... HASH_SIZE - 1] = SD_LOCK_INITIALIZER
+static struct sd_rw_lock hashtable_lock[HASH_SIZE] = {
+	[0 ... HASH_SIZE - 1] = SD_RW_LOCK_INITIALIZER
 };
 
 static struct hlist_head cache_hashtable[HASH_SIZE];
@@ -178,7 +178,7 @@ static inline void write_lock_cache(struct object_cache *oc)
 
 static inline void unlock_cache(struct object_cache *oc)
 {
-	sd_unlock(&oc->lock);
+	sd_rw_unlock(&oc->lock);
 }
 
 static inline void read_lock_entry(struct object_cache_entry *entry)
@@ -193,7 +193,7 @@ static inline void write_lock_entry(struct object_cache_entry *entry)
 
 static inline void unlock_entry(struct object_cache_entry *entry)
 {
-	sd_unlock(&entry->lock);
+	sd_rw_unlock(&entry->lock);
 }
 
 static struct object_cache_entry *
@@ -302,7 +302,7 @@ free_cache_entry(struct object_cache_entry *entry)
 	oc->total_count--;
 	if (!list_empty(&entry->dirty_list))
 		del_from_dirty_list(entry);
-	sd_destroy_lock(&entry->lock);
+	sd_destroy_rw_lock(&entry->lock);
 	free(entry);
 }
 
@@ -598,12 +598,12 @@ static void do_reclaim(struct work *work)
 			do_reclaim_object(cache);
 			cap = uatomic_read(&gcache.capacity);
 			if (cap <= HIGH_WATERMARK) {
-				sd_unlock(&hashtable_lock[idx]);
+				sd_rw_unlock(&hashtable_lock[idx]);
 				sd_debug("complete, capacity %"PRIu32, cap);
 				return;
 			}
 		}
-		sd_unlock(&hashtable_lock[idx]);
+		sd_rw_unlock(&hashtable_lock[idx]);
 	}
 	sd_debug("finished");
 }
@@ -658,13 +658,13 @@ not_found:
 		INIT_LIST_HEAD(&cache->dirty_head);
 		INIT_LIST_HEAD(&cache->lru_head);
 
-		sd_init_lock(&cache->lock);
+		sd_init_rw_lock(&cache->lock);
 		hlist_add_head(&cache->hash, head);
 	} else {
 		cache = NULL;
 	}
 out:
-	sd_unlock(&hashtable_lock[h]);
+	sd_rw_unlock(&hashtable_lock[h]);
 	return cache;
 }
 
@@ -697,7 +697,7 @@ alloc_cache_entry(struct object_cache *oc, uint32_t idx)
 	entry = xzalloc(sizeof(*entry));
 	entry->oc = oc;
 	entry->idx = idx;
-	sd_init_lock(&entry->lock);
+	sd_init_rw_lock(&entry->lock);
 	INIT_LIST_HEAD(&entry->dirty_list);
 	INIT_LIST_HEAD(&entry->lru_list);
 
@@ -982,7 +982,7 @@ void object_cache_delete(uint32_t vid)
 	/* Firstly we free memeory */
 	sd_write_lock(&hashtable_lock[h]);
 	hlist_del(&cache->hash);
-	sd_unlock(&hashtable_lock[h]);
+	sd_rw_unlock(&hashtable_lock[h]);
 
 	write_lock_cache(cache);
 	list_for_each_entry_safe(entry, t, &cache->lru_head, lru_list) {
@@ -990,7 +990,7 @@ void object_cache_delete(uint32_t vid)
 		uatomic_sub(&gcache.capacity, CACHE_OBJECT_SIZE);
 	}
 	unlock_cache(cache);
-	sd_destroy_lock(&cache->lock);
+	sd_destroy_rw_lock(&cache->lock);
 	close(cache->push_efd);
 	free(cache);
 
@@ -1424,7 +1424,7 @@ int object_cache_get_info(struct object_cache_info *info)
 			j++;
 			unlock_cache(cache);
 		}
-		sd_unlock(&hashtable_lock[i]);
+		sd_rw_unlock(&hashtable_lock[i]);
 	}
 	info->count = j;
 
diff --git a/sheep/object_list_cache.c b/sheep/object_list_cache.c
index 6737efd..9ba6b96 100644
--- a/sheep/object_list_cache.c
+++ b/sheep/object_list_cache.c
@@ -26,7 +26,7 @@ struct objlist_cache {
 	uint64_t *buf;
 	struct list_head entry_list;
 	struct rb_root root;
-	struct sd_lock lock;
+	struct sd_rw_lock lock;
 };
 
 struct objlist_deletion_work {
@@ -38,7 +38,7 @@ static struct objlist_cache obj_list_cache = {
 	.tree_version	= 1,
 	.root		= RB_ROOT,
 	.entry_list     = LIST_HEAD_INIT(obj_list_cache.entry_list),
-	.lock		= SD_LOCK_INITIALIZER,
+	.lock		= SD_RW_LOCK_INITIALIZER,
 };
 
 static struct objlist_cache_entry *objlist_cache_rb_insert(struct rb_root *root,
@@ -97,7 +97,7 @@ void objlist_cache_remove(uint64_t oid)
 		obj_list_cache.cache_size--;
 		obj_list_cache.tree_version++;
 	}
-	sd_unlock(&obj_list_cache.lock);
+	sd_rw_unlock(&obj_list_cache.lock);
 }
 
 int objlist_cache_insert(uint64_t oid)
@@ -117,7 +117,7 @@ int objlist_cache_insert(uint64_t oid)
 		obj_list_cache.cache_size++;
 		obj_list_cache.tree_version++;
 	}
-	sd_unlock(&obj_list_cache.lock);
+	sd_rw_unlock(&obj_list_cache.lock);
 
 	return 0;
 }
@@ -133,7 +133,7 @@ int get_obj_list(const struct sd_req *hdr, struct sd_rsp *rsp, void *data)
 		goto out;
 
 	/* if that fails grab a write lock for the usually nessecary update */
-	sd_unlock(&obj_list_cache.lock);
+	sd_rw_unlock(&obj_list_cache.lock);
 	sd_write_lock(&obj_list_cache.lock);
 	if (obj_list_cache.tree_version == obj_list_cache.buf_version)
 		goto out;
@@ -148,14 +148,14 @@ int get_obj_list(const struct sd_req *hdr, struct sd_rsp *rsp, void *data)
 
 out:
 	if (hdr->data_length < obj_list_cache.cache_size * sizeof(uint64_t)) {
-		sd_unlock(&obj_list_cache.lock);
+		sd_rw_unlock(&obj_list_cache.lock);
 		sd_err("GET_OBJ_LIST buffer too small");
 		return SD_RES_BUFFER_SMALL;
 	}
 
 	rsp->data_length = obj_list_cache.cache_size * sizeof(uint64_t);
 	memcpy(data, obj_list_cache.buf, rsp->data_length);
-	sd_unlock(&obj_list_cache.lock);
+	sd_rw_unlock(&obj_list_cache.lock);
 	return SD_RES_SUCCESS;
 }
 
@@ -194,7 +194,7 @@ static void objlist_deletion_work(struct work *work)
 		rb_erase(&entry->node, &obj_list_cache.root);
 		free(entry);
 	}
-	sd_unlock(&obj_list_cache.lock);
+	sd_rw_unlock(&obj_list_cache.lock);
 }
 
 static void objlist_deletion_done(struct work *work)
diff --git a/sheep/vdi.c b/sheep/vdi.c
index 2252702..708f03f 100644
--- a/sheep/vdi.c
+++ b/sheep/vdi.c
@@ -20,7 +20,7 @@ struct vdi_state_entry {
 
 static uint32_t max_copies;
 static struct rb_root vdi_state_root = RB_ROOT;
-static struct sd_lock vdi_state_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock vdi_state_lock = SD_RW_LOCK_INITIALIZER;
 
 static struct vdi_state_entry *vdi_state_search(struct rb_root *root,
 						uint32_t vid)
@@ -72,7 +72,7 @@ static bool vid_is_snapshot(uint32_t vid)
 
 	sd_read_lock(&vdi_state_lock);
 	entry = vdi_state_search(&vdi_state_root, vid);
-	sd_unlock(&vdi_state_lock);
+	sd_rw_unlock(&vdi_state_lock);
 
 	if (!entry) {
 		sd_err("No VDI entry for %" PRIx32 " found", vid);
@@ -97,7 +97,7 @@ int get_vdi_copy_number(uint32_t vid)
 
 	sd_read_lock(&vdi_state_lock);
 	entry = vdi_state_search(&vdi_state_root, vid);
-	sd_unlock(&vdi_state_lock);
+	sd_rw_unlock(&vdi_state_lock);
 
 	if (!entry) {
 		sd_alert("copy number for %" PRIx32 " not found, set %d", vid,
@@ -158,7 +158,7 @@ int add_vdi_state(uint32_t vid, int nr_copies, bool snapshot)
 	if (uatomic_read(&max_copies) == 0 ||
 	    nr_copies > uatomic_read(&max_copies))
 		uatomic_set(&max_copies, nr_copies);
-	sd_unlock(&vdi_state_lock);
+	sd_rw_unlock(&vdi_state_lock);
 
 	return SD_RES_SUCCESS;
 }
@@ -180,7 +180,7 @@ int fill_vdi_state_list(void *data)
 		vs++;
 		nr++;
 	}
-	sd_unlock(&vdi_state_lock);
+	sd_rw_unlock(&vdi_state_lock);
 
 	return nr * sizeof(*vs);
 }
@@ -963,5 +963,5 @@ void clean_vdi_state(void)
 		current_node = rb_first(&vdi_state_root);
 	}
 	INIT_RB_ROOT(&vdi_state_root);
-	sd_unlock(&vdi_state_lock);
+	sd_rw_unlock(&vdi_state_lock);
 }
diff --git a/sheepfs/volume.c b/sheepfs/volume.c
index 221c4a2..fb5a924 100644
--- a/sheepfs/volume.c
+++ b/sheepfs/volume.c
@@ -62,7 +62,7 @@ struct vdi_inode {
 };
 
 static struct rb_root vdi_inode_tree = RB_ROOT;
-static struct sd_lock vdi_inode_tree_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock vdi_inode_tree_lock = SD_RW_LOCK_INITIALIZER;
 
 static struct vdi_inode *vdi_inode_tree_insert(struct vdi_inode *new)
 {
@@ -149,7 +149,7 @@ static int volume_rw_object(char *buf, uint64_t oid, size_t size,
 
 	sd_read_lock(&vdi_inode_tree_lock);
 	vdi = vdi_inode_tree_search(vid);
-	sd_unlock(&vdi_inode_tree_lock);
+	sd_rw_unlock(&vdi_inode_tree_lock);
 
 	if (is_data_obj(oid)) {
 		idx = data_oid_to_idx(oid);
@@ -293,7 +293,7 @@ static int volume_do_sync(uint32_t vid)
 
 	sd_read_lock(&vdi_inode_tree_lock);
 	vdi = vdi_inode_tree_search(vid);
-	sd_unlock(&vdi_inode_tree_lock);
+	sd_rw_unlock(&vdi_inode_tree_lock);
 
 	hdr.opcode = SD_OP_FLUSH_VDI;
 	hdr.obj.oid = vid_to_vdi_oid(vid);
@@ -385,7 +385,7 @@ int reset_socket_pool(void)
 		}
 	}
 out:
-	sd_unlock(&vdi_inode_tree_lock);
+	sd_rw_unlock(&vdi_inode_tree_lock);
 	return ret;
 }
 
@@ -422,7 +422,7 @@ static int init_vdi_info(const char *entry, uint32_t *vid, size_t *size)
 	/* we need insert inode before calling volume_rw_object */
 	sd_write_lock(&vdi_inode_tree_lock);
 	dummy = vdi_inode_tree_insert(inode);
-	sd_unlock(&vdi_inode_tree_lock);
+	sd_rw_unlock(&vdi_inode_tree_lock);
 	if (dummy)
 		goto err;
 	if (volume_rw_object(inode_buf, vid_to_vdi_oid(*vid), SD_INODE_SIZE,
@@ -486,7 +486,7 @@ static int volume_sync_and_delete(uint32_t vid)
 
 	sd_read_lock(&vdi_inode_tree_lock);
 	vdi = vdi_inode_tree_search(vid);
-	sd_unlock(&vdi_inode_tree_lock);
+	sd_rw_unlock(&vdi_inode_tree_lock);
 
 	hdr.opcode = SD_OP_FLUSH_DEL_CACHE;
 	hdr.obj.oid = vid_to_vdi_oid(vid);
@@ -525,12 +525,12 @@ int volume_remove_entry(const char *entry)
 
 	sd_read_lock(&vdi_inode_tree_lock);
 	vdi = vdi_inode_tree_search(vid);
-	sd_unlock(&vdi_inode_tree_lock);
+	sd_rw_unlock(&vdi_inode_tree_lock);
 	destroy_socket_pool(vdi->socket_pool, SOCKET_POOL_SIZE);
 
 	sd_write_lock(&vdi_inode_tree_lock);
 	rb_erase(&vdi->rb, &vdi_inode_tree);
-	sd_unlock(&vdi_inode_tree_lock);
+	sd_rw_unlock(&vdi_inode_tree_lock);
 
 	free(vdi->inode);
 	free(vdi);
-- 
1.7.10.4




More information about the sheepdog mailing list