[sheepdog] [PATCH 1/2] lib: rename wrappers of pthread_rwlock
Hitoshi Mitake
mitake.hitoshi at gmail.com
Wed Dec 25 14:34:38 CET 2013
Currently, lib/util.h wraps pthread_rwlock-related functionality using the
"sd_lock" prefix.
This naming is confusing because we also need to wrap pthread_mutex. This
patch changes the naming convention from sd_lock to sd_rw_lock.
List of renamed identifiers:
- struct sd_lock -> struct sd_rw_lock
- sd_init_lock() -> sd_init_rw_lock()
- sd_destroy_lock() -> sd_destroy_rw_lock()
- sd_unlock() -> sd_rw_unlock()
Signed-off-by: Hitoshi Mitake <mitake.hitoshi at lab.ntt.co.jp>
---
dog/farm/farm.c | 4 ++--
include/util.h | 14 +++++++-------
lib/event.c | 8 ++++----
lib/sockfd_cache.c | 22 +++++++++++-----------
sheep/cluster/local.c | 6 +++---
sheep/cluster/zookeeper.c | 18 +++++++++---------
sheep/md.c | 24 ++++++++++++------------
sheep/object_cache.c | 30 +++++++++++++++---------------
sheep/object_list_cache.c | 16 ++++++++--------
sheep/vdi.c | 14 +++++++-------
sheepfs/volume.c | 16 ++++++++--------
11 files changed, 86 insertions(+), 86 deletions(-)
diff --git a/dog/farm/farm.c b/dog/farm/farm.c
index 07dcbd4..00895af 100644
--- a/dog/farm/farm.c
+++ b/dog/farm/farm.c
@@ -21,7 +21,7 @@
static char farm_object_dir[PATH_MAX];
static char farm_dir[PATH_MAX];
-static struct sd_lock vdi_list_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock vdi_list_lock = SD_RW_LOCK_INITIALIZER;
struct vdi_entry {
char name[SD_MAX_VDI_LEN];
uint64_t vdi_size;
@@ -377,7 +377,7 @@ static void do_load_object(struct work *work)
sd_write_lock(&vdi_list_lock);
insert_vdi(buffer);
- sd_unlock(&vdi_list_lock);
+ sd_rw_unlock(&vdi_list_lock);
}
farm_show_progress(uatomic_add_return(&loaded, 1), trunk_get_count());
diff --git a/include/util.h b/include/util.h
index 2885d8f..4385fd1 100644
--- a/include/util.h
+++ b/include/util.h
@@ -270,13 +270,13 @@ static inline int refcount_dec(refcnt_t *rc)
/* wrapper for pthread_rwlock */
-#define SD_LOCK_INITIALIZER { .rwlock = PTHREAD_RWLOCK_INITIALIZER }
+#define SD_RW_LOCK_INITIALIZER { .rwlock = PTHREAD_RWLOCK_INITIALIZER }
-struct sd_lock {
+struct sd_rw_lock {
pthread_rwlock_t rwlock;
};
-static inline void sd_init_lock(struct sd_lock *lock)
+static inline void sd_init_rw_lock(struct sd_rw_lock *lock)
{
int ret;
@@ -288,7 +288,7 @@ static inline void sd_init_lock(struct sd_lock *lock)
panic("failed to initialize a lock, %s", strerror(ret));
}
-static inline void sd_destroy_lock(struct sd_lock *lock)
+static inline void sd_destroy_rw_lock(struct sd_rw_lock *lock)
{
int ret;
@@ -300,7 +300,7 @@ static inline void sd_destroy_lock(struct sd_lock *lock)
panic("failed to destroy a lock, %s", strerror(ret));
}
-static inline void sd_read_lock(struct sd_lock *lock)
+static inline void sd_read_lock(struct sd_rw_lock *lock)
{
int ret;
@@ -316,7 +316,7 @@ static inline void sd_read_lock(struct sd_lock *lock)
* Even though POSIX manual it doesn't return EAGAIN, we indeed have met the
* case that it returned EAGAIN
*/
-static inline void sd_write_lock(struct sd_lock *lock)
+static inline void sd_write_lock(struct sd_rw_lock *lock)
{
int ret;
@@ -328,7 +328,7 @@ static inline void sd_write_lock(struct sd_lock *lock)
panic("failed to lock for writing, %s", strerror(ret));
}
-static inline void sd_unlock(struct sd_lock *lock)
+static inline void sd_rw_unlock(struct sd_rw_lock *lock)
{
int ret;
diff --git a/lib/event.c b/lib/event.c
index 16aa921..cdb33d6 100644
--- a/lib/event.c
+++ b/lib/event.c
@@ -21,7 +21,7 @@
static int efd;
static struct rb_root events_tree = RB_ROOT;
-static struct sd_lock events_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock events_lock = SD_RW_LOCK_INITIALIZER;
static void timer_handler(int fd, int events, void *data)
{
@@ -97,7 +97,7 @@ static struct event_info *lookup_event(int fd)
sd_read_lock(&events_lock);
ret = rb_search(&events_tree, &key, rb, event_cmp);
- sd_unlock(&events_lock);
+ sd_rw_unlock(&events_lock);
return ret;
}
@@ -124,7 +124,7 @@ int register_event_prio(int fd, event_handler_t h, void *data, int prio)
} else {
sd_write_lock(&events_lock);
rb_insert(&events_tree, ei, rb, event_cmp);
- sd_unlock(&events_lock);
+ sd_rw_unlock(&events_lock);
}
return ret;
@@ -145,7 +145,7 @@ void unregister_event(int fd)
sd_write_lock(&events_lock);
rb_erase(&ei->rb, &events_tree);
- sd_unlock(&events_lock);
+ sd_rw_unlock(&events_lock);
free(ei);
/*
diff --git a/lib/sockfd_cache.c b/lib/sockfd_cache.c
index 0bfb274..ec06091 100644
--- a/lib/sockfd_cache.c
+++ b/lib/sockfd_cache.c
@@ -37,13 +37,13 @@
struct sockfd_cache {
struct rb_root root;
- struct sd_lock lock;
+ struct sd_rw_lock lock;
int count;
};
static struct sockfd_cache sockfd_cache = {
.root = RB_ROOT,
- .lock = SD_LOCK_INITIALIZER,
+ .lock = SD_RW_LOCK_INITIALIZER,
};
/*
@@ -125,7 +125,7 @@ static struct sockfd_cache_entry *sockfd_cache_grab(const struct node_id *nid,
if (*ret_idx == -1)
entry = NULL;
out:
- sd_unlock(&sockfd_cache.lock);
+ sd_rw_unlock(&sockfd_cache.lock);
return entry;
}
@@ -176,14 +176,14 @@ static bool sockfd_cache_destroy(const struct node_id *nid)
}
rb_erase(&entry->rb, &sockfd_cache.root);
- sd_unlock(&sockfd_cache.lock);
+ sd_rw_unlock(&sockfd_cache.lock);
destroy_all_slots(entry);
free_cache_entry(entry);
return true;
false_out:
- sd_unlock(&sockfd_cache.lock);
+ sd_rw_unlock(&sockfd_cache.lock);
return false;
}
@@ -213,7 +213,7 @@ void sockfd_cache_add_group(const struct rb_root *nroot)
rb_for_each_entry(n, nroot, rb) {
sockfd_cache_add_nolock(&n->nid);
}
- sd_unlock(&sockfd_cache.lock);
+ sd_rw_unlock(&sockfd_cache.lock);
}
/* Add one node to the cache means we can do caching tricks on this node */
@@ -231,10 +231,10 @@ void sockfd_cache_add(const struct node_id *nid)
memcpy(&new->nid, nid, sizeof(struct node_id));
if (sockfd_cache_insert(new)) {
free_cache_entry(new);
- sd_unlock(&sockfd_cache.lock);
+ sd_rw_unlock(&sockfd_cache.lock);
return;
}
- sd_unlock(&sockfd_cache.lock);
+ sd_rw_unlock(&sockfd_cache.lock);
n = uatomic_add_return(&sockfd_cache.count, 1);
sd_debug("%s, count %d", addr_to_str(nid->addr, nid->port), n);
}
@@ -264,7 +264,7 @@ static void do_grow_fds(struct work *work)
fds_count *= 2;
fds_high_watermark = FDS_WATERMARK(fds_count);
- sd_unlock(&sockfd_cache.lock);
+ sd_rw_unlock(&sockfd_cache.lock);
}
static void grow_fds_done(struct work *work)
@@ -375,7 +375,7 @@ static void sockfd_cache_put_long(const struct node_id *nid, int idx)
entry = sockfd_cache_search(nid);
if (entry)
uatomic_set_false(&entry->fds[idx].in_use);
- sd_unlock(&sockfd_cache.lock);
+ sd_rw_unlock(&sockfd_cache.lock);
}
static void sockfd_cache_close(const struct node_id *nid, int idx)
@@ -394,7 +394,7 @@ static void sockfd_cache_close(const struct node_id *nid, int idx)
entry->fds[idx].fd = -1;
uatomic_set_false(&entry->fds[idx].in_use);
}
- sd_unlock(&sockfd_cache.lock);
+ sd_rw_unlock(&sockfd_cache.lock);
}
/*
diff --git a/sheep/cluster/local.c b/sheep/cluster/local.c
index 3a89f19..7f9a6eb 100644
--- a/sheep/cluster/local.c
+++ b/sheep/cluster/local.c
@@ -31,10 +31,10 @@
static const char *shmfile = "/tmp/sheepdog_shm";
static const char *lockdir = "/tmp/sheepdog_locks/";
/*
- * we have to use sd_lock because flock isn't thread exclusive
+ * we have to use sd_rw_lock because flock isn't thread exclusive
* and it also serves to project lock_tree
*/
-static struct sd_lock lock_tree_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock lock_tree_lock = SD_RW_LOCK_INITIALIZER;
static struct rb_root lock_tree_root = RB_ROOT;
struct lock_entry {
@@ -643,7 +643,7 @@ static void local_unlock(uint64_t lock_id)
close(entry->fd);
rb_erase(&entry->rb, &lock_tree_root);
free(entry);
- sd_unlock(&lock_tree_lock);
+ sd_rw_unlock(&lock_tree_lock);
}
static int local_update_node(struct sd_node *node)
diff --git a/sheep/cluster/zookeeper.c b/sheep/cluster/zookeeper.c
index 43a40b3..2867e78 100644
--- a/sheep/cluster/zookeeper.c
+++ b/sheep/cluster/zookeeper.c
@@ -102,8 +102,8 @@ struct zk_event {
static struct rb_root sd_node_root = RB_ROOT;
static size_t nr_sd_nodes;
static struct rb_root zk_node_root = RB_ROOT;
-static struct sd_lock zk_tree_lock = SD_LOCK_INITIALIZER;
-static struct sd_lock zk_compete_master_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock zk_tree_lock = SD_RW_LOCK_INITIALIZER;
+static struct sd_rw_lock zk_compete_master_lock = SD_RW_LOCK_INITIALIZER;
static LIST_HEAD(zk_block_list);
static uatomic_bool is_master;
static uatomic_bool stop;
@@ -135,7 +135,7 @@ static inline struct zk_node *zk_tree_search(const struct node_id *nid)
sd_read_lock(&zk_tree_lock);
n = zk_tree_search_nolock(nid);
- sd_unlock(&zk_tree_lock);
+ sd_rw_unlock(&zk_tree_lock);
return n;
}
@@ -574,7 +574,7 @@ static inline void zk_tree_add(struct zk_node *node)
rb_insert(&sd_node_root, &zk->node, rb, node_cmp);
nr_sd_nodes++;
out:
- sd_unlock(&zk_tree_lock);
+ sd_rw_unlock(&zk_tree_lock);
}
static inline void zk_tree_del(struct zk_node *node)
@@ -582,14 +582,14 @@ static inline void zk_tree_del(struct zk_node *node)
sd_write_lock(&zk_tree_lock);
rb_erase(&node->rb, &zk_node_root);
free(node);
- sd_unlock(&zk_tree_lock);
+ sd_rw_unlock(&zk_tree_lock);
}
static inline void zk_tree_destroy(void)
{
sd_write_lock(&zk_tree_lock);
rb_destroy(&zk_node_root, struct zk_node, rb);
- sd_unlock(&zk_tree_lock);
+ sd_rw_unlock(&zk_tree_lock);
}
static inline void build_node_list(void)
@@ -705,7 +705,7 @@ static void zk_watcher(zhandle_t *zh, int type, int state, const char *path,
n = zk_tree_search_nolock(&znode.node.nid);
if (n)
n->gone = true;
- sd_unlock(&zk_tree_lock);
+ sd_rw_unlock(&zk_tree_lock);
if (n)
add_event(EVENT_LEAVE, &znode, NULL, 0);
}
@@ -920,7 +920,7 @@ success:
uatomic_set_true(&is_master);
sd_debug("success");
out_unlock:
- sd_unlock(&zk_compete_master_lock);
+ sd_rw_unlock(&zk_compete_master_lock);
}
static int zk_join(const struct sd_node *myself,
@@ -1135,7 +1135,7 @@ static void zk_handle_update_node(struct zk_event *ev)
assert(t);
t->node = *snode;
build_node_list();
- sd_unlock(&zk_tree_lock);
+ sd_rw_unlock(&zk_tree_lock);
sd_update_node_handler(snode);
}
diff --git a/sheep/md.c b/sheep/md.c
index 6814c61..1c065b7 100644
--- a/sheep/md.c
+++ b/sheep/md.c
@@ -32,7 +32,7 @@ struct vdisk {
struct md {
struct rb_root vroot;
struct rb_root root;
- struct sd_lock lock;
+ struct sd_rw_lock lock;
uint64_t space;
uint32_t nr_disks;
};
@@ -40,7 +40,7 @@ struct md {
static struct md md = {
.vroot = RB_ROOT,
.root = RB_ROOT,
- .lock = SD_LOCK_INITIALIZER,
+ .lock = SD_RW_LOCK_INITIALIZER,
};
static inline int nr_online_disks(void)
@@ -49,7 +49,7 @@ static inline int nr_online_disks(void)
sd_read_lock(&md.lock);
nr = md.nr_disks;
- sd_unlock(&md.lock);
+ sd_rw_unlock(&md.lock);
return nr;
}
@@ -343,7 +343,7 @@ const char *md_get_object_path(uint64_t oid)
sd_read_lock(&md.lock);
p = md_get_object_path_nolock(oid);
- sd_unlock(&md.lock);
+ sd_rw_unlock(&md.lock);
return p;
}
@@ -361,7 +361,7 @@ int for_each_object_in_wd(int (*func)(uint64_t oid, const char *path,
if (ret != SD_RES_SUCCESS)
break;
}
- sd_unlock(&md.lock);
+ sd_rw_unlock(&md.lock);
return ret;
}
@@ -380,7 +380,7 @@ int for_each_object_in_stale(int (*func)(uint64_t oid, const char *path,
if (ret != SD_RES_SUCCESS)
break;
}
- sd_unlock(&md.lock);
+ sd_rw_unlock(&md.lock);
return ret;
}
@@ -396,7 +396,7 @@ int for_each_obj_path(int (*func)(const char *path))
if (ret != SD_RES_SUCCESS)
break;
}
- sd_unlock(&md.lock);
+ sd_rw_unlock(&md.lock);
return ret;
}
@@ -427,7 +427,7 @@ static void md_do_recover(struct work *work)
md_remove_disk(disk);
nr = md.nr_disks;
out:
- sd_unlock(&md.lock);
+ sd_rw_unlock(&md.lock);
if (nr > 0)
kick_recover();
@@ -553,7 +553,7 @@ static int scan_wd(uint64_t oid, uint32_t epoch)
if (ret == SD_RES_SUCCESS)
break;
}
- sd_unlock(&md.lock);
+ sd_rw_unlock(&md.lock);
return ret;
}
@@ -607,7 +607,7 @@ uint32_t md_get_info(struct sd_md_info *info)
i++;
}
info->nr = md.nr_disks;
- sd_unlock(&md.lock);
+ sd_rw_unlock(&md.lock);
return ret;
}
@@ -645,7 +645,7 @@ static int do_plug_unplug(char *disks, bool plug)
ret = SD_RES_SUCCESS;
out:
- sd_unlock(&md.lock);
+ sd_rw_unlock(&md.lock);
if (ret == SD_RES_SUCCESS)
kick_recover();
@@ -673,7 +673,7 @@ uint64_t md_get_size(uint64_t *used)
rb_for_each_entry(disk, &md.root, rb) {
fsize += get_path_free_size(disk->path, used);
}
- sd_unlock(&md.lock);
+ sd_rw_unlock(&md.lock);
return fsize + *used;
}
diff --git a/sheep/object_cache.c b/sheep/object_cache.c
index 10a051c..9e2370c 100644
--- a/sheep/object_cache.c
+++ b/sheep/object_cache.c
@@ -48,7 +48,7 @@ struct object_cache_entry {
struct list_node dirty_list; /* For dirty list of object cache */
struct list_node lru_list; /* For lru list of object cache */
- struct sd_lock lock; /* Entry lock */
+ struct sd_rw_lock lock; /* Entry lock */
};
struct object_cache {
@@ -63,7 +63,7 @@ struct object_cache {
int push_efd; /* Used to synchronize between pusher and push threads */
uatomic_bool in_push; /* Whether if pusher is running */
- struct sd_lock lock; /* Cache lock */
+ struct sd_rw_lock lock; /* Cache lock */
};
struct push_work {
@@ -79,8 +79,8 @@ static int def_open_flags = O_RDWR;
#define HASH_BITS 5
#define HASH_SIZE (1 << HASH_BITS)
-static struct sd_lock hashtable_lock[HASH_SIZE] = {
- [0 ... HASH_SIZE - 1] = SD_LOCK_INITIALIZER
+static struct sd_rw_lock hashtable_lock[HASH_SIZE] = {
+ [0 ... HASH_SIZE - 1] = SD_RW_LOCK_INITIALIZER
};
static struct hlist_head cache_hashtable[HASH_SIZE];
@@ -187,7 +187,7 @@ static inline void write_lock_cache(struct object_cache *oc)
static inline void unlock_cache(struct object_cache *oc)
{
- sd_unlock(&oc->lock);
+ sd_rw_unlock(&oc->lock);
}
static inline void read_lock_entry(struct object_cache_entry *entry)
@@ -202,7 +202,7 @@ static inline void write_lock_entry(struct object_cache_entry *entry)
static inline void unlock_entry(struct object_cache_entry *entry)
{
- sd_unlock(&entry->lock);
+ sd_rw_unlock(&entry->lock);
}
static struct object_cache_entry *
@@ -278,7 +278,7 @@ free_cache_entry(struct object_cache_entry *entry)
oc->total_count--;
if (list_linked(&entry->dirty_list))
del_from_dirty_list(entry);
- sd_destroy_lock(&entry->lock);
+ sd_destroy_rw_lock(&entry->lock);
free(entry);
}
@@ -576,12 +576,12 @@ static void do_reclaim(struct work *work)
do_reclaim_object(cache);
cap = uatomic_read(&gcache.capacity);
if (cap <= HIGH_WATERMARK) {
- sd_unlock(&hashtable_lock[idx]);
+ sd_rw_unlock(&hashtable_lock[idx]);
sd_debug("complete, capacity %"PRIu32, cap);
return;
}
}
- sd_unlock(&hashtable_lock[idx]);
+ sd_rw_unlock(&hashtable_lock[idx]);
}
sd_debug("finished");
}
@@ -636,13 +636,13 @@ not_found:
INIT_LIST_HEAD(&cache->dirty_head);
INIT_LIST_HEAD(&cache->lru_head);
- sd_init_lock(&cache->lock);
+ sd_init_rw_lock(&cache->lock);
hlist_add_head(&cache->hash, head);
} else {
cache = NULL;
}
out:
- sd_unlock(&hashtable_lock[h]);
+ sd_rw_unlock(&hashtable_lock[h]);
return cache;
}
@@ -675,7 +675,7 @@ alloc_cache_entry(struct object_cache *oc, uint64_t idx)
entry = xzalloc(sizeof(*entry));
entry->oc = oc;
entry->idx = idx;
- sd_init_lock(&entry->lock);
+ sd_init_rw_lock(&entry->lock);
INIT_LIST_NODE(&entry->dirty_list);
INIT_LIST_NODE(&entry->lru_list);
@@ -947,7 +947,7 @@ void object_cache_delete(uint32_t vid)
/* Firstly we free memeory */
sd_write_lock(&hashtable_lock[h]);
hlist_del(&cache->hash);
- sd_unlock(&hashtable_lock[h]);
+ sd_rw_unlock(&hashtable_lock[h]);
write_lock_cache(cache);
list_for_each_entry(entry, &cache->lru_head, lru_list) {
@@ -955,7 +955,7 @@ void object_cache_delete(uint32_t vid)
uatomic_sub(&gcache.capacity, CACHE_OBJECT_SIZE);
}
unlock_cache(cache);
- sd_destroy_lock(&cache->lock);
+ sd_destroy_rw_lock(&cache->lock);
close(cache->push_efd);
free(cache);
@@ -1389,7 +1389,7 @@ int object_cache_get_info(struct object_cache_info *info)
j++;
unlock_cache(cache);
}
- sd_unlock(&hashtable_lock[i]);
+ sd_rw_unlock(&hashtable_lock[i]);
}
info->count = j;
info->directio = sys->object_cache_directio;
diff --git a/sheep/object_list_cache.c b/sheep/object_list_cache.c
index caba3ce..eefa40a 100644
--- a/sheep/object_list_cache.c
+++ b/sheep/object_list_cache.c
@@ -24,7 +24,7 @@ struct objlist_cache {
int cache_size;
uint64_t *buf;
struct rb_root root;
- struct sd_lock lock;
+ struct sd_rw_lock lock;
};
struct objlist_deletion_work {
@@ -35,7 +35,7 @@ struct objlist_deletion_work {
static struct objlist_cache obj_list_cache = {
.tree_version = 1,
.root = RB_ROOT,
- .lock = SD_LOCK_INITIALIZER,
+ .lock = SD_RW_LOCK_INITIALIZER,
};
static int objlist_cache_cmp(const struct objlist_cache_entry *a,
@@ -71,7 +71,7 @@ void objlist_cache_remove(uint64_t oid)
obj_list_cache.cache_size--;
obj_list_cache.tree_version++;
}
- sd_unlock(&obj_list_cache.lock);
+ sd_rw_unlock(&obj_list_cache.lock);
}
int objlist_cache_insert(uint64_t oid)
@@ -90,7 +90,7 @@ int objlist_cache_insert(uint64_t oid)
obj_list_cache.cache_size++;
obj_list_cache.tree_version++;
}
- sd_unlock(&obj_list_cache.lock);
+ sd_rw_unlock(&obj_list_cache.lock);
return 0;
}
@@ -106,7 +106,7 @@ int get_obj_list(const struct sd_req *hdr, struct sd_rsp *rsp, void *data)
goto out;
/* if that fails grab a write lock for the usually nessecary update */
- sd_unlock(&obj_list_cache.lock);
+ sd_rw_unlock(&obj_list_cache.lock);
sd_write_lock(&obj_list_cache.lock);
if (obj_list_cache.tree_version == obj_list_cache.buf_version)
goto out;
@@ -121,14 +121,14 @@ int get_obj_list(const struct sd_req *hdr, struct sd_rsp *rsp, void *data)
out:
if (hdr->data_length < obj_list_cache.cache_size * sizeof(uint64_t)) {
- sd_unlock(&obj_list_cache.lock);
+ sd_rw_unlock(&obj_list_cache.lock);
sd_err("GET_OBJ_LIST buffer too small");
return SD_RES_BUFFER_SMALL;
}
rsp->data_length = obj_list_cache.cache_size * sizeof(uint64_t);
memcpy(data, obj_list_cache.buf, rsp->data_length);
- sd_unlock(&obj_list_cache.lock);
+ sd_rw_unlock(&obj_list_cache.lock);
return SD_RES_SUCCESS;
}
@@ -166,7 +166,7 @@ static void objlist_deletion_work(struct work *work)
rb_erase(&entry->node, &obj_list_cache.root);
free(entry);
}
- sd_unlock(&obj_list_cache.lock);
+ sd_rw_unlock(&obj_list_cache.lock);
}
static void objlist_deletion_done(struct work *work)
diff --git a/sheep/vdi.c b/sheep/vdi.c
index a558417..f8b6bac 100644
--- a/sheep/vdi.c
+++ b/sheep/vdi.c
@@ -20,7 +20,7 @@ struct vdi_state_entry {
};
static struct rb_root vdi_state_root = RB_ROOT;
-static struct sd_lock vdi_state_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock vdi_state_lock = SD_RW_LOCK_INITIALIZER;
/*
* ec_max_data_strip represent max number of data strips in the cluster. When
@@ -68,7 +68,7 @@ static bool vid_is_snapshot(uint32_t vid)
sd_read_lock(&vdi_state_lock);
entry = vdi_state_search(&vdi_state_root, vid);
- sd_unlock(&vdi_state_lock);
+ sd_rw_unlock(&vdi_state_lock);
if (!entry) {
sd_err("No VDI entry for %" PRIx32 " found", vid);
@@ -93,7 +93,7 @@ int get_vdi_copy_number(uint32_t vid)
sd_read_lock(&vdi_state_lock);
entry = vdi_state_search(&vdi_state_root, vid);
- sd_unlock(&vdi_state_lock);
+ sd_rw_unlock(&vdi_state_lock);
if (!entry) {
sd_alert("copy number for %" PRIx32 " not found, set %d", vid,
@@ -110,7 +110,7 @@ int get_vdi_copy_policy(uint32_t vid)
sd_read_lock(&vdi_state_lock);
entry = vdi_state_search(&vdi_state_root, vid);
- sd_unlock(&vdi_state_lock);
+ sd_rw_unlock(&vdi_state_lock);
if (!entry) {
sd_alert("copy policy for %" PRIx32 " not found, set %d", vid,
@@ -167,7 +167,7 @@ int add_vdi_state(uint32_t vid, int nr_copies, bool snapshot, uint8_t cp)
entry->copy_policy = cp;
}
- sd_unlock(&vdi_state_lock);
+ sd_rw_unlock(&vdi_state_lock);
return SD_RES_SUCCESS;
}
@@ -188,7 +188,7 @@ int fill_vdi_state_list(void *data)
vs++;
nr++;
}
- sd_unlock(&vdi_state_lock);
+ sd_rw_unlock(&vdi_state_lock);
return nr * sizeof(*vs);
}
@@ -1208,7 +1208,7 @@ void clean_vdi_state(void)
sd_write_lock(&vdi_state_lock);
rb_destroy(&vdi_state_root, struct vdi_state_entry, node);
INIT_RB_ROOT(&vdi_state_root);
- sd_unlock(&vdi_state_lock);
+ sd_rw_unlock(&vdi_state_lock);
}
int sd_delete_vdi(const char *name)
diff --git a/sheepfs/volume.c b/sheepfs/volume.c
index 2fbb54c..f6aadaf 100644
--- a/sheepfs/volume.c
+++ b/sheepfs/volume.c
@@ -62,7 +62,7 @@ struct vdi_inode {
};
static struct rb_root vdi_inode_tree = RB_ROOT;
-static struct sd_lock vdi_inode_tree_lock = SD_LOCK_INITIALIZER;
+static struct sd_rw_lock vdi_inode_tree_lock = SD_RW_LOCK_INITIALIZER;
static int sheepfs_bnode_writer(uint64_t oid, void *mem, unsigned int len,
@@ -144,7 +144,7 @@ static int volume_rw_object(char *buf, uint64_t oid, size_t size,
sd_read_lock(&vdi_inode_tree_lock);
vdi = vdi_inode_tree_search(vid);
- sd_unlock(&vdi_inode_tree_lock);
+ sd_rw_unlock(&vdi_inode_tree_lock);
if (is_data_obj(oid)) {
idx = data_oid_to_idx(oid);
@@ -304,7 +304,7 @@ static int volume_do_sync(uint32_t vid)
sd_read_lock(&vdi_inode_tree_lock);
vdi = vdi_inode_tree_search(vid);
- sd_unlock(&vdi_inode_tree_lock);
+ sd_rw_unlock(&vdi_inode_tree_lock);
hdr.opcode = SD_OP_FLUSH_VDI;
hdr.obj.oid = vid_to_vdi_oid(vid);
@@ -394,7 +394,7 @@ int reset_socket_pool(void)
}
}
out:
- sd_unlock(&vdi_inode_tree_lock);
+ sd_rw_unlock(&vdi_inode_tree_lock);
return ret;
}
@@ -431,7 +431,7 @@ static int init_vdi_info(const char *entry, uint32_t *vid, size_t *size)
/* we need insert inode before calling volume_rw_object */
sd_write_lock(&vdi_inode_tree_lock);
dummy = vdi_inode_tree_insert(inode);
- sd_unlock(&vdi_inode_tree_lock);
+ sd_rw_unlock(&vdi_inode_tree_lock);
if (dummy)
goto err;
if (volume_rw_object(inode_buf, vid_to_vdi_oid(*vid), SD_INODE_SIZE,
@@ -495,7 +495,7 @@ static int volume_sync_and_delete(uint32_t vid)
sd_read_lock(&vdi_inode_tree_lock);
vdi = vdi_inode_tree_search(vid);
- sd_unlock(&vdi_inode_tree_lock);
+ sd_rw_unlock(&vdi_inode_tree_lock);
hdr.opcode = SD_OP_FLUSH_DEL_CACHE;
hdr.obj.oid = vid_to_vdi_oid(vid);
@@ -534,12 +534,12 @@ int volume_remove_entry(const char *entry)
sd_read_lock(&vdi_inode_tree_lock);
vdi = vdi_inode_tree_search(vid);
- sd_unlock(&vdi_inode_tree_lock);
+ sd_rw_unlock(&vdi_inode_tree_lock);
destroy_socket_pool(vdi->socket_pool, SOCKET_POOL_SIZE);
sd_write_lock(&vdi_inode_tree_lock);
rb_erase(&vdi->rb, &vdi_inode_tree);
- sd_unlock(&vdi_inode_tree_lock);
+ sd_rw_unlock(&vdi_inode_tree_lock);
free(vdi->inode);
free(vdi);
--
1.8.1.2
More information about the sheepdog
mailing list