[sheepdog] [PATCH 1/2] add helper macros to try lock and unlock based on urcu

MORITA Kazutaka morita.kazutaka at lab.ntt.co.jp
Thu Nov 1 06:07:14 CET 2012


Signed-off-by: MORITA Kazutaka <morita.kazutaka at lab.ntt.co.jp>
---
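A minimal usage sketch of the new helpers, to show the intended pattern;
the names in_reclaim and reclaim_worker below are illustrative only and
are not taken from the tree.  The lock is a plain int that is touched
only through urcu's uatomic primitives, so trylock either acquires it or
fails immediately without blocking:

  #include <urcu/uatomic.h>

  /* helpers introduced by this patch (see include/util.h below) */
  #define uatomic_trylock(lock)   (uatomic_cmpxchg((lock), 0, 1) == 0)
  #define uatomic_is_locked(lock) (uatomic_read(lock) == 1)
  #define uatomic_unlock(lock)    uatomic_set((lock), 0)

  static int in_reclaim;  /* 0: unlocked, 1: locked */

  static void reclaim_worker(void)
  {
          if (!uatomic_trylock(&in_reclaim))
                  return;  /* another thread is already reclaiming */

          /* ... do the actual work ... */

          uatomic_unlock(&in_reclaim);
  }

uatomic_cmpxchg() returns the old value, so comparing it against 0 makes
uatomic_trylock() evaluate to true exactly when the caller is the one
that flipped the lock from 0 to 1.
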
 include/util.h            |    5 +++++
 sheep/cluster/zookeeper.c |   10 ++++++----
 sheep/object_cache.c      |    6 +++---
 sheep/sockfd_cache.c      |   10 ++++------
 sheepfs/volume.c          |    4 ++--
 5 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/include/util.h b/include/util.h
index afab903..bfde8c9 100644
--- a/include/util.h
+++ b/include/util.h
@@ -85,6 +85,11 @@ void trim_zero_sectors(void *buf, uint64_t *offset, uint32_t *len);
 void set_trimmed_sectors(void *buf, uint64_t offset, uint32_t len,
 			 uint32_t requested_len);
 
+/* return true if the lock was acquired */
+#define uatomic_trylock(lock)  (uatomic_cmpxchg((lock), 0, 1) == 0)
+#define uatomic_is_locked(lock)  (uatomic_read(lock) == 1)
+#define uatomic_unlock(lock) uatomic_set((lock), 0)
+
 #ifdef assert
 #undef assert
 #endif
diff --git a/sheep/cluster/zookeeper.c b/sheep/cluster/zookeeper.c
index 388bebc..a785b1a 100644
--- a/sheep/cluster/zookeeper.c
+++ b/sheep/cluster/zookeeper.c
@@ -286,7 +286,7 @@ static int zk_queue_pop(zhandle_t *zh, struct zk_event *ev)
 		return 0;
 	}
 
-	if (!called_by_zk_unblock && uatomic_read(&zk_notify_blocked) > 0)
+	if (!called_by_zk_unblock && uatomic_is_locked(&zk_notify_blocked))
 		return -1;
 
 	if (zk_queue_empty(zh))
@@ -639,7 +639,7 @@ static void zk_unblock(void *msg, size_t msg_len)
 
 	zk_queue_push_back(zhandle, &ev);
 
-	uatomic_dec(&zk_notify_blocked);
+	uatomic_unlock(&zk_notify_blocked);
 
 	/* this notify is necessary */
 	dprintf("write event to efd:%d\n", efd);
@@ -778,8 +778,10 @@ static void zk_handler(int listen_fd, int events, void *data)
 	case EVENT_BLOCK:
 		dprintf("BLOCK\n");
 		zk_queue_push_back(zhandle, NULL);
-		if (sd_block_handler(&ev.sender.node))
-			uatomic_inc(&zk_notify_blocked);
+		if (sd_block_handler(&ev.sender.node)) {
+			bool locked = uatomic_trylock(&zk_notify_blocked);
+			assert(locked);
+		}
 		break;
 	case EVENT_NOTIFY:
 		dprintf("NOTIFY\n");
diff --git a/sheep/object_cache.c b/sheep/object_cache.c
index ab6499d..427ff75 100644
--- a/sheep/object_cache.c
+++ b/sheep/object_cache.c
@@ -96,7 +96,7 @@ static struct hlist_head cache_hashtable[HASH_SIZE];
  */
 static inline int mark_cache_in_reclaim(void)
 {
-	return uatomic_cmpxchg(&sys_cache.in_reclaim, 0, 1);
+	return uatomic_trylock(&sys_cache.in_reclaim);
 }
 
 static inline bool entry_is_dirty(const struct object_cache_entry *entry)
@@ -558,7 +558,7 @@ static void do_reclaim(struct work *work)
 
 static void reclaim_done(struct work *work)
 {
-	uatomic_set(&sys_cache.in_reclaim, 0);
+	uatomic_unlock(&sys_cache.in_reclaim);
 	free(work);
 }
 
@@ -1236,7 +1236,7 @@ int object_cache_init(const char *p)
 
 	CDS_INIT_LIST_HEAD(&sys_cache.cache_lru_list);
 	uatomic_set(&sys_cache.cache_size, 0);
-	uatomic_set(&sys_cache.in_reclaim, 0);
+	uatomic_unlock(&sys_cache.in_reclaim);
 
 	ret = load_existing_cache();
 err:
diff --git a/sheep/sockfd_cache.c b/sheep/sockfd_cache.c
index 6e5e289..cdb71d7 100644
--- a/sheep/sockfd_cache.c
+++ b/sheep/sockfd_cache.c
@@ -128,7 +128,7 @@ static inline int get_free_slot(struct sockfd_cache_entry *entry)
 	int idx = -1, i;
 
 	for (i = 0; i < fds_count; i++) {
-		if (uatomic_cmpxchg(&entry->fds[i].in_use, 0, 1))
+		if (!uatomic_trylock(&entry->fds[i].in_use))
 			continue;
 		idx = i;
 		break;
@@ -165,7 +165,7 @@ static inline bool slots_all_free(struct sockfd_cache_entry *entry)
 {
 	int i;
 	for (i = 0; i < fds_count; i++)
-		if (uatomic_read(&entry->fds[i].in_use))
+		if (uatomic_is_locked(&entry->fds[i].in_use))
 			return false;
 	return true;
 }
@@ -354,7 +354,7 @@ static struct sockfd *sockfd_cache_get(const struct node_id *nid, char *name)
 	dprintf("create connection %s:%d idx %d\n", name, nid->port, idx);
 	fd = connect_to(name, nid->port);
 	if (fd < 0) {
-		uatomic_dec(&entry->fds[idx].in_use);
+		uatomic_unlock(&entry->fds[idx].in_use);
 		return NULL;
 	}
 	entry->fds[idx].fd = fd;
@@ -370,7 +370,6 @@ static void sockfd_cache_put(const struct node_id *nid, int idx)
 {
 	struct sockfd_cache_entry *entry;
 	char name[INET6_ADDRSTRLEN];
-	int refcnt;
 
 	addr_to_str(name, sizeof(name), nid->addr, 0);
 	dprintf("%s:%d idx %d\n", name, nid->port, idx);
@@ -380,8 +379,7 @@ static void sockfd_cache_put(const struct node_id *nid, int idx)
 	pthread_rwlock_unlock(&sockfd_cache.lock);
 
 	assert(entry);
-	refcnt = uatomic_cmpxchg(&entry->fds[idx].in_use, 1, 0);
-	assert(refcnt == 1);
+	uatomic_unlock(&entry->fds[idx].in_use);
 }
 
 /*
diff --git a/sheepfs/volume.c b/sheepfs/volume.c
index 3f1391f..982118b 100644
--- a/sheepfs/volume.c
+++ b/sheepfs/volume.c
@@ -121,7 +121,7 @@ retry:
 	sock_idx = uatomic_add_return(&vdi->socket_poll_adder, 1) %
 		   SOCKET_POOL_SIZE;
 	/* if socket_in_use[sock_idx] == 0, set it to 1, otherwise, retry */
-	if (uatomic_cmpxchg(&vdi->socket_in_use[sock_idx], 0, 1))
+	if (!uatomic_trylock(&vdi->socket_in_use[sock_idx]))
 		goto retry;
 	fd = vdi->socket_pool[sock_idx];
 	*idx = sock_idx;
@@ -131,7 +131,7 @@ retry:
 
 static inline void put_socket_fd(struct vdi_inode *vdi, int idx)
 {
-	uatomic_dec(&vdi->socket_in_use[idx]);
+	uatomic_unlock(&vdi->socket_in_use[idx]);
 }
 
 static int volume_rw_object(char *buf, uint64_t oid, size_t size,
-- 
1.7.2.5



