[sheepdog] [PATCH 2/6] sockfd: implement shrinking mechanism for handling EMFILE

Hitoshi Mitake mitake.hitoshi at gmail.com
Tue Aug 13 18:27:37 CEST 2013


The sockfd cache is a big consumer of file descriptors, so its cached
fds should be closed when sheep faces EMFILE. This patch adds a new
function, sockfd_shrink(), which closes one unused cached fd per call.

Signed-off-by: Hitoshi Mitake <mitake.hitoshi at lab.ntt.co.jp>
---
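Not part of the patch itself, just to show how the new helper is meant
to be used: a minimal sketch of a caller that retries an fd allocation
after asking the cache to give a descriptor back. socket() here is only
a stand-in for whatever call actually hit EMFILE inside sheep.

#include <errno.h>
#include <sys/socket.h>

#include "sockfd_cache.h"	/* sockfd_shrink() */

static int socket_with_shrink(void)
{
	for (;;) {
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd >= 0 || errno != EMFILE)
			return fd;

		/* fd table full: drop one cached, idle sockfd and retry */
		if (!sockfd_shrink())
			return -1;	/* nothing left to reclaim */
	}
}

sockfd_shrink() returns false when every cached fd is either in use or
already closed, so a caller must not loop on it unconditionally.
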
 include/sockfd_cache.h |  1 +
 lib/sockfd_cache.c     | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+)

diff --git a/include/sockfd_cache.h b/include/sockfd_cache.h
index d91c56a..aaceaa7 100644
--- a/include/sockfd_cache.h
+++ b/include/sockfd_cache.h
@@ -12,6 +12,7 @@ void sockfd_cache_add(const struct node_id *nid);
 void sockfd_cache_add_group(const struct sd_node *nodes, int nr);
 
 int sockfd_init(void);
+bool sockfd_shrink(void);
 
 /* sockfd_cache */
 struct sockfd {
diff --git a/lib/sockfd_cache.c b/lib/sockfd_cache.c
index c9404ea..ef3fa7d 100644
--- a/lib/sockfd_cache.c
+++ b/lib/sockfd_cache.c
@@ -47,6 +47,12 @@ static struct sockfd_cache sockfd_cache = {
 };
 
 /*
+ * shrink_head: cursor used by sockfd_shrink() for fair shrinking;
+ * protected by sockfd_cache.lock
+ */
+static struct rb_node *shrink_head;
+
+/*
  * Suppose request size from Guest is 512k, then 4M / 512k = 8, so at
  * most 8 requests can be issued to the same sheep object. Based on this
  * assumption, '8' would be effecient for servers that only host 2~4
@@ -208,6 +214,9 @@ static bool sockfd_cache_destroy(const struct node_id *nid)
+	if (&entry->rb == shrink_head)
+		shrink_head = rb_next(&entry->rb);
+
 	rb_erase(&entry->rb, &sockfd_cache.root);
 	sd_unlock(&sockfd_cache.lock);
 
 	destroy_all_slots(entry);
 	free_cache_entry(entry);
 
@@ -529,3 +538,56 @@ void sockfd_cache_del(const struct node_id *nid, struct sockfd *sfd)
 	sockfd_cache_del_node(nid);
 	free(sfd);
 }
+
+bool sockfd_shrink(void)
+{
+	bool ret = false;
+	struct rb_node *p, *first;
+
+	sd_write_lock(&sockfd_cache.lock);
+
+	p = shrink_head ? shrink_head : rb_first(&sockfd_cache.root);
+	if (!p) {
+		sd_debug("There's no sockfd");
+		goto out;
+	}
+
+	first = p;
+	do {
+		struct sockfd_cache_entry *entry =
+			rb_entry(p, struct sockfd_cache_entry, rb);
+
+		for (int i = 0; i < fds_count; i++) {
+			if (!uatomic_set_true(&entry->fds[i].in_use))
+				/* failed to grab, someone is using */
+				continue;
+
+			if (entry->fds[i].fd == -1) {
+				/* this fd is not used */
+				uatomic_set_false(&entry->fds[i].in_use);
+				continue;
+			}
+
+			sd_debug("victim node: %s, fd: %d",
+				 nid_to_str(&entry->nid), entry->fds[i].fd);
+			close(entry->fds[i].fd);
+			entry->fds[i].fd = -1;
+			uatomic_set_false(&entry->fds[i].in_use);
+
+			shrink_head = rb_next(p);
+
+			ret = true;
+			goto out;
+		}
+
+		p = rb_next(p);
+		if (!p)
+			p = rb_first(&sockfd_cache.root);
+	} while (first != p);
+
+	sd_debug("shrinking couldn't be done");
+
+out:
+	sd_unlock(&sockfd_cache.lock);
+	return ret;
+}
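
(Aside, not part of the diff above: shrink_head is just a resume cursor,
so repeated calls walk the cache round-robin instead of always
sacrificing the first entry in the tree. The same idea in isolation,
with an array standing in for the rb-tree and a flag standing in for
the in_use marker:)

#include <stdbool.h>
#include <stdio.h>

#define NR_ENTRIES 4

static int cursor;		/* plays the role of shrink_head */
static bool busy[NR_ENTRIES];	/* plays the role of the in_use fds */

static int pick_victim(void)
{
	int start = cursor;

	do {
		int i = cursor;

		cursor = (cursor + 1) % NR_ENTRIES;
		if (!busy[i])
			return i;	/* idle entry found, "close" it */
	} while (cursor != start);

	return -1;			/* everything is in use */
}

int main(void)
{
	busy[1] = true;			/* entry 1 is busy, skip it */

	for (int n = 0; n < 3; n++)
		printf("victim: %d\n", pick_victim());
	/* prints 0, 2, 3 -- not 0, 0, 0 */
	return 0;
}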
-- 
1.8.1.2



