[sheepdog] [PATCH v2 1/4] lib, sheep: exclude stuff for tracing when it is not enabled

Hitoshi Mitake mitake.hitoshi at gmail.com
Tue Dec 17 15:42:53 CET 2013


From: Hitoshi Mitake <mitake.hitoshi at lab.ntt.co.jp>

The current build process of sheepdog compiles the code for tracing even
when tracing is disabled. While this is mostly harmless, it does consume
memory (tid_map), so we should exclude it when tracing is disabled.
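
As a rough sketch of the approach (illustration only, not part of the
patch itself): the automake conditional BUILD_TRACE now adds
-DENABLE_TRACE to AM_CPPFLAGS, so tracing-only state and declarations
can be fenced off with the preprocessor and disappear entirely from
non-tracing builds:

    #ifdef ENABLE_TRACE
    /* present only when configure enables tracing (-DENABLE_TRACE) */
    static size_t tid_max;
    static unsigned long *tid_map;

    void suspend_worker_threads(void);
    void resume_worker_threads(void);
    #endif /* ENABLE_TRACE */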

In addition, this patch adds a new mutex, tid_map_lock, for protecting
tid_map. Previously, work.c reused wi->pending_lock to protect the bitmap
as well, which made the protection scheme confusing.
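
For illustration only (the helper names below are hypothetical, and
alloc_bitmap/set_bit/clear_bit are assumed to behave as they are used in
the diff), the intended scheme is that the bitmap is always touched under
its own dedicated lock, independent of wi->pending_lock:

    static pthread_mutex_t tid_map_lock = PTHREAD_MUTEX_INITIALIZER;

    static void register_worker_tid(int tid)
    {
            pthread_mutex_lock(&tid_map_lock);
            /* grow tid_map here if tid exceeds tid_max (omitted) */
            set_bit(tid, tid_map);
            pthread_mutex_unlock(&tid_map_lock);
    }

    static void unregister_worker_tid(int tid)
    {
            pthread_mutex_lock(&tid_map_lock);
            clear_bit(tid, tid_map);
            pthread_mutex_unlock(&tid_map_lock);
    }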

Signed-off-by: Hitoshi Mitake <mitake.hitoshi at lab.ntt.co.jp>
---
 include/work.h      |  7 +++++--
 lib/Makefile.am     |  4 ++++
 lib/work.c          | 58 ++++++++++++++++++++++++++++++++++++++++-------------
 sheep/Makefile.am   |  1 +
 sheep/trace/trace.c |  1 +
 5 files changed, 55 insertions(+), 16 deletions(-)

diff --git a/include/work.h b/include/work.h
index a5808b5..0cb3313 100644
--- a/include/work.h
+++ b/include/work.h
@@ -61,9 +61,12 @@ static inline bool is_worker_thread(void)
 int init_work_queue(size_t (*get_nr_nodes)(void));
 struct work_queue *create_work_queue(const char *name, enum wq_thread_control);
 struct work_queue *create_ordered_work_queue(const char *name);
-void suspend_worker_threads(void);
-void resume_worker_threads(void);
 void queue_work(struct work_queue *q, struct work *work);
 bool work_queue_empty(struct work_queue *q);
 
+#ifdef ENABLE_TRACE
+void suspend_worker_threads(void);
+void resume_worker_threads(void);
+#endif	/* ENABLE_TRACE */
+
 #endif
diff --git a/lib/Makefile.am b/lib/Makefile.am
index a681167..5204879 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -11,6 +11,10 @@ if BUILD_SHA1_HW
 libsheepdog_a_SOURCES	+= sha1_ssse3.S
 endif
 
+if BUILD_TRACE
+AM_CPPFLAGS		+= -DENABLE_TRACE
+endif
+
 # support for GNU Flymake
 check-syntax:
 	$(COMPILE) -fsyntax-only $(CHK_SOURCES)
diff --git a/lib/work.c b/lib/work.c
index 6933e1a..84eb727 100644
--- a/lib/work.c
+++ b/lib/work.c
@@ -33,13 +33,19 @@
 #include "work.h"
 #include "event.h"
 
+#ifdef ENABLE_TRACE
+
 #define TID_MAX_DEFAULT 0x8000 /* default maximum tid for most systems */
 
 static size_t tid_max;
 static unsigned long *tid_map;
+static pthread_mutex_t tid_map_lock = PTHREAD_MUTEX_INITIALIZER;
+
 static int resume_efd;
 static int ack_efd;
 
+#endif	/* ENABLE_TRACE */
+
 /*
  * The protection period from shrinking work queue.  This is necessary
  * to avoid many calls of pthread_create.  Without it, threads are
@@ -155,6 +161,8 @@ static int create_worker_threads(struct worker_info *wi, size_t nr_threads)
 	return 0;
 }
 
+#ifdef ENABLE_TRACE
+
 void suspend_worker_threads(void)
 {
 	struct worker_info *wi;
@@ -197,6 +205,18 @@ void resume_worker_threads(void)
 	}
 }
 
+static void suspend(int num)
+{
+	int uninitialized_var(value);
+
+	eventfd_xwrite(ack_efd, 1); /* ack of suspend */
+	value = eventfd_xread(resume_efd);
+	assert(value == 1);
+	eventfd_xwrite(ack_efd, 1); /* ack of resume */
+}
+
+#endif	/* ENABLE_TRACE */
+
 void queue_work(struct work_queue *q, struct work *work)
 {
 	struct worker_info *wi = container_of(q, struct worker_info, q);
@@ -252,7 +272,9 @@ static void *worker_routine(void *arg)
 	/* started this thread */
 	pthread_mutex_unlock(&wi->startup_lock);
 
-	pthread_mutex_lock(&wi->pending_lock);
+#ifdef ENABLE_TRACE
+
+	pthread_mutex_lock(&tid_map_lock);
 	if (tid > tid_max) {
 		size_t old_tid_max = tid_max;
 
@@ -263,14 +285,22 @@ static void *worker_routine(void *arg)
 		tid_map = alloc_bitmap(tid_map, old_tid_max, tid_max);
 	}
 	set_bit(tid, tid_map);
-	pthread_mutex_unlock(&wi->pending_lock);
+	pthread_mutex_unlock(&tid_map_lock);
+
+#endif	/* ENABLE_TRACE */
 
 	while (true) {
 
 		pthread_mutex_lock(&wi->pending_lock);
 		if (wq_need_shrink(wi)) {
 			wi->nr_threads--;
+
+#ifdef ENABLE_TRACE
+			pthread_mutex_lock(&tid_map_lock);
 			clear_bit(tid, tid_map);
+			pthread_mutex_unlock(&tid_map_lock);
+#endif
+
 			pthread_mutex_unlock(&wi->pending_lock);
 			pthread_detach(pthread_self());
 			sd_debug("destroy thread %s %d, %zu", wi->name, tid,
@@ -302,16 +332,6 @@ retest:
 	pthread_exit(NULL);
 }
 
-static void suspend(int num)
-{
-	int uninitialized_var(value);
-
-	eventfd_xwrite(ack_efd, 1); /* ack of suspend */
-	value = eventfd_xread(resume_efd);
-	assert(value == 1);
-	eventfd_xwrite(ack_efd, 1); /* ack of resume */
-}
-
 int init_work_queue(size_t (*get_nr_nodes)(void))
 {
 	int ret;
@@ -321,13 +341,21 @@ int init_work_queue(size_t (*get_nr_nodes)(void))
 	if (wq_get_nr_nodes)
 		nr_nodes = wq_get_nr_nodes();
 
+	efd = eventfd(0, EFD_NONBLOCK);
+	if (efd < 0) {
+		sd_err("failed to create event fd: %m");
+		return 1;
+	}
+
+#ifdef ENABLE_TRACE
+
 	tid_max = TID_MAX_DEFAULT;
 	tid_map = alloc_bitmap(NULL, 0, tid_max);
 
 	resume_efd = eventfd(0, EFD_SEMAPHORE);
 	ack_efd = eventfd(0, EFD_SEMAPHORE);
-	efd = eventfd(0, EFD_NONBLOCK);
-	if (resume_efd < 0 || ack_efd < 0 || efd < 0) {
+
+	if (resume_efd < 0 || ack_efd < 0) {
 		sd_err("failed to create event fds: %m");
 		return 1;
 	}
@@ -338,6 +366,8 @@ int init_work_queue(size_t (*get_nr_nodes)(void))
 		return -1;
 	}
 
+#endif	/* ENABLE_TRACE */
+
 	ret = register_event(efd, worker_thread_request_done, NULL);
 	if (ret) {
 		sd_err("failed to register event fd %m");
diff --git a/sheep/Makefile.am b/sheep/Makefile.am
index 3cfec53..5fff697 100644
--- a/sheep/Makefile.am
+++ b/sheep/Makefile.am
@@ -44,6 +44,7 @@ sheep_SOURCES		+= cluster/shepherd.c
 endif
 
 if BUILD_TRACE
+AM_CPPFLAGS		+= -DENABLE_TRACE
 sheep_SOURCES		+= trace/trace.c trace/mcount.S trace/graph.c trace/checker.c
 endif
 
diff --git a/sheep/trace/trace.c b/sheep/trace/trace.c
index f4f11e7..937dc72 100644
--- a/sheep/trace/trace.c
+++ b/sheep/trace/trace.c
@@ -14,6 +14,7 @@
 #include <bfd.h>
 
 #include "trace.h"
+#include "work.h"
 
 /* Intel recommended one for 5 bytes nops (nopl 0x0(%rax,%rax,1)) */
 static const unsigned char NOP5[INSN_SIZE] = {0x0f, 0x1f, 0x44, 0x00, 0x00};
-- 
1.8.1.2