[sheepdog] [PATCH V2 08/11] sheep: send dirty object list to each sheep when cluster does recovery
Yunkai Zhang
yunkai.me at gmail.com
Thu Aug 9 10:43:46 CEST 2012
From: Yunkai Zhang <qiushu.zyk at taobao.com>
When the cluster does recovery, the dirty object list should be sent to each
sheep, which will use this list to decide which objects should be dropped and
which node should be selected to fetch dirty object data.
Signed-off-by: Yunkai Zhang <qiushu.zyk at taobao.com>
---
include/internal_proto.h | 16 +++++++++++++++-
sheep/object_list_cache.c | 43 ++++++++++++++++++++++++++++++++++++++++++-
sheep/ops.c | 38 ++++++++++++++++++++++++++++++++------
sheep/recovery.c | 40 +++++++++++++++++++++++++++++++---------
4 files changed, 120 insertions(+), 17 deletions(-)
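Note (not part of the patch): a quick sketch of the object list response after
this change may help when reading the diff. The payload is nr_oids 64-bit
object IDs followed, when recovery is disabled, by nr_dirty_oid_infos
struct dirty_oid_info entries, as documented in the sd_list_rsp comment and
consumed by prepare_object_list() below. The helper and its name are purely
illustrative:

#include <stdio.h>
#include <inttypes.h>
#include "internal_proto.h"	/* struct sd_list_rsp, struct dirty_oid_info */

/*
 * Illustrative sketch only: walk the payload built by get_obj_list(),
 * assuming the layout described in the sd_list_rsp comment (nr_oids
 * object IDs, then nr_dirty_oid_infos dirty_oid_info entries).
 */
static void parse_obj_list_payload(const struct sd_list_rsp *rsp,
				   const void *data)
{
	const uint64_t *oids = data;
	const struct dirty_oid_info *dois;
	uint32_t i;

	/* dirty entries start right after the plain object ID array */
	dois = (const struct dirty_oid_info *)
		((const char *)data + rsp->nr_oids * sizeof(uint64_t));

	for (i = 0; i < rsp->nr_oids; i++)
		printf("oid: %"PRIx64"\n", oids[i]);

	/* dirty entries are appended only while recovery is disabled */
	for (i = 0; i < rsp->nr_dirty_oid_infos; i++)
		printf("dirty oid: %"PRIx64", flag: %d\n",
		       dois[i].oid, dois[i].dirty_flag);
}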
diff --git a/include/internal_proto.h b/include/internal_proto.h
index 742aa1b..491b03d 100644
--- a/include/internal_proto.h
+++ b/include/internal_proto.h
@@ -141,7 +141,16 @@ struct sd_list_rsp {
uint32_t id;
uint32_t data_length;
uint32_t result;
- uint32_t pad[7];
+ /*
+ * 1) if there is no dirty object:
+ * data_length = nr_oids * sizeof(uint64_t)
+ * 2) else:
+ * data_length = nr_oids * sizeof(uint64_t)
+ * + nr_dirty_oid_infos * sizeof(struct dirty_oid_info)
+ */
+ uint32_t nr_oids;
+ uint32_t nr_dirty_oid_infos;
+ uint32_t pad[6];
};
struct sd_node_req {
@@ -226,4 +235,9 @@ struct vdi_op_message {
uint8_t data[0];
};
+struct dirty_oid_info {
+ uint8_t dirty_flag;
+ uint64_t oid;
+};
+
#endif /* __INTERNAL_PROTO_H__ */
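Note (not part of the patch): the dirty_flag values below are used as bare
integers; based on how object_list_cache.c and ops.c use them, they can be
read as follows. The enum and its names are an illustrative sketch, not
identifiers from the sheepdog source:

/* Illustrative only -- the patch itself uses bare 0/1/2 for dirty_flag. */
enum {
	DIRTY_FLAG_CLEAN   = 0, /* untouched while recovery was disabled */
	DIRTY_FLAG_WRITTEN = 1, /* written while a node had left
				   (SD_FLAG_MARK_DIRTY_OBJECT set) */
	DIRTY_FLAG_DELETED = 2, /* removed while a node had left;
				   skipped by get_obj_list() */
};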
diff --git a/sheep/object_list_cache.c b/sheep/object_list_cache.c
index fe6b5bd..4c34783 100644
--- a/sheep/object_list_cache.c
+++ b/sheep/object_list_cache.c
@@ -202,7 +202,15 @@ int get_obj_list(const struct sd_list_req *hdr, struct sd_list_rsp *rsp, void *d
obj_list_cache.cache_size * sizeof(uint64_t));
list_for_each_entry(entry, &obj_list_cache.entry_list, list) {
- obj_list_cache.buf[nr++] = entry->oid;
+ if (sys->disable_recovery) {
+ /* excluding deleted entries */
+ if (entry->dirty_flag != 2)
+ obj_list_cache.buf[nr++] = entry->oid;
+ } else {
+ assert(entry->dirty_flag == 0);
+ obj_list_cache.buf[nr++] = entry->oid;
+ }
+
}
out:
@@ -212,8 +220,41 @@ out:
return SD_RES_EIO;
}
+ rsp->nr_dirty_oid_infos = 0;
+ rsp->nr_oids = obj_list_cache.cache_size;
rsp->data_length = obj_list_cache.cache_size * sizeof(uint64_t);
memcpy(data, obj_list_cache.buf, rsp->data_length);
+
+ if (sys->disable_recovery) {
+ char *p;
+ struct dirty_oid_info *doi;
+
+ doi = malloc(SD_DATA_OBJ_SIZE * sizeof(*doi));
+ if (!doi) {
+ eprintf("Out of memory\n");
+ return SD_RES_EIO;
+ }
+
+ nr = 0;
+ list_for_each_entry(entry, &obj_list_cache.entry_list, list) {
+ if (entry->dirty_flag) {
+ dprintf("dirty_oid:%"PRIx64", dirty_flag:%d\n",
+ entry->oid, entry->dirty_flag);
+ doi[nr].oid = entry->oid;
+ doi[nr++].dirty_flag = entry->dirty_flag;
+ }
+ }
+
+ if (nr) {
+ p = data;
+ memcpy(&p[rsp->data_length], doi, nr * sizeof(*doi));
+ rsp->data_length += nr * sizeof(*doi);
+ rsp->nr_dirty_oid_infos = nr;
+ }
+ dprintf("nr_oids:%"PRIu32", nr_dois:%"PRIu32", len:%"PRIu32"\n",
+ rsp->nr_oids, nr, rsp->data_length);
+ }
+
pthread_rwlock_unlock(&obj_list_cache.lock);
return SD_RES_SUCCESS;
}
diff --git a/sheep/ops.c b/sheep/ops.c
index de65b5d..de0dae7 100644
--- a/sheep/ops.c
+++ b/sheep/ops.c
@@ -298,12 +298,18 @@ static int cluster_enable_recover(const struct sd_req *req,
start_recovery(vnode_info, old_vnode_info);
put_vnode_info(old_vnode_info);
+ } else if (!node_in_recovery()) {
+ /*
+ * If recovery is not running, clear recover state
+ * directly, otherwise they will be reset when
+ * recovery finished.
+ */
+ sys->disable_recovery = 0;
+ nr_joining_nodes = 0;
+ nr_leaving_nodes = 0;
+ nr_all_nodes = 0;
}
- nr_all_nodes = 0;
- nr_joining_nodes = 0;
- nr_leaving_nodes = 0;
- sys->disable_recovery = 0;
return SD_RES_SUCCESS;
}
@@ -736,6 +742,12 @@ int peer_remove_obj(struct request *req)
if (req->rq.flags & SD_FLAG_MARK_DIRTY_OBJECT)
has_left_node = 1;
+ if (!sys->disable_recovery)
+ assert(has_left_node == 0);
+
+ if (has_left_node)
+ dprintf("dirty_oid:%"PRIx64", dirty_flag:%d\n",
+ req->rq.obj.oid, 2);
objlist_cache_remove(oid, has_left_node);
object_cache_remove(oid);
@@ -808,13 +820,20 @@ int peer_write_obj(struct request *req)
if (req->rq.flags & SD_FLAG_MARK_DIRTY_OBJECT)
has_left_node = 1;
+ if (!sys->disable_recovery)
+ assert(has_left_node == 0);
+
memset(&iocb, 0, sizeof(iocb));
iocb.epoch = epoch;
iocb.flags = hdr->flags;
ret = do_write_obj(&iocb, hdr, epoch, req->data, 0);
- if (SD_RES_SUCCESS == ret)
+ if (SD_RES_SUCCESS == ret) {
+ if (has_left_node)
+ dprintf("dirty_oid:%"PRIx64", dirty_flag:%d\n",
+ req->rq.obj.oid, has_left_node);
objlist_cache_insert(req->rq.obj.oid, has_left_node);
+ }
return ret;
}
@@ -833,6 +852,9 @@ int peer_create_and_write_obj(struct request *req)
if (req->rq.flags & SD_FLAG_MARK_DIRTY_OBJECT)
has_left_node = 1;
+ if (!sys->disable_recovery)
+ assert(has_left_node == 0);
+
memset(&iocb, 0, sizeof(iocb));
iocb.epoch = epoch;
iocb.flags = hdr->flags;
@@ -863,8 +885,12 @@ int peer_create_and_write_obj(struct request *req)
} else
ret = do_write_obj(&iocb, hdr, epoch, req->data, 1);
- if (SD_RES_SUCCESS == ret)
+ if (SD_RES_SUCCESS == ret) {
+ if (has_left_node)
+ dprintf("dirty_oid:%"PRIx64", dirty_flag:%d\n",
+ req->rq.obj.oid, has_left_node);
objlist_cache_insert(oid, has_left_node);
+ }
out:
if (buf)
free(buf);
diff --git a/sheep/recovery.c b/sheep/recovery.c
index d74810f..38d79d8 100644
--- a/sheep/recovery.c
+++ b/sheep/recovery.c
@@ -347,6 +347,11 @@ static inline void finish_recovery(struct recovery_work *rw)
free_recovery_work(rw);
+ sys->disable_recovery = 0;
+ nr_joining_nodes = 0;
+ nr_leaving_nodes = 0;
+ nr_all_nodes = 0;
+
dprintf("recovery complete: new epoch %"PRIu32"\n",
sys->recovered_epoch);
}
@@ -464,7 +469,8 @@ static void finish_object_list(struct work *work)
/* Fetch the object list from all the nodes in the cluster */
static int fetch_object_list(struct sd_node *e, uint32_t epoch,
- uint8_t *buf, size_t buf_size)
+ uint8_t *buf, size_t buf_size,
+ uint32_t *nr_dirty_oid_infos)
{
int fd, ret;
unsigned wlen, rlen;
@@ -501,9 +507,11 @@ static int fetch_object_list(struct sd_node *e, uint32_t epoch,
return -1;
}
- dprintf("%zu\n", rsp->data_length / sizeof(uint64_t));
+ *nr_dirty_oid_infos = rsp->nr_dirty_oid_infos;
+ dprintf("nr_oids:%"PRIu32", nr_dois:%"PRIu32", len:%"PRIu32"\n",
+ rsp->nr_oids, *nr_dirty_oid_infos, rsp->data_length);
- return rsp->data_length / sizeof(uint64_t);
+ return rsp->nr_oids;
}
/* Screen out objects that don't belong to this node */
@@ -548,19 +556,22 @@ static void prepare_object_list(struct work *work)
struct recovery_work *rw = container_of(work, struct recovery_work,
work);
uint8_t *buf = NULL;
- size_t buf_size = SD_DATA_OBJ_SIZE; /* FIXME */
+ size_t buf_size;
struct sd_node *cur = rw->cur_vinfo->nodes;
int cur_nr = rw->cur_vinfo->nr_nodes;
- int start = random() % cur_nr, i, end = cur_nr;
+ int start = random() % cur_nr, i, j, end = cur_nr;
dprintf("%u\n", rw->epoch);
+ buf_size = SD_DATA_OBJ_SIZE * sizeof(uint64_t)
+ + SD_DATA_OBJ_SIZE * sizeof(struct dirty_oid_info);
buf = xmalloc(buf_size);
again:
/* We need to start at random node for better load balance */
for (i = start; i < end; i++) {
- int buf_nr;
struct sd_node *node = cur + i;
+ struct dirty_oid_info *dois;
+ uint32_t nr_oids, nr_dois, offset;
if (next_rw) {
dprintf("go to the next recovery\n");
@@ -570,10 +581,21 @@ again:
/* new node doesn't have a list file */
continue;
- buf_nr = fetch_object_list(node, rw->epoch, buf, buf_size);
- if (buf_nr < 0)
+ nr_oids = fetch_object_list(node, rw->epoch, buf, buf_size,
+ &nr_dois);
+ if ((nr_oids + nr_dois) < 0)
continue;
- screen_object_list(rw, (uint64_t *)buf, buf_nr);
+
+ if (sys->disable_recovery) {
+ offset = nr_oids * sizeof(uint64_t);
+ dois = (struct dirty_oid_info *)&buf[offset];
+ for (j = 0; j < nr_dois; j++) {
+ dprintf("dirty_oid:%"PRIx64", dirty_flag:%d\n",
+ dois[j].oid, dois[j].dirty_flag);
+ }
+ }
+
+ screen_object_list(rw, (uint64_t *)buf, nr_oids);
}
if (start != 0) {
--
1.7.11.2