[Sheepdog] [PATCH v4 4/6] rename process_work and process_main to process_top and process_bottom
levin li
levin108 at gmail.com
Thu May 3 12:25:47 CEST 2012
The old names 'work' and 'main' suggest that process_work must run in
a worker thread and process_main must run in the main thread. In
fact, for cluster requests we sometimes want to run process_main in a
worker thread as well, and process_work runs only on the local node.
Moreover, for a cluster request process_work always runs before
process_main, so the names 'top' and 'bottom' describe the two phases
better. Rename the callbacks accordingly.
Signed-off-by: levin li <xingke.lwp at taobao.com>
---
sheep/group.c | 16 ++++-----
sheep/ops.c | 94 ++++++++++++++++++++++++++--------------------------
sheep/sdnet.c | 8 ++---
sheep/sheep_priv.h | 8 ++---
sheep/store.c | 2 +-
5 files changed, 64 insertions(+), 64 deletions(-)
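For reviewers, here is a minimal, self-contained sketch of the dispatch
pattern after the rename (the struct and handlers below are simplified
stand-ins, not the real sheep types): process_top prepares the request,
process_bottom applies its result, and the has_/do_ helpers in ops.c
reduce to NULL checks plus indirect calls like these.

#include <stdio.h>

struct sd_req { int opcode; };
struct sd_rsp { int result; };

struct op_template {
	int (*process_top)(const struct sd_req *req, struct sd_rsp *rsp,
			   void *data);
	int (*process_bottom)(const struct sd_req *req, struct sd_rsp *rsp,
			      void *data);
};

static int example_top(const struct sd_req *req, struct sd_rsp *rsp,
		       void *data)
{
	/* phase one: runs first (for cluster ops, on the local node only) */
	printf("top: prepare the request\n");
	return 0;
}

static int example_bottom(const struct sd_req *req, struct sd_rsp *rsp,
			  void *data)
{
	/* phase two: runs afterwards (for cluster ops, on every node) */
	printf("bottom: apply the result\n");
	return 0;
}

int main(void)
{
	struct op_template op = {
		.process_top = example_top,
		.process_bottom = example_bottom,
	};
	struct sd_req req = { 0 };
	struct sd_rsp rsp = { 0 };

	/* has_process_top()/do_process_top() in ops.c boil down to a
	 * NULL check plus an indirect call, as sketched here. */
	if (op.process_top)
		rsp.result = op.process_top(&req, &rsp, NULL);
	if (rsp.result == 0 && op.process_bottom)
		rsp.result = op.process_bottom(&req, &rsp, NULL);

	return rsp.result;
}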
diff --git a/sheep/group.c b/sheep/group.c
index cc03d55..f2a62f9 100644
--- a/sheep/group.c
+++ b/sheep/group.c
@@ -215,11 +215,11 @@ static void do_cluster_op(void *arg)
req = list_first_entry(&sys->pending_list, struct request, pending_list);
- if (has_process_main(req->op))
+ if (has_process_bottom(req->op))
data = msg->data;
else
data = req->data;
- ret = do_process_work(req->op, (const struct sd_req *)&msg->req,
+ ret = do_process_top(req->op, (const struct sd_req *)&msg->req,
(struct sd_rsp *)&msg->rsp, data);
msg->rsp.result = ret;
@@ -234,7 +234,7 @@ void do_cluster_request(struct work *work)
eprintf("%p %x\n", req, hdr->opcode);
- if (has_process_main(req->op))
+ if (has_process_bottom(req->op))
size = sizeof(*msg) + hdr->data_length;
else
size = sizeof(*msg);
@@ -247,12 +247,12 @@ void do_cluster_request(struct work *work)
msg->req = *((struct sd_vdi_req *)&req->rq);
msg->rsp = *((struct sd_vdi_rsp *)&req->rp);
- if (has_process_main(req->op))
+ if (has_process_bottom(req->op))
memcpy(msg->data, req->data, hdr->data_length);
list_add_tail(&req->pending_list, &sys->pending_list);
- if (has_process_work(req->op))
+ if (has_process_top(req->op))
sys->cdrv->notify(msg, size, do_cluster_op);
else {
msg->rsp.result = SD_RES_SUCCESS;
@@ -623,15 +623,15 @@ static void __sd_notify_done(struct event_struct *cevent)
int ret = msg->rsp.result;
struct sd_op_template *op = get_sd_op(msg->req.opcode);
- if (ret == SD_RES_SUCCESS && has_process_main(op))
- ret = do_process_main(op, (const struct sd_req *)&msg->req,
+ if (ret == SD_RES_SUCCESS && has_process_bottom(op))
+ ret = do_process_bottom(op, (const struct sd_req *)&msg->req,
(struct sd_rsp *)&msg->rsp, msg->data);
if (!req)
return;
msg->rsp.result = ret;
- if (has_process_main(req->op))
+ if (has_process_bottom(req->op))
memcpy(req->data, msg->data, msg->rsp.data_length);
memcpy(&req->rp, &msg->rsp, sizeof(req->rp));
req->done(req);
diff --git a/sheep/ops.c b/sheep/ops.c
index 6b2dec3..ab5449e 100644
--- a/sheep/ops.c
+++ b/sheep/ops.c
@@ -39,25 +39,25 @@ struct sd_op_template {
int force;
/*
- * process_work() will be called in the worker thread, and
- * process_main() will be called in the main thread.
+ * process_top() will be called in the worker thread, and
+ * process_bottom() will be called in the main thread.
*
* If type is SD_OP_TYPE_CLUSTER, it is guaranteed that only
* one node processes a cluster operation at the same time.
* We can use this for something like distributed locking.
- * process_work() will be called on the local node, and
- * process_main() will be called on every nodes.
+ * process_top() will be called on the local node, and
+ * process_bottom() will be called on every node.
*
- * If type is SD_OP_TYPE_LOCAL, both process_work() and
- * process_main() will be called on the local node.
+ * If type is SD_OP_TYPE_LOCAL, both process_top() and
+ * process_bottom() will be called on the local node.
*
- * If type is SD_OP_TYPE_IO, neither process_work() nor
- * process_main() is used because this type of operation is
+ * If type is SD_OP_TYPE_IO, neither process_top() nor
+ * process_bottom() is used because this type of operation is
* heavily intertwined with Sheepdog core codes. We will be
* unlikely to add new operations of this type.
*/
- int (*process_work)(const struct sd_req *req, struct sd_rsp *rsp, void *data);
- int (*process_main)(const struct sd_req *req, struct sd_rsp *rsp, void *data);
+ int (*process_top)(const struct sd_req *req, struct sd_rsp *rsp, void *data);
+ int (*process_bottom)(const struct sd_req *req, struct sd_rsp *rsp, void *data);
};
struct flush_work {
@@ -881,39 +881,39 @@ static struct sd_op_template sd_ops[] = {
/* cluster operations */
[SD_OP_NEW_VDI] = {
.type = SD_OP_TYPE_CLUSTER,
- .process_work = cluster_new_vdi,
- .process_main = post_cluster_new_vdi,
+ .process_top = cluster_new_vdi,
+ .process_bottom = post_cluster_new_vdi,
},
[SD_OP_DEL_VDI] = {
.type = SD_OP_TYPE_CLUSTER,
- .process_work = cluster_del_vdi,
+ .process_top = cluster_del_vdi,
},
[SD_OP_GET_VDI_INFO] = {
.type = SD_OP_TYPE_CLUSTER,
- .process_work = cluster_get_vdi_info,
+ .process_top = cluster_get_vdi_info,
},
[SD_OP_LOCK_VDI] = {
.type = SD_OP_TYPE_CLUSTER,
- .process_work = cluster_get_vdi_info,
+ .process_top = cluster_get_vdi_info,
},
[SD_OP_MAKE_FS] = {
.type = SD_OP_TYPE_CLUSTER,
.force = 1,
- .process_main = cluster_make_fs,
+ .process_bottom = cluster_make_fs,
},
[SD_OP_SHUTDOWN] = {
.type = SD_OP_TYPE_CLUSTER,
- .process_main = cluster_shutdown,
+ .process_bottom = cluster_shutdown,
},
[SD_OP_GET_VDI_ATTR] = {
.type = SD_OP_TYPE_CLUSTER,
- .process_work = cluster_get_vdi_attr,
+ .process_top = cluster_get_vdi_attr,
},
[SD_OP_RELEASE_VDI] = {
@@ -923,120 +923,120 @@ static struct sd_op_template sd_ops[] = {
[SD_OP_RECOVER] = {
.type = SD_OP_TYPE_CLUSTER,
.force = 1,
- .process_main = cluster_manual_recover,
+ .process_bottom = cluster_manual_recover,
},
[SD_OP_SNAPSHOT] = {
.type = SD_OP_TYPE_CLUSTER,
.force = 1,
- .process_main = cluster_snapshot,
+ .process_bottom = cluster_snapshot,
},
[SD_OP_RESTORE] = {
.type = SD_OP_TYPE_CLUSTER,
.force = 1,
- .process_main = cluster_restore,
+ .process_bottom = cluster_restore,
},
[SD_OP_CLEANUP] = {
.type = SD_OP_TYPE_CLUSTER,
.force = 1,
- .process_main = cluster_cleanup,
+ .process_bottom = cluster_cleanup,
},
/* local operations */
[SD_OP_GET_STORE_LIST] = {
.type = SD_OP_TYPE_LOCAL,
.force = 1,
- .process_work = local_get_store_list,
+ .process_top = local_get_store_list,
},
[SD_OP_READ_VDIS] = {
.type = SD_OP_TYPE_LOCAL,
.force = 1,
- .process_main = local_read_vdis,
+ .process_bottom = local_read_vdis,
},
[SD_OP_GET_NODE_LIST] = {
.type = SD_OP_TYPE_LOCAL,
.force = 1,
- .process_main = local_get_node_list,
+ .process_bottom = local_get_node_list,
},
[SD_OP_STAT_SHEEP] = {
.type = SD_OP_TYPE_LOCAL,
- .process_work = local_stat_sheep,
+ .process_top = local_stat_sheep,
},
[SD_OP_STAT_RECOVERY] = {
.type = SD_OP_TYPE_LOCAL,
- .process_main = local_stat_recovery,
+ .process_bottom = local_stat_recovery,
},
[SD_OP_STAT_CLUSTER] = {
.type = SD_OP_TYPE_LOCAL,
.force = 1,
- .process_work = local_stat_cluster,
+ .process_top = local_stat_cluster,
},
[SD_OP_KILL_NODE] = {
.type = SD_OP_TYPE_LOCAL,
.force = 1,
- .process_work = local_kill_node,
+ .process_top = local_kill_node,
},
[SD_OP_GET_OBJ_LIST] = {
.type = SD_OP_TYPE_LOCAL,
- .process_work = local_get_obj_list,
+ .process_top = local_get_obj_list,
},
[SD_OP_GET_EPOCH] = {
.type = SD_OP_TYPE_LOCAL,
- .process_work = local_get_epoch,
+ .process_top = local_get_epoch,
},
[SD_OP_GET_SNAP_FILE] = {
.type = SD_OP_TYPE_LOCAL,
.force = 1,
- .process_work = local_get_snap_file,
+ .process_top = local_get_snap_file,
},
[SD_OP_FLUSH_VDI] = {
.type = SD_OP_TYPE_LOCAL,
- .process_work = local_flush_vdi,
+ .process_top = local_flush_vdi,
},
[SD_OP_TRACE] = {
.type = SD_OP_TYPE_LOCAL,
.force = 1,
- .process_main = local_trace_ops,
+ .process_bottom = local_trace_ops,
},
[SD_OP_TRACE_CAT] = {
.type = SD_OP_TYPE_LOCAL,
.force = 1,
- .process_main = local_trace_cat_ops,
+ .process_bottom = local_trace_cat_ops,
},
/* I/O operations */
[SD_OP_CREATE_AND_WRITE_OBJ] = {
.type = SD_OP_TYPE_IO,
- .process_work = store_create_and_write_obj,
+ .process_top = store_create_and_write_obj,
},
[SD_OP_READ_OBJ] = {
.type = SD_OP_TYPE_IO,
- .process_work = store_read_obj,
+ .process_top = store_read_obj,
},
[SD_OP_WRITE_OBJ] = {
.type = SD_OP_TYPE_IO,
- .process_work = store_write_obj,
+ .process_top = store_write_obj,
},
[SD_OP_REMOVE_OBJ] = {
.type = SD_OP_TYPE_IO,
- .process_work = store_remove_obj,
+ .process_top = store_remove_obj,
},
};
@@ -1068,24 +1068,24 @@ int is_force_op(struct sd_op_template *op)
return !!op->force;
}
-int has_process_work(struct sd_op_template *op)
+int has_process_top(struct sd_op_template *op)
{
- return !!op->process_work;
+ return !!op->process_top;
}
-int has_process_main(struct sd_op_template *op)
+int has_process_bottom(struct sd_op_template *op)
{
- return !!op->process_main;
+ return !!op->process_bottom;
}
-int do_process_work(struct sd_op_template *op, const struct sd_req *req,
+int do_process_top(struct sd_op_template *op, const struct sd_req *req,
struct sd_rsp *rsp, void *data)
{
- return op->process_work(req, rsp, data);
+ return op->process_top(req, rsp, data);
}
-int do_process_main(struct sd_op_template *op, const struct sd_req *req,
+int do_process_bottom(struct sd_op_template *op, const struct sd_req *req,
struct sd_rsp *rsp, void *data)
{
- return op->process_main(req, rsp, data);
+ return op->process_bottom(req, rsp, data);
}
diff --git a/sheep/sdnet.c b/sheep/sdnet.c
index 1e001af..5fb1472 100644
--- a/sheep/sdnet.c
+++ b/sheep/sdnet.c
@@ -163,8 +163,8 @@ static void local_op_done(struct work *work)
{
struct request *req = container_of(work, struct request, work);
- if (has_process_main(req->op)) {
- req->rp.result = do_process_main(req->op, &req->rq,
+ if (has_process_bottom(req->op)) {
+ req->rp.result = do_process_bottom(req->op, &req->rq,
&req->rp, req->data);
}
@@ -182,8 +182,8 @@ static void do_local_request(struct work *work)
struct sd_obj_rsp *rsp = (struct sd_obj_rsp *)&req->rp;
int ret = SD_RES_SUCCESS;
- if (has_process_work(req->op))
- ret = do_process_work(req->op, &req->rq, &req->rp, req->data);
+ if (has_process_top(req->op))
+ ret = do_process_top(req->op, &req->rq, &req->rp, req->data);
rsp->result = ret;
}
diff --git a/sheep/sheep_priv.h b/sheep/sheep_priv.h
index 9cb22d1..8258738 100644
--- a/sheep/sheep_priv.h
+++ b/sheep/sheep_priv.h
@@ -321,11 +321,11 @@ int is_cluster_op(struct sd_op_template *op);
int is_local_op(struct sd_op_template *op);
int is_io_op(struct sd_op_template *op);
int is_force_op(struct sd_op_template *op);
-int has_process_work(struct sd_op_template *op);
-int has_process_main(struct sd_op_template *op);
-int do_process_work(struct sd_op_template *op, const struct sd_req *req,
+int has_process_top(struct sd_op_template *op);
+int has_process_bottom(struct sd_op_template *op);
+int do_process_top(struct sd_op_template *op, const struct sd_req *req,
struct sd_rsp *rsp, void *data);
-int do_process_main(struct sd_op_template *op, const struct sd_req *req,
+int do_process_bottom(struct sd_op_template *op, const struct sd_req *req,
struct sd_rsp *rsp, void *data);
/* Journal */
diff --git a/sheep/store.c b/sheep/store.c
index 7ef72c5..d41adb4 100644
--- a/sheep/store.c
+++ b/sheep/store.c
@@ -53,7 +53,7 @@ static int do_local_io(struct request *req, uint32_t epoch)
hdr->epoch = epoch;
dprintf("%x, %" PRIx64" , %u\n", hdr->opcode, hdr->oid, epoch);
- return do_process_work(req->op, &req->rq, &req->rp, req);
+ return do_process_top(req->op, &req->rq, &req->rp, req);
}
static int forward_read_obj_req(struct request *req)
--
1.7.10
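As a usage note, after this patch a new cluster operation would declare
its handlers through the renamed fields. The sketch below is
illustrative only: SD_OP_EXAMPLE, cluster_example and
post_cluster_example are placeholders and not part of this series.

/* Illustrative only: SD_OP_EXAMPLE and its handlers are placeholders. */
enum { SD_OP_EXAMPLE = 0x42, SD_OP_MAX = 0x100 };
enum { SD_OP_TYPE_CLUSTER = 1 };

struct sd_req;
struct sd_rsp;

struct sd_op_template {
	int type;
	int force;
	int (*process_top)(const struct sd_req *, struct sd_rsp *, void *);
	int (*process_bottom)(const struct sd_req *, struct sd_rsp *, void *);
};

/* would run once, on the node that received the request */
static int cluster_example(const struct sd_req *req, struct sd_rsp *rsp,
			   void *data)
{
	return 0;
}

/* would run on every node after the notify round */
static int post_cluster_example(const struct sd_req *req, struct sd_rsp *rsp,
				void *data)
{
	return 0;
}

static struct sd_op_template example_ops[SD_OP_MAX] = {
	[SD_OP_EXAMPLE] = {
		.type = SD_OP_TYPE_CLUSTER,
		.process_top = cluster_example,
		.process_bottom = post_cluster_example,
	},
};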