[Sheepdog] [PATCH 1/4] sheep: refactor process_request_queue()
Liu Yuan
namei.unix at gmail.com
Mon Apr 16 09:39:48 CEST 2012
On 04/11/2012 10:12 PM, Liu Yuan wrote:
> From: Liu Yuan <tailai.ly at taobao.com>
>
> - move the request checking logic out of the request handling
> process. This will improve readability a bit.
>
> - add a new field 'local_cow_oid' to struct request to avoid
> redundant calls to is_access_local()
>
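The second point is just result caching: is_access_local() is resolved
once when the request is queued and the answer is remembered in the
request, so check_request() only has to test the cached fields.  A
minimal standalone sketch of that pattern (struct fake_request,
fake_is_access_local(), cache_local_oids() and needs_epoch_check() are
made-up names for illustration, not code from the sheep tree):

#include <stdint.h>
#include <stdio.h>

struct fake_request {
	uint64_t oid;
	uint64_t cow_oid;
	/* cached answers, filled in once at queue time */
	uint64_t local_oid;
	uint64_t local_cow_oid;
};

/* stand-in for is_access_local(); pretend even-numbered oids live here */
static int fake_is_access_local(uint64_t oid)
{
	return oid && oid % 2 == 0;
}

/* plays the role of setup_access_to_local_objects(): cache both answers */
static void cache_local_oids(struct fake_request *req)
{
	if (fake_is_access_local(req->oid))
		req->local_oid = req->oid;

	if (req->cow_oid && fake_is_access_local(req->cow_oid))
		req->local_cow_oid = req->cow_oid;
}

/* plays the role of the early test at the top of check_request() */
static int needs_epoch_check(const struct fake_request *req)
{
	return req->local_oid || req->local_cow_oid;
}

int main(void)
{
	struct fake_request req = { .oid = 3, .cow_oid = 4 };

	cache_local_oids(&req);
	printf("epoch check needed: %s\n",
	       needs_epoch_check(&req) ? "yes" : "no");
	return 0;
}

With both answers cached in struct request, the checks that used to
call is_access_local() again in process_request_queue() now only look
at req->local_oid and req->local_cow_oid.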
> Signed-off-by: Liu Yuan <tailai.ly at taobao.com>
> ---
> sheep/group.c | 82 -------------------------------------------
> sheep/sdnet.c | 98 +++++++++++++++++++++++++++++++++++++++++++++++++---
> sheep/sheep_priv.h | 1 +
> 3 files changed, 94 insertions(+), 87 deletions(-)
>
> diff --git a/sheep/group.c b/sheep/group.c
> index 469a2ac..21b26a2 100644
> --- a/sheep/group.c
> +++ b/sheep/group.c
> @@ -971,24 +971,6 @@ static void cpg_event_done(struct work *work)
> start_cpg_event_work();
> }
>
> -static int check_epoch(struct request *req)
> -{
> - uint32_t req_epoch = req->rq.epoch;
> - uint32_t opcode = req->rq.opcode;
> - int ret = SD_RES_SUCCESS;
> -
> - if (before(req_epoch, sys->epoch)) {
> - ret = SD_RES_OLD_NODE_VER;
> - eprintf("old node version %u, %u, %x\n",
> - sys->epoch, req_epoch, opcode);
> - } else if (after(req_epoch, sys->epoch)) {
> - ret = SD_RES_NEW_NODE_VER;
> - eprintf("new node version %u, %u, %x\n",
> - sys->epoch, req_epoch, opcode);
> - }
> - return ret;
> -}
> -
> int is_access_to_busy_objects(uint64_t oid)
> {
> struct request *req;
> @@ -1008,34 +990,6 @@ int is_access_to_busy_objects(uint64_t oid)
> return 0;
> }
>
> -static int __is_access_to_recoverying_objects(struct request *req)
> -{
> - if (req->rq.flags & SD_FLAG_CMD_RECOVERY) {
> - if (req->rq.opcode != SD_OP_READ_OBJ)
> - eprintf("bug\n");
> - return 0;
> - }
> -
> - if (is_recoverying_oid(req->local_oid))
> - return 1;
> -
> - return 0;
> -}
> -
> -static int __is_access_to_busy_objects(struct request *req)
> -{
> - if (req->rq.flags & SD_FLAG_CMD_RECOVERY) {
> - if (req->rq.opcode != SD_OP_READ_OBJ)
> - eprintf("bug\n");
> - return 0;
> - }
> -
> - if (is_access_to_busy_objects(req->local_oid))
> - return 1;
> -
> - return 0;
> -}
> -
> static int need_consistency_check(uint8_t opcode, uint16_t flags)
> {
> if (flags & SD_FLAG_CMD_IO_LOCAL)
> @@ -1056,9 +1010,7 @@ static int need_consistency_check(uint8_t opcode, uint16_t flags)
> static void process_request_queue(void)
> {
> struct cpg_event *cevent, *n;
> - LIST_HEAD(failed_req_list);
>
> -retry:
> list_for_each_entry_safe(cevent, n, &sys->cpg_request_queue, cpg_event_list) {
> struct request *req = container_of(cevent, struct request, cev);
> struct sd_obj_req *hdr = (struct sd_obj_req *)&req->rq;
> @@ -1080,37 +1032,9 @@ retry:
> continue;
> }
>
> - if (__is_access_to_recoverying_objects(req)) {
> - if (req->rq.flags & SD_FLAG_CMD_IO_LOCAL) {
> - req->rp.result = SD_RES_NEW_NODE_VER;
> - sys->nr_outstanding_io++; /* TODO: cleanup */
> - list_add_tail(&req->r_wlist, &failed_req_list);
> - } else
> - list_add_tail(&req->r_wlist, &sys->req_wait_for_obj_list);
> - continue;
> - }
> - if (__is_access_to_busy_objects(req)) {
> - list_add_tail(&req->r_wlist, &sys->req_wait_for_obj_list);
> - continue;
> - }
> -
> list_add_tail(&req->r_wlist, &sys->outstanding_req_list);
> -
> sys->nr_outstanding_io++;
>
> - if (is_access_local(req->entry, req->nr_vnodes,
> - ((struct sd_obj_req *)&req->rq)->oid, copies) ||
> - is_access_local(req->entry, req->nr_vnodes,
> - ((struct sd_obj_req *)&req->rq)->cow_oid, copies)) {
> - int ret = check_epoch(req);
> - if (ret != SD_RES_SUCCESS) {
> - req->rp.result = ret;
> - list_del(&req->r_wlist);
> - list_add_tail(&req->r_wlist, &failed_req_list);
> - continue;
> - }
> - }
> -
> if (need_consistency_check(req->rq.opcode, req->rq.flags)) {
> uint32_t vdi_id = oid_to_vid(hdr->oid);
> struct data_object_bmap *bmap;
> @@ -1135,12 +1059,6 @@ retry:
> else
> queue_work(sys->gateway_wqueue, &req->work);
> }
> - while (!list_empty(&failed_req_list)) {
> - struct request *req = list_first_entry(&failed_req_list,
> - struct request, r_wlist);
> - req->work.done(&req->work);
> - goto retry;
> - }
> }
>
> /* can be called only by the main process */
> diff --git a/sheep/sdnet.c b/sheep/sdnet.c
> index fb75c89..d366cd0 100644
> --- a/sheep/sdnet.c
> +++ b/sheep/sdnet.c
> @@ -74,6 +74,10 @@ static void setup_access_to_local_objects(struct request *req)
>
> if (is_access_local(req->entry, req->nr_vnodes, hdr->oid, copies))
> req->local_oid = hdr->oid;
> +
> + if (hdr->cow_oid)
> + if (is_access_local(req->entry, req->nr_vnodes, hdr->cow_oid, copies))
> + req->local_cow_oid = hdr->cow_oid;
> }
>
> static void io_op_done(struct work *work)
> @@ -196,6 +200,89 @@ static void do_local_request(struct work *work)
> rsp->result = ret;
> }
>
> +static int check_epoch(struct request *req)
> +{
> + uint32_t req_epoch = req->rq.epoch;
> + uint32_t opcode = req->rq.opcode;
> + int ret = SD_RES_SUCCESS;
> +
> + if (before(req_epoch, sys->epoch)) {
> + ret = SD_RES_OLD_NODE_VER;
> + eprintf("old node version %u, %u, %x\n",
> + sys->epoch, req_epoch, opcode);
> + } else if (after(req_epoch, sys->epoch)) {
> + ret = SD_RES_NEW_NODE_VER;
> + eprintf("new node version %u, %u, %x\n",
> + sys->epoch, req_epoch, opcode);
> + }
> + return ret;
> +}
> +
> +static int __is_access_to_recoverying_objects(struct request *req)
> +{
> + if (req->rq.flags & SD_FLAG_CMD_RECOVERY) {
> + if (req->rq.opcode != SD_OP_READ_OBJ)
> + eprintf("bug\n");
> + return 0;
> + }
> +
> + if (is_recoverying_oid(req->local_oid))
> + return 1;
> +
> + return 0;
> +}
> +
> +static int __is_access_to_busy_objects(struct request *req)
> +{
> + if (req->rq.flags & SD_FLAG_CMD_RECOVERY) {
> + if (req->rq.opcode != SD_OP_READ_OBJ)
> + eprintf("bug\n");
> + return 0;
> + }
> +
> + if (is_access_to_busy_objects(req->local_oid))
> + return 1;
> +
> + return 0;
> +}
> +static int check_request(struct request *req)
> +{
> + if (!req->local_oid && !req->local_cow_oid)
> + return 0;
> + else {
> + int ret = check_epoch(req);
> + if (ret != SD_RES_SUCCESS) {
> + req->rp.result = ret;
> + sys->nr_outstanding_io++;
> + req->work.done(&req->work);
> + return -1;
> + }
> + }
> +
> + if (!req->local_oid)
> + return 0;
> +
> + if (__is_access_to_recoverying_objects(req)) {
> + if (req->rq.flags & SD_FLAG_CMD_IO_LOCAL) {
> + req->rp.result = SD_RES_NEW_NODE_VER;
> + sys->nr_outstanding_io++;
> + req->work.done(&req->work);
> + } else {
> + list_del(&req->r_wlist);
> + list_add_tail(&req->r_wlist, &sys->req_wait_for_obj_list);
> + }
> + return -1;
> + }
> +
> + if (__is_access_to_busy_objects(req)) {
> + list_del(&req->r_wlist);
> + list_add_tail(&req->r_wlist, &sys->req_wait_for_obj_list);
> + return -1;
> + }
> +
> + return 0;
> +}
> +
> static void queue_request(struct request *req)
> {
> struct cpg_event *cevent = &req->cev;
> @@ -241,6 +328,7 @@ static void queue_request(struct request *req)
> if (is_io_op(req->op)) {
> req->work.fn = do_io_request;
> req->work.done = io_op_done;
> + setup_access_to_local_objects(req);
> } else if (is_local_op(req->op)) {
> req->work.fn = do_local_request;
> req->work.done = local_op_done;
> @@ -253,9 +341,6 @@ static void queue_request(struct request *req)
> req->done(req);
> return;
> }
> -
> - list_del(&req->r_wlist);
> -
> /*
> * we set epoch for non direct requests here. Note that we
> * can't access to sys->epoch after calling
> @@ -265,9 +350,12 @@ static void queue_request(struct request *req)
> if (!(hdr->flags & SD_FLAG_CMD_IO_LOCAL))
> hdr->epoch = sys->epoch;
>
> + if (check_request(req) < 0)
> + return;
> +
> + list_del(&req->r_wlist);
> +
> setup_ordered_sd_vnode_list(req);
> - if (is_io_op(req->op))
> - setup_access_to_local_objects(req);
>
> cevent->ctype = CPG_EVENT_REQUEST;
> list_add_tail(&cevent->cpg_event_list, &sys->cpg_request_queue);
> diff --git a/sheep/sheep_priv.h b/sheep/sheep_priv.h
> index a5d8cad..d59606d 100644
> --- a/sheep/sheep_priv.h
> +++ b/sheep/sheep_priv.h
> @@ -82,6 +82,7 @@ struct request {
> struct list_head pending_list;
>
> uint64_t local_oid;
> + uint64_t local_cow_oid;
>
> struct sd_vnode *entry;
> int nr_vnodes;
Applied, thanks.

Yuan