[Sheepdog] Re: sheepdog Digest, Vol 26, Issue 28
Zouxianjun
zouxianjun at huawei.com
Tue Nov 15 01:21:33 CET 2011
Thank you very much! I think this is what I need. I will follow the instructions and compile again.
Thanks!
________________________________________
From: sheepdog-bounces at lists.wpkg.org [sheepdog-bounces at lists.wpkg.org] on behalf of sheepdog-request at lists.wpkg.org [sheepdog-request at lists.wpkg.org]
Sent: November 15, 2011, 8:04
To: sheepdog at lists.wpkg.org
Subject: sheepdog Digest, Vol 26, Issue 28
Send sheepdog mailing list submissions to
sheepdog at lists.wpkg.org
To subscribe or unsubscribe via the World Wide Web, visit
http://lists.wpkg.org/mailman/listinfo/sheepdog
or, via email, send a message with subject or body 'help' to
sheepdog-request at lists.wpkg.org
You can reach the person managing the list at
sheepdog-owner at lists.wpkg.org
When replying, please edit your Subject line so it is more specific
than "Re: Contents of sheepdog digest..."
Today's Topics:
1. [PATCH] fix a compiler warning in forward_write_obj_req
(Christoph Hellwig)
2. [PATCH 1/2] sdnet: split up __done (Christoph Hellwig)
3. [PATCH 2/2] sdnet: tidy up queue_request (Christoph Hellwig)
4. Re: [PATCH] tests: add qemu-io testcases. (MORITA Kazutaka)
----------------------------------------------------------------------
Message: 1
Date: Mon, 14 Nov 2011 10:45:33 -0500
From: Christoph Hellwig <hch at infradead.org>
To: sheepdog at lists.wpkg.org
Subject: [Sheepdog] [PATCH] fix a compiler warning in
forward_write_obj_req
Message-ID: <20111114154532.GA14787 at infradead.org>
Content-Type: text/plain; charset=us-ascii
rlen is never used in the function, and recent gcc complains about
this fact.
Signed-off-by: Christoph Hellwig <hch at lst.de>
Index: sheepdog/sheep/store.c
===================================================================
--- sheepdog.orig/sheep/store.c 2011-11-14 16:34:09.534609838 +0100
+++ sheepdog/sheep/store.c 2011-11-14 16:34:18.517943118 +0100
@@ -306,7 +306,7 @@ out:
static int forward_write_obj_req(struct request *req, int idx)
{
int i, n, nr, fd, ret;
- unsigned wlen, rlen;
+ unsigned wlen;
char name[128];
struct sd_obj_req hdr = *(struct sd_obj_req *)&req->rq;
struct sd_obj_rsp *rsp = (struct sd_obj_rsp *)&req->rp;
@@ -336,7 +336,6 @@ static int forward_write_obj_req(struct
hdr.flags |= SD_FLAG_CMD_IO_LOCAL;
wlen = hdr.data_length;
- rlen = 0;
for (i = 0; i < copies; i++) {
n = obj_to_sheep(e, nr, oid, i);
------------------------------
Message: 2
Date: Mon, 14 Nov 2011 10:48:41 -0500
From: Christoph Hellwig <hch at infradead.org>
To: sheepdog at lists.wpkg.org
Subject: [Sheepdog] [PATCH 1/2] sdnet: split up __done
Message-ID: <20111114154841.GA15954 at infradead.org>
Content-Type: text/plain; charset=us-ascii
Split the __done function into one helper per operation type given that
there is no shared code between the different types.
Signed-off-by: Christoph Hellwig <hch at lst.de>
Index: sheepdog/sheep/sdnet.c
===================================================================
--- sheepdog.orig/sheep/sdnet.c 2011-11-14 16:39:17.411274667 +0100
+++ sheepdog/sheep/sdnet.c 2011-11-14 16:41:13.821273968 +0100
@@ -76,109 +76,114 @@ static void setup_access_to_local_object
req->local_oid = hdr->oid;
}
-static void __done(struct work *work, int idx)
+static void io_op_done(struct work *work, int idx)
{
struct request *req = container_of(work, struct request, work);
+ struct cpg_event *cevent = &req->cev;
int again = 0;
int copies = sys->nr_sobjs;
if (copies > req->nr_zones)
copies = req->nr_zones;
- if (is_cluster_op(req->op))
- /* request is forwarded to cpg group */
- return;
-
- if (is_local_op(req->op) && has_process_main(req->op))
- req->rp.result = do_process_main(req->op, &req->rq,
- &req->rp, req->data);
-
- if (is_io_op(req->op)) {
- struct cpg_event *cevent = &req->cev;
-
- list_del(&req->r_wlist);
-
- sys->nr_outstanding_io--;
- /*
- * TODO: if the request failed due to epoch unmatch,
- * we should retry here (adds this request to the tail
- * of sys->cpg_event_siblings.
- */
-
- if (!(req->rq.flags & SD_FLAG_CMD_IO_LOCAL) &&
- (req->rp.result == SD_RES_OLD_NODE_VER ||
- req->rp.result == SD_RES_NEW_NODE_VER ||
- req->rp.result == SD_RES_NETWORK_ERROR ||
- req->rp.result == SD_RES_WAIT_FOR_JOIN ||
- req->rp.result == SD_RES_WAIT_FOR_FORMAT)) {
+ list_del(&req->r_wlist);
+ sys->nr_outstanding_io--;
+ /*
+ * TODO: if the request failed due to epoch unmatch,
+ * we should retry here (adds this request to the tail
+ * of sys->cpg_event_siblings.
+ */
+ if (!(req->rq.flags & SD_FLAG_CMD_IO_LOCAL) &&
+ (req->rp.result == SD_RES_OLD_NODE_VER ||
+ req->rp.result == SD_RES_NEW_NODE_VER ||
+ req->rp.result == SD_RES_NETWORK_ERROR ||
+ req->rp.result == SD_RES_WAIT_FOR_JOIN ||
+ req->rp.result == SD_RES_WAIT_FOR_FORMAT)) {
+
+ req->rq.epoch = sys->epoch;
+ setup_ordered_sd_vnode_list(req);
+ setup_access_to_local_objects(req);
+
+ list_add_tail(&cevent->cpg_event_list, &sys->cpg_event_siblings);
+ again = 1;
+ } else if (req->rp.result == SD_RES_SUCCESS && req->check_consistency) {
+ struct sd_obj_req *obj_hdr = (struct sd_obj_req *)&req->rq;
+ uint32_t vdi_id = oid_to_vid(obj_hdr->oid);
+ struct data_object_bmap *bmap, *n;
+ int nr_bmaps = 0;
+
+ if (!is_data_obj(obj_hdr->oid))
+ goto done;
+
+ list_for_each_entry_safe(bmap, n, &sys->consistent_obj_list, list) {
+ nr_bmaps++;
+ if (bmap->vdi_id == vdi_id) {
+ set_bit(data_oid_to_idx(obj_hdr->oid), bmap->dobjs);
+ list_del(&bmap->list);
+ list_add_tail(&bmap->list, &sys->consistent_obj_list);
+ goto done;
+ }
+ }
+ bmap = zalloc(sizeof(*bmap));
+ if (bmap == NULL) {
+ eprintf("failed to allocate memory\n");
+ goto done;
+ }
+ dprintf("allocating a new object map\n");
+ bmap->vdi_id = vdi_id;
+ list_add_tail(&bmap->list, &sys->consistent_obj_list);
+ set_bit(data_oid_to_idx(obj_hdr->oid), bmap->dobjs);
+ if (nr_bmaps >= MAX_DATA_OBJECT_BMAPS) {
+ /* the first entry is the least recently used one */
+ bmap = list_first_entry(&sys->consistent_obj_list,
+ struct data_object_bmap, list);
+ list_del(&bmap->list);
+ free(bmap);
+ }
+ } else if (is_access_local(req->entry, req->nr_vnodes,
+ ((struct sd_obj_req *)&req->rq)->oid, copies) &&
+ req->rp.result == SD_RES_EIO) {
+ eprintf("leaving sheepdog cluster\n");
+ leave_cluster();
+
+ if (req->rq.flags & SD_FLAG_CMD_IO_LOCAL)
+ /* hack to retry */
+ req->rp.result = SD_RES_NETWORK_ERROR;
+ else {
req->rq.epoch = sys->epoch;
setup_ordered_sd_vnode_list(req);
setup_access_to_local_objects(req);
list_add_tail(&cevent->cpg_event_list, &sys->cpg_event_siblings);
again = 1;
- } else if (req->rp.result == SD_RES_SUCCESS && req->check_consistency) {
- struct sd_obj_req *obj_hdr = (struct sd_obj_req *)&req->rq;
- uint32_t vdi_id = oid_to_vid(obj_hdr->oid);
- struct data_object_bmap *bmap, *n;
- int nr_bmaps = 0;
-
- if (!is_data_obj(obj_hdr->oid))
- goto done;
-
- list_for_each_entry_safe(bmap, n, &sys->consistent_obj_list, list) {
- nr_bmaps++;
- if (bmap->vdi_id == vdi_id) {
- set_bit(data_oid_to_idx(obj_hdr->oid), bmap->dobjs);
- list_del(&bmap->list);
- list_add_tail(&bmap->list, &sys->consistent_obj_list);
- goto done;
- }
- }
- bmap = zalloc(sizeof(*bmap));
- if (bmap == NULL) {
- eprintf("failed to allocate memory\n");
- goto done;
- }
- dprintf("allocating a new object map\n");
- bmap->vdi_id = vdi_id;
- list_add_tail(&bmap->list, &sys->consistent_obj_list);
- set_bit(data_oid_to_idx(obj_hdr->oid), bmap->dobjs);
- if (nr_bmaps >= MAX_DATA_OBJECT_BMAPS) {
- /* the first entry is the least recently used one */
- bmap = list_first_entry(&sys->consistent_obj_list,
- struct data_object_bmap, list);
- list_del(&bmap->list);
- free(bmap);
- }
- } else if (is_access_local(req->entry, req->nr_vnodes,
- ((struct sd_obj_req *)&req->rq)->oid, copies) &&
- req->rp.result == SD_RES_EIO) {
- eprintf("leaving sheepdog cluster\n");
- leave_cluster();
-
- if (req->rq.flags & SD_FLAG_CMD_IO_LOCAL)
- /* hack to retry */
- req->rp.result = SD_RES_NETWORK_ERROR;
- else {
- req->rq.epoch = sys->epoch;
- setup_ordered_sd_vnode_list(req);
- setup_access_to_local_objects(req);
-
- list_add_tail(&cevent->cpg_event_list, &sys->cpg_event_siblings);
- again = 1;
- }
}
-done:
- resume_pending_requests();
- resume_recovery_work();
}
+done:
+ resume_pending_requests();
+ resume_recovery_work();
if (!again)
req->done(req);
}
+static void local_op_done(struct work *work, int idx)
+{
+ struct request *req = container_of(work, struct request, work);
+
+ if (has_process_main(req->op)) {
+ req->rp.result = do_process_main(req->op, &req->rq,
+ &req->rp, req->data);
+ }
+
+ req->done(req);
+}
+
+static void cluster_op_done(struct work *work, int idx)
+{
+ /* request is forwarded to cpg group */
+}
+
static void queue_request(struct request *req)
{
struct cpg_event *cevent = &req->cev;
@@ -214,19 +219,22 @@ static void queue_request(struct request
}
}
- if (is_io_op(req->op) || is_local_op(req->op))
+ if (is_io_op(req->op)) {
req->work.fn = store_queue_request;
- else if (is_cluster_op(req->op))
+ req->work.done = io_op_done;
+ } else if (is_local_op(req->op)) {
+ req->work.fn = store_queue_request;
+ req->work.done = local_op_done;
+ } else if (is_cluster_op(req->op)) {
req->work.fn = cluster_queue_request;
- else {
+ req->work.done = cluster_op_done;
+ } else {
eprintf("unknown operation %d\n", hdr->opcode);
rsp->result = SD_RES_SYSTEM_ERROR;
req->done(req);
return;
}
- req->work.done = __done;
-
list_del(&req->r_wlist);
/*
------------------------------
Message: 3
Date: Mon, 14 Nov 2011 10:49:00 -0500
From: Christoph Hellwig <hch at infradead.org>
To: sheepdog at lists.wpkg.org
Subject: [Sheepdog] [PATCH 2/2] sdnet: tidy up queue_request
Message-ID: <20111114154900.GA15962 at infradead.org>
Content-Type: text/plain; charset=us-ascii
Use a switch for the system status, and use a common 'done' goto label for
all cases that want to complete the request and return.
Signed-off-by: Christoph Hellwig <hch at lst.de>
Index: sheepdog/sheep/sdnet.c
===================================================================
--- sheepdog.orig/sheep/sdnet.c 2011-11-14 16:39:40.511274529 +0100
+++ sheepdog/sheep/sdnet.c 2011-11-14 16:40:59.004607398 +0100
@@ -187,7 +187,6 @@ static void cluster_op_done(struct work
static void queue_request(struct request *req)
{
struct cpg_event *cevent = &req->cev;
-
struct sd_req *hdr = (struct sd_req *)&req->rq;
struct sd_rsp *rsp = (struct sd_rsp *)&req->rp;
@@ -201,22 +200,24 @@ static void queue_request(struct request
dprintf("%x\n", hdr->opcode);
- if (sys->status == SD_STATUS_SHUTDOWN) {
+ switch (sys->status) {
+ case SD_STATUS_SHUTDOWN:
rsp->result = SD_RES_SHUTDOWN;
- req->done(req);
- return;
- }
-
- if (sys->status == SD_STATUS_WAIT_FOR_FORMAT ||
- sys->status == SD_STATUS_WAIT_FOR_JOIN) {
+ goto done;
+ case SD_STATUS_WAIT_FOR_FORMAT:
+ if (!is_force_op(req->op)) {
+ rsp->result = SD_RES_WAIT_FOR_FORMAT;
+ goto done;
+ }
+ break;
+ case SD_STATUS_WAIT_FOR_JOIN:
if (!is_force_op(req->op)) {
- if (sys->status == SD_STATUS_WAIT_FOR_FORMAT)
- rsp->result = SD_RES_WAIT_FOR_FORMAT;
- else
- rsp->result = SD_RES_WAIT_FOR_JOIN;
- req->done(req);
- return;
+ rsp->result = SD_RES_WAIT_FOR_JOIN;
+ goto done;
}
+ break;
+ default:
+ break;
}
if (is_io_op(req->op)) {
@@ -253,6 +254,9 @@ static void queue_request(struct request
cevent->ctype = CPG_EVENT_REQUEST;
list_add_tail(&cevent->cpg_event_list, &sys->cpg_event_siblings);
start_cpg_event_work();
+ return;
+done:
+ req->done(req);
}
static void client_incref(struct client_info *ci);
------------------------------
Message: 4
Date: Tue, 15 Nov 2011 09:04:33 +0900
From: MORITA Kazutaka <morita.kazutaka at lab.ntt.co.jp>
To: chenbaozi at gmail.com
Cc: sheepdog at lists.wpkg.org, CHEN Baozi <chenbaozi.pt at taobao.com>
Subject: Re: [Sheepdog] [PATCH] tests: add qemu-io testcases.
Message-ID: <8739dq2qum.wl%morita.kazutaka at lab.ntt.co.jp>
Content-Type: text/plain; charset=US-ASCII
At Mon, 14 Nov 2011 15:45:54 +0800,
chenbaozi at gmail.com wrote:
>
> From: CHEN Baozi <chenbaozi.pt at taobao.com>
>
> Signed-off-by: CHEN Baozi <chenbaozi.pt at taobao.com>
> ---
> tests/qemu_io_testcases.py | 182 ++++++++++++++++++++++++++++++++++++++++++++
> tests/test_qemu_io.py | 84 ++++++++++++++++++++
> 2 files changed, 266 insertions(+), 0 deletions(-)
> create mode 100644 tests/qemu_io_testcases.py
> create mode 100644 tests/test_qemu_io.py
Applied, thanks!
Currently, we can select tests with command-line options.
For example:
$ ./tests/testsuite -k test_mastership
It would be much better if we could select the io testcases without editing
qemu_io_testcases.py.
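Something along these lines might work, just as a rough sketch; the
QEMU_IO_CASES environment variable and the selected_cases() helper are
assumptions here, not anything that exists in the test suite today:

    # Rough sketch only (not part of the applied patch): select qemu-io
    # cases by ID through an environment variable, so qemu_io_testcases.py
    # itself never needs editing.  QEMU_IO_CASES and this helper are
    # assumptions, not existing sheepdog test infrastructure.
    import os

    def selected_cases(all_cases):
        """Return the subset of cases named in QEMU_IO_CASES.

        QEMU_IO_CASES is a comma-separated list of case IDs, e.g. "001,004".
        If it is unset or empty, all cases are returned unchanged.
        """
        wanted = os.environ.get("QEMU_IO_CASES")
        if not wanted:
            return all_cases
        ids = set(part.strip() for part in wanted.split(","))
        return dict((k, v) for (k, v) in all_cases.items() if k in ids)

    # test_io() and friends would then iterate over selected_cases(io_cases)
    # instead of io_cases, e.g.:
    #
    #     QEMU_IO_CASES=001,004 ./tests/testsuite -k test_io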
Thanks,
Kazutaka
>
> diff --git a/tests/qemu_io_testcases.py b/tests/qemu_io_testcases.py
> new file mode 100644
> index 0000000..d9e8fc2
> --- /dev/null
> +++ b/tests/qemu_io_testcases.py
> @@ -0,0 +1,182 @@
> +# Copyright (c) 2011 Taobao.com, Inc.
> +#
> +# This program is free software; you can redistribute it and/or
> +# modify it under the terms of the GNU General Public License as
> +# published by the Free Software Foundation.
> +#
> +# This program is distributed in the hope that it would be useful,
> +# but WITHOUT ANY WARRANTY; without even the implied warranty of
> +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> +# GNU General Public License for more details.
> +#
> +# You should have received a copy of the GNU General Public License
> +# along with this program. If not, see <http://www.gnu.org/licenses/>.
> +#
> +# Based on code from the QEMU I/O test suite (qemu-iotests)
> +# Copyright (C) 2009 Red Hat, Inc.
> +#
> +
> +# Brief description of each test cases.
> +cases_desc = {
> +"001": "Test simple read/write using plain bdrv_read/bdrv_write.",
> +"002": "Test simple read/write using plain bdrv_pread/bdrv_pwrite.",
> +"003": "Test simple read/write using bdrv_aio_readv/bdrv_aio_writev.",
> +"004": "Make sure we can't read and write outside of the image size.",
> +"008": "Test simple asynchronous read/write operations.",
> +"011": "Test for AIO allocation on the same cluster.",
> +"016": "Test I/O after EOF for growable images.",
> +"025": "Resizing images.",
> +}
> +
> +# Used by test_io() method.
> +io_cases = {
> +"001":[
> +("read 0 128M", """read 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("write -P 0xa 0 128M", """wrote 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("read -P 0xa 0 128M", """read 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +],
> +"002":[
> +("read -p 0 128M", """read 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("write -pP 0xa 0 128M", """wrote 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("read -pP 0xa 0 128M", """read 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("write -pP 0xab 66 42", """wrote 42/42 bytes at offset 66
> +42.000000 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("read -pP 0xab 66 42", """read 42/42 bytes at offset 66
> +42.000000 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +],
> +"003":[
> +("readv 0 128M", """read 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("writev -P 0xa 0 128M", """wrote 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("readv -P 0xa 0 128M", """read 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("writev -P 0xb 67M 8k 8k 8k 8k 8k 8k 8k",
> +"""wrote 57344/57344 bytes at offset 70254592
> +56 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("readv -P 0xb 67M 8k 8k 8k 8k 8k 8k 8k",
> +"""read 57344/57344 bytes at offset 70254592
> +56 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +],
> +"004":[
> +("write 127M 1M", """wrote 1048576/1048576 bytes at offset 133169152
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("write 127M 4M", """write failed: Input/output error"""),
> +("write 128M 4096", """write failed: Input/output error"""),
> +("write 140M 4096", """write failed: Input/output error"""),
> +("write -p 140M 4096", """write failed: Input/output error"""),
> +("writev 140M 4096","""writev failed: Input/output error"""),
> +("read 127M 1M", """read 1048576/1048576 bytes at offset 133169152
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("read 127M 4M", """read failed: Input/output error"""),
> +("read 128M 4096", """read failed: Input/output error"""),
> +("read 140M 4096", """read failed: Input/output error"""),
> +("read -p 140M 4096", """read failed: Input/output error"""),
> +("readv 140M 4096", """readv failed: Input/output error"""),
> +],
> +"008":[
> +("aio_read 0 128M", """read 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("aio_write -P 0xa 0 128M", """wrote 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("aio_read -P 0xa 0 128M", """read 134217728/134217728 bytes at offset 0
> +128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +],
> +"025":[
> +("length", """128 MiB"""),
> +("truncate 384M", """"""),
> +("length", """384 MiB"""),
> +],
> +}
> +
> +# Used by test_growable_io() method.
> +io_cases_g = {
> +"016":[
> +("read -P 0 128M 512", """read 512/512 bytes at offset 134217728
> +512.000000 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("read -P 0 256M 512", """read 512/512 bytes at offset 268435456
> +512.000000 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("write -P 66 128M 512", """wrote 512/512 bytes at offset 134217728
> +512.000000 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("read -P 66 128M 512", """read 512/512 bytes at offset 134217728
> +512.000000 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("write -P 66 256M 512", """wrote 512/512 bytes at offset 268435456
> +512.000000 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("read -P 66 256M 512", """read 512/512 bytes at offset 268435456
> +512.000000 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +],
> +}
> +
> +# Used by test_aio()
> +aio_cases = {
> +"011":[
> +("""aio_write 1M 1M
> +aio_write 1536K 1M""", """wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
> +wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("""aio_write 2M 1M
> +aio_write 2560K 1M""", """wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
> +wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("""aio_write 3M 1M
> +aio_write 3584K 1M""", """wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
> +wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("""aio_write 4M 1M
> +aio_write 4608K 1M""", """wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
> +wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("""aio_write 5M 1M
> +aio_write 5632K 1M""", """wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
> +wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("""aio_write 6M 1M
> +aio_write 6656K 1M""", """wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
> +wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("""aio_write 7M 1M
> +aio_write 7680K 1M""", """wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
> +wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("""aio_write 8M 1M
> +aio_write 8704K 1M""", """wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
> +wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("""aio_write 9M 1M
> +aio_write 9728K 1M""", """wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
> +wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +("""aio_write 10M 1M
> +aio_write 10752K 1M""", """wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
> +wrote 1048576/1048576 bytes at offset XXX
> +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)"""),
> +],
> +}
> +
> +# Used to specify the image size of each test case.
> +size_cases = {
> +"001":128*1024*1024,
> +"002":128*1024*1024,
> +"003":128*1024*1024,
> +"004":128*1024*1024,
> +"008":128*1024*1024,
> +"011":6*1024*1024*1024,
> +"016":128*1024*1024,
> +"025":128*1024*1024,
> +}
> diff --git a/tests/test_qemu_io.py b/tests/test_qemu_io.py
> new file mode 100644
> index 0000000..2ba951a
> --- /dev/null
> +++ b/tests/test_qemu_io.py
> @@ -0,0 +1,84 @@
> +from sheepdog_test import *
> +from qemu_io_testcases import *
> +import time
> +
> +def test_io():
> +
> + cmd = ["qemu-io"]
> + sdog = Sheepdog()
> +
> + for n in sdog.nodes:
> + n.start()
> + n.wait()
> +
> + p = sdog.format()
> + p.wait()
> +
> + for i in io_cases:
> + vdi = sdog.create_vdi(str(i), size_cases[i])
> + vdi.wait()
> + time.sleep(1)
> + vm = n.create_vm(vdi)
> + for j in io_cases[i]:
> + (out, err) = vm.test_io(cmd, j[0] + "\n")
> + assert out == j[1]
> + time.sleep(1)
> + print "Pass"
> + vdi.destroy()
> + vdi.wait()
> +
> + p = sdog.format()
> + p.wait()
> + for n in sdog.nodes:
> + n.stop()
> +
> +def test_aio():
> +
> + cmd = ["qemu-io"]
> + sdog = Sheepdog()
> +
> + for n in sdog.nodes:
> + n.start()
> + n.wait()
> +
> + p = sdog.format()
> + p.wait()
> +
> + for i in aio_cases:
> + vdi = sdog.create_vdi(str(i), size_cases[i])
> + vdi.wait()
> + time.sleep(1)
> + vm = n.create_vm(vdi)
> + for j in aio_cases[i]:
> + (out, err) = vm.test_io(cmd, j[0] + "\n", async=True)
> + assert out == j[1]
> + time.sleep(1)
> + print "Pass"
> + vdi.destroy()
> + vdi.wait()
> +
> +def test_growable_io():
> +
> + cmd = ["qemu-io", "-g"]
> + sdog = Sheepdog()
> +
> + for n in sdog.nodes:
> + n.start()
> + n.wait()
> +
> + p = sdog.format()
> + p.wait()
> +
> + for i in io_cases_g:
> + vdi = sdog.create_vdi(str(i), size_cases[i])
> + vdi.wait()
> + time.sleep(1)
> + vm = n.create_vm(vdi)
> + for j in io_cases_g[i]:
> + (out, err) = vm.test_io(cmd, j[0] + "\n")
> + assert out == j[1]
> + time.sleep(1)
> + print "Pass"
> + vdi.destroy()
> + vdi.wait()
> +
> --
> 1.7.6.4
>
> --
> sheepdog mailing list
> sheepdog at lists.wpkg.org
> http://lists.wpkg.org/mailman/listinfo/sheepdog
------------------------------
_______________________________________________
sheepdog mailing list
sheepdog at lists.wpkg.org
http://lists.wpkg.org/mailman/listinfo/sheepdog
End of sheepdog Digest, Vol 26, Issue 28
****************************************