[sheepdog] [PATCH v4 RESEND 6/8] sheepdev: create kernel threads to process IO requests in the driver

levin li levin108 at gmail.com
Wed Jan 23 09:16:15 CET 2013


From: levin li <xingke.lwp at taobao.com>

This patch creates two kernel threads, 'sheep_req' and 'sheep_fin', to process
IO requests and sheep responses respectively. sheep_req fetches IO requests
from the pending list and forwards them to sheep; sheep_fin waits for sheep
responses and uses them to complete the IO requests on the finish list.

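For reference, both threads follow the usual kernel producer/consumer
pattern. Below is a minimal, self-contained sketch of that pattern only
(hypothetical names, not the driver's actual structures):

    #include <linux/kthread.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static LIST_HEAD(pending_list);
    static DEFINE_SPINLOCK(pending_lock);
    static DECLARE_WAIT_QUEUE_HEAD(req_wait);

    static int req_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    /* sleep until work arrives or we are told to stop */
                    wait_event_interruptible(req_wait,
                                             !list_empty(&pending_list) ||
                                             kthread_should_stop());

                    spin_lock(&pending_lock);
                    if (list_empty(&pending_list)) {
                            spin_unlock(&pending_lock);
                            continue;
                    }
                    /* dequeue one entry here and process it with the
                     * lock dropped */
                    spin_unlock(&pending_lock);
            }
            return 0;
    }

Such a thread is started with kthread_run(req_thread_fn, NULL, "sheep_req")
and stopped with kthread_stop(), which wakes it and waits for it to exit.
process_request() and process_response() below follow this shape, with the
pending and finish lists as the queues.
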
After this patch, we can use the device normally, for example to format and mount it:

# mkfs.ext4 /dev/sheepa
# mount -t ext4 /dev/sheepa test

Signed-off-by: levin li <xingke.lwp at taobao.com>
---
 sheepdev/device.c   | 683 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 sheepdev/sheepdev.h |  29 +++
 2 files changed, 711 insertions(+), 1 deletion(-)

diff --git a/sheepdev/device.c b/sheepdev/device.c
index bcf82a2..fdf17ac 100644
--- a/sheepdev/device.c
+++ b/sheepdev/device.c
@@ -42,6 +42,26 @@ static void sheepdev_put(struct sheepdev *dev)
 		kfree(dev);
 }
 
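+/*
+ * Track an in-flight object request on the finish list; the response
+ * thread later matches the sheep reply back to it via req_id.
+ */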
+static int add_request(struct sheepdev *dev, struct request *req,
+		       uint64_t oid, int idx)
+{
+	struct obj_request *s_req = kmalloc(sizeof(*s_req), GFP_KERNEL);
+	if (!s_req)
+		return -ENOMEM;
+
+	s_req->req_id = dev->req_id;
+	s_req->req = req;
+	s_req->oid = oid;
+	s_req->idx = idx;
+	INIT_LIST_HEAD(&s_req->list);
+
+	spin_lock(&dev->que_lock);
+	list_add_tail(&s_req->list, &dev->finish_list);
+	spin_unlock(&dev->que_lock);
+
+	return 0;
+}
+
 static void sheep_end_request_directly(struct request *req, int ret)
 {
 	struct request_queue *q = req->q;
@@ -54,10 +74,407 @@ static void sheep_end_request_directly(struct request *req, int ret)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+static void copy_read_data(struct request *req, char *buf,
+			   int data_length, int higher_part_len, int result)
+{
+	struct req_iterator iter;
+	struct bio_vec *bvec;
+	int len = 0, rlen, offset, buf_len = 0;
+	int boundary = 0;
+
+	if (result == SD_RES_NO_OBJ)
+		return;
+
+	rq_for_each_segment(bvec, req, iter) {
+		void *addr;
+
+		if (len + bvec->bv_len <= higher_part_len) {
+			len += bvec->bv_len;
+			continue;
+		}
+
+		if (higher_part_len > len) {
+			offset = higher_part_len - len;
+			rlen = bvec->bv_len - offset;
+		} else {
+			offset = 0;
+			rlen = bvec->bv_len;
+		}
+
+		if (buf_len + rlen > data_length) {
+			rlen = data_length - buf_len;
+			boundary = 1;
+		}
+
+		addr = kmap(bvec->bv_page);
+		memcpy(addr + bvec->bv_offset + offset, buf + buf_len, rlen);
+		buf_len += rlen;
+		kunmap(bvec->bv_page);
+
+		if (boundary)
+			break;
+	}
+}
+
+static struct sheep_request *sd_req_search(struct sheepdev *dev,
+					   struct request *req)
+{
+	struct sheep_request *sdreq;
+
+	list_for_each_entry(sdreq, &dev->sd_req_list, list) {
+		if (sdreq->req == req)
+			return sdreq;
+	}
+
+	return NULL;
+}
+
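+/*
+ * Complete a block-layer request.  A request that spans two objects is
+ * finished only after both halves have been answered; the first reply
+ * to arrive is parked on sd_req_list until its peer shows up.
+ */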
+static void sheep_end_request(struct sheepdev *dev, struct request *req,
+			      int ret, int idx, char *buf, uint32_t data_length)
+{
+	unsigned long sector = blk_rq_pos(req);
+	unsigned long offset = sector * KERNEL_SECTOR_SIZE;
+	unsigned long nbytes = blk_rq_bytes(req);
+
+	offset = offset % SHEEP_OBJECT_SIZE;
+	/* Check whether the request visits two objects. */
+	if (offset + nbytes > SHEEP_OBJECT_SIZE) {
+		struct sheep_request *sre;
+
+		spin_lock(&dev->sd_req_lock);
+		sre = sd_req_search(dev, req);
+		spin_unlock(&dev->sd_req_lock);
+
+		if (sre) {
+			if (!rq_data_dir(req)) {
+				copy_read_data(req, buf, data_length,
+					       sre->read_length, ret);
+				if (ret == SD_RES_NO_OBJ) {
+					if (sre->result &&
+					    sre->result != SD_RES_NO_OBJ)
+						ret = sre->result;
+					else
+						ret = 0;
+				}
+			} else {
+				ret = ret ? ret : sre->result;
+			}
+			spin_lock(&dev->sd_req_lock);
+			list_del_init(&sre->list);
+			spin_unlock(&dev->sd_req_lock);
+			kfree(sre);
+
+			sheep_end_request_directly(req, ret);
+		} else {
+			if (!rq_data_dir(req))
+				copy_read_data(req, buf, data_length, 0, ret);
+
+			sre = kmalloc(sizeof(*sre), GFP_KERNEL);
+			if (!sre) {
+				sheep_end_request_directly(req, -EIO);
+				return;
+			}
+			sre->result = ret;
+			sre->read_length = data_length;
+			sre->req = req;
+
+			spin_lock(&dev->sd_req_lock);
+			list_add_tail(&sre->list, &dev->sd_req_list);
+			spin_unlock(&dev->sd_req_lock);
+		}
+	} else {
+		if (!rq_data_dir(req)) {
+			copy_read_data(req, buf, data_length, 0, ret);
+			if (ret == SD_RES_NO_OBJ)
+				ret = 0;
+		}
+
+		sheep_end_request_directly(req, ret);
+	}
+}
+
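+/*
+ * Insert @new into the object-state tree.  If an entry with the same
+ * idx already exists, return it and leave the tree unchanged.
+ */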
+static struct obj_state_entry *obj_state_rb_insert(struct rb_root *root,
+						   struct obj_state_entry *new)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct obj_state_entry *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct obj_state_entry, node);
+
+		if (new->idx < entry->idx)
+			p = &(*p)->rb_left;
+		else if (new->idx > entry->idx)
+			p = &(*p)->rb_right;
+		else
+			return entry;
+	}
+	rb_link_node(&new->node, parent, p);
+	rb_insert_color(&new->node, root);
+
+	return NULL;
+}
+
+static struct obj_state_entry *obj_state_rb_search(struct rb_root *root,
+						   uint32_t idx)
+{
+	struct rb_node *n = root->rb_node;
+	struct obj_state_entry *t;
+
+	while (n) {
+		t = rb_entry(n, struct obj_state_entry, node);
+
+		if (idx < t->idx)
+			n = n->rb_left;
+		else if (idx > t->idx)
+			n = n->rb_right;
+		else
+			return t;
+	}
+
+	return NULL;
+}
+
+/*
+ * Must not be called with a spinlock held: kmalloc(GFP_KERNEL)
+ * may sleep.
+ */
+static void set_obj_state(struct sheepdev *dev, int idx, int state)
+{
+	struct obj_state_entry *old, *new;
+
+	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	if (!new) {
+		DBPRT("[%s] No-Mem\n", __func__);
+		return;
+	}
+
+	new->idx = idx;
+	new->state = state;
+
+	write_lock(&dev->creating_lock);
+	old = obj_state_rb_insert(&dev->obj_state_tree, new);
+	write_unlock(&dev->creating_lock);
+	if (old) {
+		kfree(new);
+		old->state = state;
+	}
+}
+
+static int get_obj_state(struct sheepdev *dev, int idx)
+{
+	struct obj_state_entry *entry;
+
+	read_lock(&dev->creating_lock);
+	entry = obj_state_rb_search(&dev->obj_state_tree, idx);
+	read_unlock(&dev->creating_lock);
+	if (entry)
+		return entry->state;
+
+	return -ENOENT;
+}
+
+static int remove_obj_state(struct sheepdev *dev, int idx)
+{
+	struct rb_root *root = &dev->obj_state_tree;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct obj_state_entry *entry;
+
+	write_lock(&dev->creating_lock);
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct obj_state_entry, node);
+
+		if (idx < entry->idx)
+			p = &(*p)->rb_left;
+		else if (idx > entry->idx)
+			p = &(*p)->rb_right;
+		else {
+			rb_erase(parent, root);
+			write_unlock(&dev->creating_lock);
+			kfree(entry);
+			return 0;
+		}
+	}
+	write_unlock(&dev->creating_lock);
+
+	return -ENOENT;
+}
+
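+/*
+ * Split a read into per-object sheep requests.  A request that crosses
+ * an object boundary is sent as two reads; the completions are stitched
+ * back together in sheep_end_request().
+ */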
+static int handle_read_request(struct request *req)
+{
+	struct gendisk *disk = req->rq_disk;
+	struct sheepdev *dev = disk->private_data;
+	unsigned long sector = blk_rq_pos(req);
+	unsigned long offset = sector * KERNEL_SECTOR_SIZE;
+	unsigned long nbytes = blk_rq_bytes(req);
+	uint64_t oid, obj_offset;
+	int ret = 0, idx, read_len = 0, visit_two_objs;
+
+next_obj:
+	idx = offset / SHEEP_OBJECT_SIZE;
+	obj_offset = offset % SHEEP_OBJECT_SIZE;
+	visit_two_objs = 0;
+
+	read_lock(&dev->creating_lock);
+	if (dev->inode->data_vdi_id[idx])
+		oid = vid_to_data_oid(dev->inode->data_vdi_id[idx], idx);
+	else
+		oid = vid_to_data_oid(dev->vid, idx);
+	read_unlock(&dev->creating_lock);
+
+	if (obj_offset + nbytes > SHEEP_OBJECT_SIZE) {
+		read_len = SHEEP_OBJECT_SIZE - obj_offset;
+		visit_two_objs = 1;
+	} else {
+		read_len = nbytes;
+	}
+
+	ret = add_request(dev, req, oid, idx);
+	if (ret)
+		return -EIO;
+
+	ret = send_read_req(dev, oid, read_len, obj_offset);
+	if (ret)
+		return -EIO;
+
+	if (visit_two_objs) {
+		nbytes -= read_len;
+		offset += read_len;
+		goto next_obj;
+	}
+
+	return 0;
+}
+
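+/*
+ * Decide whether the caller must create (or copy-on-write) the object
+ * at @idx, or wait for another in-flight request that is already
+ * doing so.
+ */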
+static void sheep_wait_object(struct sheepdev *dev, int idx, int *create,
+			      uint64_t *cow_oid)
+{
+	*create = *cow_oid = 0;
+
+	read_lock(&dev->creating_lock);
+	if (!dev->inode->data_vdi_id[idx]) {
+		read_unlock(&dev->creating_lock);
+		if (get_obj_state(dev, idx) > 0) {
+			/* Wait for pending inode-update to complete */
+			wait_event_interruptible(dev->creating_wait,
+						 object_ready(dev, idx));
+		} else {
+			set_obj_state(dev, idx, OBJ_STATE_CREATING);
+			*create = 1;
+		}
+	} else if (!object_ready(dev, idx)) {
+		read_unlock(&dev->creating_lock);
+		/*
+		 * Now we check the rbtree to determine whether to wait for
+		 * copy-on-write done or to invoke copy-on-write for this object
+		 */
+		if (get_obj_state(dev, idx) > 0) {
+			/* Wait for the inode to be updated */
+			wait_event_interruptible(dev->creating_wait,
+						 object_ready(dev, idx));
+		} else {
+			set_obj_state(dev, idx, OBJ_STATE_COWING);
+			*cow_oid = vid_to_data_oid(dev->inode->data_vdi_id[idx],
+						   idx);
+			*create = 1;
+		}
+	} else {
+		read_unlock(&dev->creating_lock);
+	}
+}
+
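+/*
+ * Split a write into per-object sheep requests.  A write that creates
+ * or copies an object is followed by an inode update so that
+ * data_vdi_id[] points at the new object.
+ */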
+static int handle_write_request(struct request *req)
+{
+	struct req_iterator iter;
+	struct bio_vec *bvec;
+	struct gendisk *disk = req->rq_disk;
+	struct sheepdev *dev = disk->private_data;
+	unsigned long sector = blk_rq_pos(req);
+	unsigned long offset = sector * KERNEL_SECTOR_SIZE;
+	unsigned long nbytes = blk_rq_bytes(req);
+	uint64_t oid, obj_offset, cow_oid;
+	int ret = 0, len = 0, send_len = 0, sent_len = 0;
+	int create, idx, visit_two_objs;
+	void *sheep_buf = NULL;
+
+	sheep_buf = kmalloc(nbytes, GFP_KERNEL);
+	if (!sheep_buf)
+		return -ENOMEM;
+
+	rq_for_each_segment(bvec, req, iter) {
+		void *addr = kmap(bvec->bv_page);
+		memcpy(sheep_buf + len, addr + bvec->bv_offset, bvec->bv_len);
+		len += bvec->bv_len;
+		kunmap(bvec->bv_page);
+	}
+
+next_obj:
+	idx = offset / SHEEP_OBJECT_SIZE;
+	oid = vid_to_data_oid(dev->vid, idx);
+	obj_offset = offset % SHEEP_OBJECT_SIZE;
+	send_len = nbytes;
+	visit_two_objs = 0;
+
+	sheep_wait_object(dev, idx, &create, &cow_oid);
+
+	if (obj_offset + send_len > SHEEP_OBJECT_SIZE) {
+		send_len = SHEEP_OBJECT_SIZE - obj_offset;
+		visit_two_objs = 1;
+	}
+
+	ret = add_request(dev, req, oid, idx);
+	if (ret) {
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = send_write_req(dev, oid, cow_oid, sheep_buf + sent_len,
+			     send_len, obj_offset, create);
+	if (ret != SD_RES_SUCCESS) {
+		ret = -EIO;
+		goto out;
+	}
+
+	if (create) {
+		/* For create/cow operations we also need to update the inode */
+		oid = vid_to_vdi_oid(dev->vid);
+		obj_offset = offsetof(struct sheepdog_inode, data_vdi_id);
+		obj_offset += sizeof(uint32_t) * idx;
+
+		ret = add_request(dev, req, oid, idx);
+		if (ret) {
+			ret = -EIO;
+			goto out;
+		}
+
+		ret = send_write_req(dev, oid, 0, (char *)&dev->vid,
+				     sizeof(dev->vid), obj_offset, 0);
+		if (ret != SD_RES_SUCCESS) {
+			ret = -EIO;
+			goto out;
+		}
+	}
+
+	if (visit_two_objs) {
+		sent_len += send_len;
+		offset += send_len;
+		nbytes -= send_len;
+		goto next_obj;
+	}
+
+out:
+	kfree(sheep_buf);
+	return ret;
+}
+
 static void remove_device(struct sheepdev *dev)
 {
 	DBPRT("remove device /dev/%s\n", dev->disk->disk_name);
 
+	kthread_stop(dev->req_thread);
+	wake_up_interruptible(&dev->req_wait);
+	if (dev->fin_thread) {
+		kthread_stop(dev->fin_thread);
+		wake_up_interruptible(&dev->fin_wait);
+	}
+
 	blk_cleanup_queue(dev->disk->queue);
 	del_gendisk(dev->disk);
 	put_disk(dev->disk);
@@ -73,6 +490,89 @@ static void remove_device(struct sheepdev *dev)
 	sheepdev_put(dev);
 }
 
+static void cleanup_finish_list(struct sheepdev *dev)
+{
+	struct obj_request *objreq, *t;
+	struct request *req, *n;
+	LIST_HEAD(deletion_list);
+	LIST_HEAD(finish_list);
+
+	DBPRT("Network Error, cleanup request queue\n");
+
+	spin_lock(&dev->que_lock);
+	list_splice_init(&dev->finish_list, &finish_list);
+	list_splice_init(&dev->deletion_list, &deletion_list);
+	spin_unlock(&dev->que_lock);
+
+	list_for_each_entry_safe(objreq, t, &finish_list, list) {
+		list_del_init(&objreq->list);
+		kfree(objreq);
+	}
+
+	list_for_each_entry_safe(req, n, &deletion_list, queuelist) {
+		list_del_init(&req->queuelist);
+		sheep_end_request_directly(req, -EIO);
+	}
+}
+
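+/*
+ * Body of the 'sheep_req' thread: dequeue requests from the pending
+ * list and forward them to sheep; on error, tear down the socket and
+ * fail all in-flight requests.
+ */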
+static int process_request(void *data)
+{
+	struct sheepdev *dev = (struct sheepdev *)data;
+	struct request *req;
+	int ret;
+
+	sheepdev_get(dev);
+
+	while (!kthread_should_stop() || !list_empty(&dev->pending_list)) {
+		wait_event_interruptible(dev->req_wait,
+					 !list_empty(&dev->pending_list) ||
+					 kthread_should_stop());
+
+		spin_lock(&dev->que_lock);
+		if (list_empty(&dev->pending_list)) {
+			spin_unlock(&dev->que_lock);
+			continue;
+		}
+
+		req = list_entry(dev->pending_list.next, struct request,
+				 queuelist);
+		list_del_init(&req->queuelist);
+		list_add_tail(&req->queuelist, &dev->deletion_list);
+		spin_unlock(&dev->que_lock);
+
+		/* Check whether the connection died */
+		read_lock(&dev->sock_lock);
+		if (!dev->sock) {
+			read_unlock(&dev->sock_lock);
+
+			sheep_end_request_directly(req, -EIO);
+			continue;
+		}
+		read_unlock(&dev->sock_lock);
+
+		if (rq_data_dir(req))
+			ret = handle_write_request(req);
+		else
+			ret = handle_read_request(req);
+
+		if (ret) {
+			write_lock(&dev->sock_lock);
+			inet_release(dev->sock);
+			dev->sock = NULL;
+			write_unlock(&dev->sock_lock);
+
+			kthread_stop(dev->fin_thread);
+			cleanup_finish_list(dev);
+		}
+
+		wake_up_interruptible(&dev->fin_wait);
+	}
+
+	sheepdev_put(dev);
+
+	return 0;
+}
+
 static int sheepdev_open(struct block_device *blkdev, fmode_t mode)
 {
 	struct gendisk *disk = blkdev->bd_disk;
@@ -120,7 +620,11 @@ static void sheep_io_request(struct request_queue *rq)
 			__blk_end_request_all(req, -EIO);
 		}
 
-		sheep_end_request_directly(req, -EIO);
+		spin_lock_irq(rq->queue_lock);
+		list_add_tail(&req->queuelist, &dev->pending_list);
+		spin_unlock_irq(rq->queue_lock);
+
+		wake_up_interruptible(&dev->req_wait);
 
 		spin_lock_irq(rq->queue_lock);
 	}
@@ -154,6 +658,179 @@ static int sheep_add_disk(struct sheepdev *dev)
 	return 0;
 }
 
+static struct obj_request *find_request(struct sheepdev *dev, int id)
+{
+	struct obj_request *req, *t;
+
+	spin_lock(&dev->que_lock);
+	list_for_each_entry_safe(req, t, &dev->finish_list, list) {
+		if (req->req_id != id)
+			continue;
+		list_del_init(&req->list);
+		spin_unlock(&dev->que_lock);
+		return req;
+	}
+	spin_unlock(&dev->que_lock);
+
+	return NULL;
+}
+
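+/*
+ * Read one response (header plus optional payload) from the sheep
+ * socket.  On success the caller owns *data and must kfree() it.
+ */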
+static int read_reply(struct sheepdev *dev, int *req_id, int *result,
+		      void **data, uint32_t *data_length)
+{
+	int ret;
+	struct sd_rsp rsp;
+	void *buf = NULL;
+
+	*result = *req_id = *data_length = 0;
+
+	ret = do_read(dev->sock, (char *)&rsp, sizeof(rsp));
+	if (ret != sizeof(rsp)) {
+		DBPRT("failed to read response\n");
+		return -EIO;
+	}
+
+	if (rsp.data_length > 0) {
+		buf = kmalloc(rsp.data_length, GFP_KERNEL);
+		if (!buf) {
+			DBPRT("No-mem\n");
+			return -ENOMEM;
+		}
+
+		ret = do_read(dev->sock, buf, rsp.data_length);
+		if (ret != rsp.data_length) {
+			kfree(buf);
+			return -EIO;
+		}
+	}
+
+	*req_id = rsp.id;
+	*result = rsp.result;
+	*data = buf;
+	*data_length = rsp.data_length;
+
+	return 0;
+}
+
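+/*
+ * Match one sheep reply to its in-flight request.  A create/cow write
+ * needs both the object write and the inode update acknowledged: the
+ * first reply only records OBJ_STATE_OK, the second completes the
+ * request.
+ */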
+static void process_response_one(struct sheepdev *dev, int req_id, int result,
+				 char *buf, int data_length)
+{
+	struct obj_request *obj_req;
+	struct request *req;
+	int idx, obj_state;
+
+	obj_req = find_request(dev, req_id);
+	if (!obj_req) {
+		DBPRT("No-request  rfor id %d\n", req_id);
+		return;
+	}
+	req = obj_req->req;
+
+	if (!rq_data_dir(req)) {
+		sheep_end_request(dev, req, result, obj_req->idx, buf,
+				  data_length);
+		goto out;
+	}
+
+	result = (result != SD_RES_SUCCESS) ? -EIO : 0;
+	if (obj_req->oid == vid_to_vdi_oid(dev->vid)) {
+		/* inode-update response */
+		idx = obj_req->idx;
+	} else {
+		/* ordinary write response */
+		idx = data_oid_to_idx(obj_req->oid);
+
+		/* the object already exists */
+		read_lock(&dev->creating_lock);
+		if (dev->inode->data_vdi_id[idx] == dev->vid) {
+			read_unlock(&dev->creating_lock);
+			sheep_end_request(dev, obj_req->req, result, idx,
+					  NULL, 0);
+			goto out;
+		}
+		read_unlock(&dev->creating_lock);
+	}
+
+	/* first or second half of a create/cow write completed */
+	obj_state = get_obj_state(dev, idx);
+	if (obj_state == OBJ_STATE_OK) {
+		/*
+		 * Both the object write and the inode update are
+		 * complete, so we can end the write request and wake
+		 * up other requests waiting for this object.
+		 */
+		remove_obj_state(dev, idx);
+
+		write_lock(&dev->creating_lock);
+		dev->inode->data_vdi_id[idx] = dev->vid;
+		write_unlock(&dev->creating_lock);
+
+		sheep_end_request(dev, req, result, idx, NULL, 0);
+		wake_up_interruptible(&dev->creating_wait);
+	} else {
+		/*
+		 * The other half (object write or inode update) is
+		 * still in flight; record that this one is done.
+		 */
+		set_obj_state(dev, idx, OBJ_STATE_OK);
+	}
+
+out:
+	kfree(obj_req);
+}
+
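+/*
+ * Body of the 'sheep_fin' thread: wait for sheep replies and complete
+ * the corresponding requests on the finish list.
+ */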
+static int process_response(void *data)
+{
+	struct sheepdev *dev = data;
+	uint32_t data_length;
+	int ret, req_id, res;
+
+	sheepdev_get(dev);
+
+	while (!kthread_should_stop() || !list_empty(&dev->finish_list)) {
+		void *buf = NULL;
+
+		wait_event_interruptible(dev->fin_wait,
+					 !list_empty(&dev->finish_list) ||
+					 kthread_should_stop());
+
+		read_lock(&dev->sock_lock);
+		if (!dev->sock) {
+			read_unlock(&dev->sock_lock);
+			dev->fin_thread = NULL;
+			break;
+		}
+		read_unlock(&dev->sock_lock);
+
+		spin_lock(&dev->que_lock);
+		if (list_empty(&dev->finish_list)) {
+			spin_unlock(&dev->que_lock);
+			continue;
+		}
+		spin_unlock(&dev->que_lock);
+
+		ret = read_reply(dev, &req_id, &res, &buf, &data_length);
+		if (ret) {
+			cleanup_finish_list(dev);
+
+			write_lock(&dev->sock_lock);
+			if (dev->sock) {
+				inet_release(dev->sock);
+				dev->sock = NULL;
+			}
+			write_unlock(&dev->sock_lock);
+			dev->fin_thread = NULL;
+			break;
+		}
+
+		process_response_one(dev, req_id, res, buf, data_length);
+
+		kfree(buf);
+	}
+
+	sheepdev_put(dev);
+	return 0;
+}
+
 static int sheep_dev_setup(struct sheepdev *dev)
 {
 	int ret;
@@ -178,6 +855,10 @@ static int sheep_dev_setup(struct sheepdev *dev)
 
 	dev->obj_state_tree = RB_ROOT;
 	dev->req_id = 1;
+	dev->req_thread = kthread_run(process_request, dev, "sheep_req");
+	if (IS_ERR(dev->req_thread))
+		return PTR_ERR(dev->req_thread);
+	dev->fin_thread = kthread_run(process_response, dev, "sheep_fin");
+	if (IS_ERR(dev->fin_thread)) {
+		kthread_stop(dev->req_thread);
+		return PTR_ERR(dev->fin_thread);
+	}
 
 	ret = sheep_add_disk(dev);
 	if (ret)
diff --git a/sheepdev/sheepdev.h b/sheepdev/sheepdev.h
index 5ef7098..6f963e5 100644
--- a/sheepdev/sheepdev.h
+++ b/sheepdev/sheepdev.h
@@ -81,6 +81,31 @@ struct sheepdev {
 	struct sheepdog_inode *inode;
 };
 
+struct sheep_request {
+	int result;
+	uint32_t read_length;
+	struct request *req;
+	struct list_head list;
+};
+
+struct obj_request {
+	int req_id;
+	int idx; /* only used when updating the inode */
+	uint64_t oid;
+	struct request *req;
+	struct list_head list;
+};
+
+#define OBJ_STATE_CREATING 1
+#define OBJ_STATE_COWING   2
+#define OBJ_STATE_OK       3
+
+struct obj_state_entry {
+	int idx;
+	int state;
+	struct rb_node node;
+};
+
 /* connect.c */
 int connect_to(struct socket **sock, const char *addr, int port);
 int send_req(struct socket *sock, struct sd_req *hdr, void *data,
@@ -106,4 +131,8 @@ int sheep_add_device(const char *addr, int port, const char *vdiname,
 int sheep_remove_device(const char *vdiname, int snapshot_id,
 			const char *snapshot_tag);
 
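+/* An object is ready once the inode maps it to this device's vid. */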
+static inline int object_ready(struct sheepdev *dev, int idx)
+{
+	return dev->inode->data_vdi_id[idx] == dev->vid;
+}
 #endif
-- 
1.7.11.7