[sheepdog] [PATCH v2 2/7] sheep: revive lock operation

Liu Yuan namei.unix at gmail.com
Wed Jul 30 11:36:47 CEST 2014


On Wed, Jul 30, 2014 at 04:24:37PM +0900, Hitoshi Mitake wrote:
> Current sheepdog doesn't support vdi locking. This patch and the
> succeeding ones revive the feature. With this feature, two or more
> clients (including QEMU and tgt) are not allowed to open the same
> VDI at the same time.
> 
> Cc: Fabian Zimmermann <dev.faz at gmail.com>
> Cc: Valerio Pachera <sirio81 at gmail.com>
> Signed-off-by: Hitoshi Mitake <mitake.hitoshi at lab.ntt.co.jp>
> ---
>  sheep/ops.c        | 41 ++++++++++++++++++++++----
>  sheep/sheep_priv.h |  3 ++
>  sheep/vdi.c        | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 125 insertions(+), 5 deletions(-)
> 
> diff --git a/sheep/ops.c b/sheep/ops.c
> index 58ff397..7ca06d6 100644
> --- a/sheep/ops.c
> +++ b/sheep/ops.c
> @@ -1306,6 +1306,34 @@ static int local_repair_replica(struct request *req)
>  	return ret;
>  }
>  
> +static int cluster_lock_vdi(const struct sd_req *req, struct sd_rsp *rsp,
> +			    void *data, const struct sd_node *sender)
> +{
> +	uint32_t vid = rsp->vdi.vdi_id;
> +
> +	sd_info("node: %s is locking VDI: %"PRIx32, node_to_str(sender), vid);
> +
> +	if (!lock_vdi(vid, &sender->nid)) {
> +		sd_err("locking %"PRIx32" failed", vid);
> +		return SD_RES_VDI_NOT_LOCKED;
> +	}
> +
> +	return SD_RES_SUCCESS;
> +}
> +
> +static int cluster_release_vdi_main(const struct sd_req *req,
> +				    struct sd_rsp *rsp, void *data,
> +				    const struct sd_node *sender)
> +{
> +	uint32_t vid = req->vdi.base_vdi_id;
> +
> +	sd_info("node: %s is unlocking VDI: %"PRIx32, node_to_str(sender), vid);
> +
> +	unlock_vdi(vid, &sender->nid);
> +
> +	return SD_RES_SUCCESS;
> +}
> +
>  static struct sd_op_template sd_ops[] = {
>  
>  	/* cluster operations */
> @@ -1400,6 +1428,14 @@ static struct sd_op_template sd_ops[] = {
>  		.name = "LOCK_VDI",
>  		.type = SD_OP_TYPE_CLUSTER,
>  		.process_work = cluster_get_vdi_info,
> +		.process_main = cluster_lock_vdi,
> +	},
> +
> +	[SD_OP_RELEASE_VDI] = {
> +		.name = "RELEASE_VDI",
> +		.type = SD_OP_TYPE_CLUSTER,
> +		.process_work = local_release_vdi,
> +		.process_main = cluster_release_vdi_main,
>  	},
>  
>  	[SD_OP_REWEIGHT] = {
> @@ -1438,11 +1474,6 @@ static struct sd_op_template sd_ops[] = {
>  	},
>  
>  	/* local operations */
> -	[SD_OP_RELEASE_VDI] = {
> -		.name = "RELEASE_VDI",
> -		.type = SD_OP_TYPE_LOCAL,
> -		.process_work = local_release_vdi,
> -	},
>  
>  	[SD_OP_GET_STORE_LIST] = {
>  		.name = "GET_STORE_LIST",
> diff --git a/sheep/sheep_priv.h b/sheep/sheep_priv.h
> index b0156ee..74aa08e 100644
> --- a/sheep/sheep_priv.h
> +++ b/sheep/sheep_priv.h
> @@ -345,6 +345,9 @@ int sd_delete_vdi(const char *name);
>  int sd_lookup_vdi(const char *name, uint32_t *vid);
>  int sd_create_hyper_volume(const char *name, uint32_t *vdi_id);
>  
> +bool lock_vdi(uint32_t vid, const struct node_id *owner);
> +bool unlock_vdi(uint32_t vid, const struct node_id *owner);
> +

It would be better to name them 'vdi_{lock,unlock}', like the other vdi
helpers such as vdi_create. The vdi_ prefix works as a namespace for
these functions.
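
For example, the declarations in sheep_priv.h would then read (just a
sketch of the suggested rename; the definitions in sheep/vdi.c and the
callers in sheep/ops.c would need the same change):

  bool vdi_lock(uint32_t vid, const struct node_id *owner);
  bool vdi_unlock(uint32_t vid, const struct node_id *owner);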

Thanks
Yuan


