[sheepdog] [PATCH v3 2/2] use preferred block comments style
Liu Yuan
namei.unix at gmail.com
Mon Mar 25 07:01:42 CET 2013
From: Liu Yuan <tailai.ly at taobao.com>
Signed-off-by: Liu Yuan <tailai.ly at taobao.com>
---
collie/common.c | 4 +---
collie/vdi.c | 10 +++++-----
include/bitops.h | 6 +++---
include/list.h | 10 +++++-----
include/logger.h | 4 +---
include/sheepdog_proto.h | 8 ++------
lib/logger.c | 22 +++++++++++++---------
lib/rbtree.c | 36 +++++++++++++++++++++---------------
lib/strbuf.c | 3 ++-
lib/util.c | 2 +-
sheep/cluster.h | 22 ++++++++++++++--------
sheep/cluster/corosync.c | 30 ++++++++++++++++++++----------
sheep/cluster/local.c | 4 +---
sheep/group.c | 26 ++++++++++++++------------
sheep/journal.c | 4 +---
sheep/migrate.c | 6 ++++--
sheep/object_cache.c | 4 +---
sheep/object_list_cache.c | 3 ++-
sheep/ops.c | 9 ++++++---
sheep/plain_store.c | 20 ++++++++++----------
sheep/recovery.c | 19 +++++++++++++------
sheep/request.c | 16 +++++++++-------
sheep/store.c | 10 +++++-----
sheep/vdi.c | 6 ++++--
sheep/work.c | 6 ++++--
sheepfs/volume.c | 6 ++++--
26 files changed, 166 insertions(+), 130 deletions(-)
diff --git a/collie/common.c b/collie/common.c
index d086f7b..a599ca3 100644
--- a/collie/common.c
+++ b/collie/common.c
@@ -222,9 +222,7 @@ int send_light_req_get_response(struct sd_req *hdr, const char *host, int port)
return SD_RES_SUCCESS;
}
-/*
- * Light request only contains header, without body content.
- */
+/* Light request only contains header, without body content. */
int send_light_req(struct sd_req *hdr, const char *host, int port)
{
int ret = send_light_req_get_response(hdr, host, port);
diff --git a/collie/vdi.c b/collie/vdi.c
index e480e29..c18f9a9 100644
--- a/collie/vdi.c
+++ b/collie/vdi.c
@@ -1562,9 +1562,7 @@ struct obj_backup {
uint8_t data[SD_DATA_OBJ_SIZE];
};
-/*
- * discards redundant area from backup data
- */
+/* discards redundant area from backup data */
static void compact_obj_backup(struct obj_backup *backup, uint8_t *from_data)
{
uint8_t *p1, *p2;
@@ -1822,8 +1820,10 @@ static int vdi_restore(int argc, char **argv)
goto out;
}
- /* delete the current vdi temporarily first to avoid making
- * the current state become snapshot */
+ /*
+ * delete the current vdi temporarily first to avoid making
+ * the current state become snapshot
+ */
ret = read_vdi_obj(vdiname, 0, "", NULL, current_inode,
SD_INODE_HEADER_SIZE);
if (ret != EXIT_SUCCESS)
diff --git a/include/bitops.h b/include/bitops.h
index e6942a3..358c402 100644
--- a/include/bitops.h
+++ b/include/bitops.h
@@ -113,7 +113,7 @@ static inline void clear_bit(unsigned int nr, unsigned long *addr)
addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}
-/**
+/*
* fls - find last (most-significant) bit set
* @x: the word to search
*
@@ -150,7 +150,7 @@ static __always_inline int fls(int x)
return r;
}
-/**
+/*
* __fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
@@ -187,7 +187,7 @@ static __always_inline unsigned long __fls(unsigned long word)
return num;
}
-/**
+/*
* fls64 - find last set bit in a 64-bit word
* @x: the word to search
*
diff --git a/include/list.h b/include/list.h
index 2823075..9555e3b 100644
--- a/include/list.h
+++ b/include/list.h
@@ -223,10 +223,9 @@ static inline void hlist_add_after(struct hlist_node *n,
for (pos = (head)->first; pos ; pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
- for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
- pos = n)
+ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); pos = n)
-/**
+/*
* hlist_for_each_entry - iterate over list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
@@ -238,8 +237,9 @@ static inline void hlist_add_after(struct hlist_node *n,
pos && ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
pos = pos->next)
-/**
- * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+/*
+ * hlist_for_each_entry_safe - iterate over list of given type safe against
+ * removal of list entry
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @n: another &struct hlist_node to use as temporary storage
diff --git a/include/logger.h b/include/logger.h
index 88af7d5..03e9686 100644
--- a/include/logger.h
+++ b/include/logger.h
@@ -45,9 +45,7 @@ void get_thread_name(char *name);
int __sd_dump_variable(const char *var, const void *base_sp);
void sd_backtrace(void);
-/*
-+ * sheep log priorities, comliant with syslog spec
-+ */
+/* sheep log priorities, compliant with syslog spec */
#define SDOG_EMERG LOG_EMERG
#define SDOG_ALERT LOG_ALERT
#define SDOG_CRIT LOG_CRIT
diff --git a/include/sheepdog_proto.h b/include/sheepdog_proto.h
index 865f451..ee813d6 100644
--- a/include/sheepdog_proto.h
+++ b/include/sheepdog_proto.h
@@ -195,14 +195,10 @@ struct snap_log {
unsigned char sha1[SHA1_LEN];
};
-/*
- * 64 bit FNV-1a non-zero initial basis
- */
+/* 64 bit FNV-1a non-zero initial basis */
#define FNV1A_64_INIT ((uint64_t) 0xcbf29ce484222325ULL)
-/*
- * 64 bit Fowler/Noll/Vo FNV-1a hash code
- */
+/* 64 bit Fowler/Noll/Vo FNV-1a hash code */
static inline uint64_t fnv_64a_buf(const void *buf, size_t len, uint64_t hval)
{
unsigned char *bp = (unsigned char *) buf;
diff --git a/lib/logger.c b/lib/logger.c
index 15b6880..ad5b462 100644
--- a/lib/logger.c
+++ b/lib/logger.c
@@ -318,9 +318,7 @@ static notrace int json_log_formatter(char *buff, size_t size,
}
log_format_register("json", json_log_formatter);
-/*
- * this one can block under memory pressure
- */
+/* this one can block under memory pressure */
static notrace void log_syslog(const struct logmsg *msg)
{
char str[MAX_MSG_SIZE];
@@ -528,8 +526,10 @@ static notrace void logger(char *log_dir, char *outfile)
prctl(PR_SET_PDEATHSIG, SIGHUP);
- /* we need to check the aliveness of the sheep process since
- * it could die before the logger call prctl. */
+ /*
+ * we need to check the aliveness of the sheep process since
+ * it could die before the logger call prctl.
+ */
if (kill(sheep_pid, 0) < 0)
kill(logger_pid, SIGHUP);
@@ -800,8 +800,10 @@ notrace void sd_backtrace(void)
char cmd[ARG_MAX], path[PATH_MAX], info[256], **str;
FILE *f;
- /* the called function is at the previous address
- * because addr contains a return address */
+ /*
+ * The called function is at the previous address
+ * because addr contains a return address
+ */
addr = (void *)((char *)addr - 1);
/* try to get a line number with addr2line if possible */
@@ -824,8 +826,10 @@ notrace void sd_backtrace(void)
pclose(f);
continue;
- /* failed to get a line number, so simply use
- * backtrace_symbols instead */
+ /*
+ * Failed to get a line number, so simply use
+ * backtrace_symbols instead
+ */
fallback_close:
pclose(f);
fallback:
diff --git a/lib/rbtree.c b/lib/rbtree.c
index cb2076a..0db1054 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -319,9 +319,7 @@ void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
rb_augment_path(node, func, data);
}
-/*
- * This function returns the first node (in sort order) of the tree.
- */
+/* This function returns the first node (in sort order) of the tree. */
struct rb_node *rb_first(const struct rb_root *root)
{
struct rb_node *n;
@@ -353,8 +351,10 @@ struct rb_node *rb_next(const struct rb_node *node)
if (rb_parent(node) == node)
return NULL;
- /* If we have a right-hand child, go down and then left as far
- as we can. */
+ /*
+ * If we have a right-hand child, go down and then left as far
+ * as we can.
+ */
if (node->rb_right) {
node = node->rb_right;
while (node->rb_left)
@@ -362,12 +362,14 @@ struct rb_node *rb_next(const struct rb_node *node)
return (struct rb_node *)node;
}
- /* No right-hand children. Everything down and left is
- smaller than us, so any 'next' node must be in the general
- direction of our parent. Go up the tree; any time the
- ancestor is a right-hand child of its parent, keep going
- up. First time it's a left-hand child of its parent, said
- parent is our 'next' node. */
+ /*
+ * No right-hand children. Everything down and left is
+ * smaller than us, so any 'next' node must be in the general
+ * direction of our parent. Go up the tree; any time the
+ * ancestor is a right-hand child of its parent, keep going
+ * up. First time it's a left-hand child of its parent, said
+ * parent is our 'next' node.
+ */
while ((parent = rb_parent(node)) && node == parent->rb_right)
node = parent;
@@ -381,8 +383,10 @@ struct rb_node *rb_prev(const struct rb_node *node)
if (rb_parent(node) == node)
return NULL;
- /* If we have a left-hand child, go down and then right as far
- as we can. */
+ /*
+ * If we have a left-hand child, go down and then right as far
+ * as we can.
+ */
if (node->rb_left) {
node = node->rb_left;
while (node->rb_right)
@@ -390,8 +394,10 @@ struct rb_node *rb_prev(const struct rb_node *node)
return (struct rb_node *)node;
}
- /* No left-hand children. Go up till we find an ancestor which
- is a right-hand child of its parent */
+ /*
+ * No left-hand children. Go up till we find an ancestor which
+ * is a right-hand child of its parent
+ */
while ((parent = rb_parent(node)) && node == parent->rb_left)
node = parent;
diff --git a/lib/strbuf.c b/lib/strbuf.c
index c425b17..b6553d3 100644
--- a/lib/strbuf.c
+++ b/lib/strbuf.c
@@ -1,4 +1,5 @@
-/* Taken from git by Liu Yuan <namei.unix at gmail.com>
+/*
+ * Taken from git by Liu Yuan <namei.unix at gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
diff --git a/lib/util.c b/lib/util.c
index bb47654..39779a5 100644
--- a/lib/util.c
+++ b/lib/util.c
@@ -235,7 +235,7 @@ int xmkdir(const char *pathname, mode_t mode)
return 0;
}
-/**
+/*
* Copy the string str to buf. If str length is bigger than buf_size -
* 1 then it is clamped to buf_size - 1.
* NOTE: this function does what strncpy should have done to be
diff --git a/sheep/cluster.h b/sheep/cluster.h
index df73d06..934c2f9 100644
--- a/sheep/cluster.h
+++ b/sheep/cluster.h
@@ -30,11 +30,16 @@
enum cluster_join_result {
CJ_RES_SUCCESS, /* Success */
CJ_RES_FAIL, /* Fail to join. The joining node has an invalid epoch. */
- CJ_RES_JOIN_LATER, /* Fail to join. The joining node should
- * be added after the cluster start working. */
- CJ_RES_MASTER_TRANSFER, /* Transfer mastership. The joining
- * node has a newer epoch, so this node
- * will leave the cluster (restart later). */
+ /*
+ * Fail to join. The joining node should be added after the cluster
+ * starts working.
+ */
+ CJ_RES_JOIN_LATER,
+ /*
+ * Transfer mastership. The joining node has a newer epoch, so this
+ * node will leave the cluster (restart later).
+ */
+ CJ_RES_MASTER_TRANSFER,
};
struct cluster_driver {
@@ -172,8 +177,9 @@ static inline char *node_to_str(const struct sd_node *id)
}
snprintf(str, sizeof(str), "%s ip:%s port:%d",
- (af == AF_INET) ? "IPv4" : "IPv6",
- addr_to_str(name, sizeof(name), id->nid.addr, 0), id->nid.port);
+ (af == AF_INET) ? "IPv4" : "IPv6",
+ addr_to_str(name, sizeof(name), id->nid.addr, 0),
+ id->nid.port);
return str;
}
@@ -201,7 +207,7 @@ void sd_leave_handler(const struct sd_node *left, const struct sd_node *members,
void sd_notify_handler(const struct sd_node *sender, void *msg, size_t msg_len);
bool sd_block_handler(const struct sd_node *sender);
enum cluster_join_result sd_check_join_cb(const struct sd_node *joining,
- void *opaque);
+ void *opaque);
void recalculate_vnodes(struct sd_node *nodes, int nr_nodes);
#endif
diff --git a/sheep/cluster/corosync.c b/sheep/cluster/corosync.c
index e3224e4..8262a55 100644
--- a/sheep/cluster/corosync.c
+++ b/sheep/cluster/corosync.c
@@ -342,8 +342,10 @@ static bool __corosync_dispatch_one(struct corosync_event *cevent)
break;
case COROSYNC_EVENT_TYPE_BLOCK:
if (cevent->callbacked)
- /* block events until the unblock message
- removes this event */
+ /*
+ * block events until the unblock message
+ * removes this event
+ */
return false;
cevent->callbacked = sd_block_handler(&cevent->sender.ent);
return false;
@@ -365,10 +367,12 @@ static void __corosync_dispatch(void)
};
if (poll(&pfd, 1, 0)) {
- /* Corosync dispatches leave events one by one even
+ /*
+ * Corosync dispatches leave events one by one even
* when network partition has occurred. To count the
* number of alive nodes correctly, we postpone
- * processsing events if there are incoming ones. */
+ * processing events if there are incoming ones.
+ */
sd_dprintf("wait for a next dispatch event");
return;
}
@@ -514,9 +518,11 @@ static void cdrv_cpg_deliver(cpg_handle_t handle,
master = is_master(&cmsg->sender);
if (master >= 0)
- /* Master is down before new nodes finish joining.
+ /*
+ * Master is down before new nodes finish joining.
* We have to revoke its mastership to avoid cluster
- * hanging */
+ * hanging
+ */
cpg_nodes[master].gone = 1;
cevent->sender = cmsg->sender;
@@ -582,8 +588,10 @@ static void cdrv_cpg_confchg(cpg_handle_t handle,
if (nr_majority == 0) {
size_t total = member_list_entries + left_list_entries;
- /* we need at least 3 nodes to handle network
- * partition failure */
+ /*
+ * we need at least 3 nodes to handle network
+ * partition failure
+ */
if (total > 2)
nr_majority = total / 2 + 1;
}
@@ -623,9 +631,11 @@ static void cdrv_cpg_confchg(cpg_handle_t handle,
cevent = xzalloc(sizeof(*cevent));
master = is_master(&left_sheep[i]);
if (master >= 0)
- /* Master is down before new nodes finish joining.
+ /*
+ * Master is down before new nodes finish joining.
* We have to revoke its mastership to avoid cluster
- * hanging */
+ * hanging
+ */
cpg_nodes[master].gone = 1;
cevent->type = COROSYNC_EVENT_TYPE_LEAVE;
diff --git a/sheep/cluster/local.c b/sheep/cluster/local.c
index d1cb45f..142221f 100644
--- a/sheep/cluster/local.c
+++ b/sheep/cluster/local.c
@@ -389,9 +389,7 @@ static void local_unblock(void *msg, size_t msg_len)
shm_queue_unlock();
}
-/*
- * Returns true if an event is processed
- */
+/* Returns true if an event is processed */
static bool local_process_event(void)
{
struct local_event *ev;
diff --git a/sheep/group.c b/sheep/group.c
index b991e5e..212f642 100644
--- a/sheep/group.c
+++ b/sheep/group.c
@@ -222,9 +222,7 @@ int local_get_node_list(const struct sd_req *req, struct sd_rsp *rsp,
return SD_RES_SUCCESS;
}
-/*
- * Indicator if a cluster operation is currently running.
- */
+/* Indicator if a cluster operation is currently running. */
static bool cluster_op_running;
static struct vdi_op_message *prepare_cluster_msg(struct request *req,
@@ -432,9 +430,7 @@ static void update_exceptional_node_list(uint32_t epoch,
add_delayed_node(epoch, &jm->nodes[i]);
}
-/*
- * Format the lists of failed or delayed nodes into the join message.
- */
+/* Format the lists of failed or delayed nodes into the join message. */
static void format_exceptional_node_list(struct join_message *jm)
{
struct node *n;
@@ -751,8 +747,10 @@ static void setup_backend_store(const char *store, bool need_purge)
panic("failed to store into config file");
}
- /* We need to purge the stale objects for sheep joining back
- * after crash */
+ /*
+ * We need to purge the stale objects for sheep joining back
+ * after crash
+ */
if (need_purge && sd_store->purge_obj) {
ret = sd_store->purge_obj();
if (ret != SD_RES_SUCCESS)
@@ -773,9 +771,11 @@ static void finish_join(const struct join_message *msg,
update_exceptional_node_list(get_latest_epoch(), msg);
if (msg->store[0]) {
- /* We don't need backend for gateway-only node, but need to save
+ /*
+ * We don't need backend for gateway-only node, but need to save
* store name. Otherwise, the node cannot notify the store name
- * when it become master */
+ * when it become master
+ */
if (sys->gateway_only) {
ret = set_cluster_store((char *)msg->store);
if (ret != SD_RES_SUCCESS)
@@ -1131,7 +1131,8 @@ void sd_join_handler(const struct sd_node *joined,
case CJ_RES_MASTER_TRANSFER:
update_exceptional_node_list(le, jm);
- /* Sheep needs this to identify itself as master.
+ /*
+ * Sheep needs this to identify itself as master.
* Now mastership transfer is done.
*/
if (!sys->join_finished) {
@@ -1267,7 +1268,8 @@ int create_cluster(int port, int64_t zone, int nr_vnodes,
return 0;
}
-/* We will call this function for two reason:
+/*
+ * We will call this function for two reasons:
* 1) make this node working as a gateway, or
* 2) the program is going to shutdown itself.
*/
diff --git a/sheep/journal.c b/sheep/journal.c
index 1f092e5..fafb8f9 100644
--- a/sheep/journal.c
+++ b/sheep/journal.c
@@ -169,9 +169,7 @@ static int jrnl_apply_to_target_object(struct jrnl_descriptor *jd)
return res;
}
-/*
- * We cannot use this function for concurrent write operations
- */
+/* We cannot use this function for concurrent write operations */
struct jrnl_descriptor *jrnl_begin(const void *buf, size_t count, off_t offset,
const char *path, const char *jrnl_dir)
{
diff --git a/sheep/migrate.c b/sheep/migrate.c
index 35cb70b..e5c2f37 100644
--- a/sheep/migrate.c
+++ b/sheep/migrate.c
@@ -184,9 +184,11 @@ static int migrate_from_v0_to_v1(void)
close(fd);
- /* If the config file contains a space field, the store layout
+ /*
+ * If the config file contains a space field, the store layout
* is compatible with v1. In this case, what we need to do is
- * only adding version number to the config file. */
+ * only adding version number to the config file.
+ */
if (config.space > 0)
return 0;
diff --git a/sheep/object_cache.c b/sheep/object_cache.c
index 3090233..e80fd99 100644
--- a/sheep/object_cache.c
+++ b/sheep/object_cache.c
@@ -1027,9 +1027,7 @@ bool bypass_object_cache(const struct request *req)
}
}
- /*
- * For vmstate && vdi_attr object, we don't do caching
- */
+ /* For vmstate && vdi_attr object, we don't do caching */
if (is_vmstate_obj(oid) || is_vdi_attr_obj(oid) ||
req->rq.flags & SD_FLAG_CMD_COW)
return true;
diff --git a/sheep/object_list_cache.c b/sheep/object_list_cache.c
index e3bfe3c..28a75ee 100644
--- a/sheep/object_list_cache.c
+++ b/sheep/object_list_cache.c
@@ -174,7 +174,8 @@ static void objlist_deletion_work(struct work *work)
struct objlist_cache_entry *entry, *t;
uint32_t vid = ow->vid, entry_vid;
- /* Before reclaiming the cache belonging to the VDI just deleted,
+ /*
+ * Before reclaiming the cache belonging to the VDI just deleted,
* we should test whether the VDI exists, because after some node
* deleting it and before the notification is sent to all the node,
* another node may issue a VDI creation event and reuse the VDI id
diff --git a/sheep/ops.c b/sheep/ops.c
index a4ff50e..8cba70d 100644
--- a/sheep/ops.c
+++ b/sheep/ops.c
@@ -326,8 +326,10 @@ static int cluster_get_vdi_attr(struct request *req)
if (ret != SD_RES_SUCCESS)
return ret;
- /* the current VDI id can change if we take a snapshot,
- so we use the hash value of the VDI name as the VDI id */
+ /*
+ * the current VDI id can change if we take a snapshot,
+ * so we use the hash value of the VDI name as the VDI id
+ */
vid = fnv_64a_buf(vattr->name, strlen(vattr->name), FNV1A_64_INIT);
vid &= SD_NR_VDIS - 1;
ret = get_vdi_attr(req->data, hdr->data_length,
@@ -487,7 +489,8 @@ static int cluster_force_recover(const struct sd_req *req, struct sd_rsp *rsp,
uint8_t c;
uint16_t f;
- /* We should manually recover the cluster when
+ /*
+ * We should manually recover the cluster when
* 1) the master is physically down (different epoch condition).
* 2) some nodes are physically down (same epoch condition).
* In both case, the nodes(s) stat is WAIT_FOR_JOIN.
diff --git a/sheep/plain_store.c b/sheep/plain_store.c
index 7bda5cd..87ebd35 100644
--- a/sheep/plain_store.c
+++ b/sheep/plain_store.c
@@ -24,9 +24,7 @@ static int get_open_flags(uint64_t oid, bool create, int fl)
if (uatomic_is_true(&sys->use_journal) || sys->nosync == true)
flags &= ~O_DSYNC;
- /*
- * We can not use DIO for inode object because it is not 512B aligned.
- */
+ /* We can not use DIO for inode object because it is not 512B aligned */
if (sys->backend_dio && is_data_obj(oid))
flags |= O_DIRECT;
@@ -261,8 +259,10 @@ int default_read(uint64_t oid, const struct siocb *iocb)
get_obj_path(oid, path);
ret = default_read_from_path(oid, path, iocb);
- /* If the request is againt the older epoch, try to read from
- * the stale directory */
+ /*
+ * If the request is against the older epoch, try to read from
+ * the stale directory
+ */
while (ret == SD_RES_NO_OBJ && iocb->epoch < epoch) {
epoch--;
get_stale_obj_path(oid, epoch, path);
@@ -272,9 +272,7 @@ int default_read(uint64_t oid, const struct siocb *iocb)
return ret;
}
-/*
- * Preallocate the whole object to get a better filesystem layout.
- */
+/* Preallocate the whole object to get a better filesystem layout. */
int prealloc(int fd, uint32_t size)
{
int ret = fallocate(fd, 0, 0, size);
@@ -312,11 +310,13 @@ int default_create_and_write(uint64_t oid, const struct siocb *iocb)
fd = open(tmp_path, flags, def_fmode);
if (fd < 0) {
if (errno == EEXIST) {
- /* This happens if node membership changes during object
+ /*
+ * This happens if node membership changes during object
* creation; while gateway retries a CREATE request,
* recovery process could also recover the object at the
* same time. They should try to write the same data,
- * so it is okay to simply return success here. */
+ * so it is okay to simply return success here.
+ */
sd_dprintf("%s exists", tmp_path);
return SD_RES_SUCCESS;
}
diff --git a/sheep/recovery.c b/sheep/recovery.c
index 10e9cdb..deba08a 100644
--- a/sheep/recovery.c
+++ b/sheep/recovery.c
@@ -30,8 +30,11 @@ struct recovery_work {
bool stop;
struct work work;
- bool suspended; /* true when automatic recovery is disabled
- * and recovery process is suspended */
+ /*
+ * true when automatic recovery is disabled
+ * and recovery process is suspended
+ */
+ bool suspended;
int count;
uint64_t *oids;
@@ -240,8 +243,10 @@ static inline void prepare_schedule_oid(uint64_t oid)
oid);
return;
}
- /* When auto recovery is enabled, the oid is currently being
- * recovered */
+ /*
+ * When auto recovery is enabled, the oid is currently being
+ * recovered
+ */
if (!sys->disable_recovery && rw->oids[rw->done] == oid)
return;
rw->nr_prio_oids++;
@@ -662,8 +667,10 @@ int start_recovery(struct vnode_info *cur_vinfo, struct vnode_info *old_vinfo)
sd_dprintf("recovery skipped");
next_rw = rw;
- /* This is necesary to invoke run_next_rw when
- * recovery work is suspended. */
+ /*
+ * This is necessary to invoke run_next_rw when
+ * recovery work is suspended.
+ */
resume_suspended_recovery();
} else {
recovering_work = rw;
diff --git a/sheep/request.c b/sheep/request.c
index 3ebdff7..be12015 100644
--- a/sheep/request.c
+++ b/sheep/request.c
@@ -137,8 +137,10 @@ static int check_request_epoch(struct request *req)
sd_eprintf("new node version %u, %u (%s)",
sys->epoch, req->rq.epoch, op_name(req->op));
- /* put on local wait queue, waiting for local epoch
- to be lifted */
+ /*
+ * put on local wait queue, waiting for local epoch
+ * to be lifted
+ */
req->rp.result = SD_RES_NEW_NODE_VER;
list_add_tail(&req->request_list, &sys->wait_rw_queue);
return -1;
@@ -166,9 +168,7 @@ static bool request_in_recovery(struct request *req)
*/
if (oid_in_recovery(req->local_oid) &&
!(req->rq.flags & SD_FLAG_CMD_RECOVERY)) {
- /*
- * Put request on wait queues of local node
- */
+ /* Put request on wait queues of local node */
if (is_recovery_init()) {
sd_dprintf("%"PRIx64" on rw_queue", req->local_oid);
req->rp.result = SD_RES_OBJ_RECOVERING;
@@ -245,8 +245,10 @@ void resume_wait_obj_requests(uint64_t oid)
if (req->local_oid != oid)
continue;
- /* the object requested by a pending request has been
- * recovered, notify the pending request. */
+ /*
+ * the object requested by a pending request has been
+ * recovered, notify the pending request.
+ */
sd_dprintf("retry %" PRIx64, req->local_oid);
list_del(&req->request_list);
requeue_request(req);
diff --git a/sheep/store.c b/sheep/store.c
index c327b9b..58303fa 100644
--- a/sheep/store.c
+++ b/sheep/store.c
@@ -311,8 +311,10 @@ static int init_jrnl_path(const char *base_path)
return 0;
}
-/* if the node is gateway, this function only finds the store driver.
- * Otherwise, this function initializes the backend store*/
+/*
+ * If the node is gateway, this function only finds the store driver.
+ * Otherwise, this function initializes the backend store
+ */
int init_store_driver(bool is_gateway)
{
char driver_name[STORE_LEN], *p;
@@ -419,9 +421,7 @@ int init_global_pathnames(const char *d, char *argp)
return 0;
}
-/*
- * Write data to both local object cache (if enabled) and backends
- */
+/* Write data to both local object cache (if enabled) and backends */
int write_object(uint64_t oid, char *data, unsigned int datalen,
uint64_t offset, uint16_t flags, bool create, int nr_copies)
{
diff --git a/sheep/vdi.c b/sheep/vdi.c
index cf1d3ba..52f91e4 100644
--- a/sheep/vdi.c
+++ b/sheep/vdi.c
@@ -816,8 +816,10 @@ static int start_deletion(struct request *req, uint32_t vid)
ret = fill_vdi_list(dw, root_vid);
if (ret) {
- /* if the VDI is a cloned VDI, delete its objects
- * no matter whether the VDI tree is clear. */
+ /*
+ * if the VDI is a cloned VDI, delete its objects
+ * no matter whether the VDI tree is clear.
+ */
if (cloned) {
dw->buf[0] = vid;
dw->count = 1;
diff --git a/sheep/work.c b/sheep/work.c
index 33aed36..602c997 100644
--- a/sheep/work.c
+++ b/sheep/work.c
@@ -35,9 +35,11 @@
#include "trace/trace.h"
#include "sheep_priv.h"
-/* The protection period from shrinking work queue. This is necessary
+/*
+ * The protection period from shrinking work queue. This is necessary
* to avoid many calls of pthread_create. Without it, threads are
- * frequently created and deleted and it leads poor performance. */
+ * frequently created and deleted and it leads to poor performance.
+ */
#define WQ_PROTECTION_PERIOD 1000 /* ms */
static int efd;
diff --git a/sheepfs/volume.c b/sheepfs/volume.c
index 87086a4..701d488 100644
--- a/sheepfs/volume.c
+++ b/sheepfs/volume.c
@@ -46,13 +46,15 @@ struct vdi_inode {
struct rb_node rb;
uint32_t vid;
struct sheepdog_inode *inode;
-/* FIXME
+/*
+ * FIXME
* 1) Consider various VM request queue depth.
* 2) Most drive presents 31 to Linux, I set it as 31 to expect that VM's
* real queue depth never exceed 31
*/
#define SOCKET_POOL_SIZE 31
-/* Socket pool is used for FUSE read threads, which use threads
+/*
+ * Socket pool is used for FUSE read threads, which use threads
* to simulate async read. All sockets point to the same gateway
*/
int socket_pool[SOCKET_POOL_SIZE];
--
1.7.9.5
More information about the sheepdog
mailing list