[stgt] [PATCH 3/3] Change lock that protects the it_nexus list to a rwlock
Chandra Seetharaman
sekharan at us.ibm.com
Fri Oct 15 06:58:23 CEST 2010
Convert the per-target mutex it_nexus_lock to a read-write lock, it_nexus_rwlock.
This should help performance, since walks of the list are far more
frequent than add/delete operations.
Signed-off-by: Chandra Seetharaman <sekharan at us.ibm.com>
---
usr/target.c | 51 ++++++++++++++++++++++++++-------------------------
usr/target.h | 2 +-
2 files changed, 27 insertions(+), 26 deletions(-)
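
For illustration, a minimal standalone sketch of the locking pattern this
patch applies: take the rwlock in read mode for list walks (which dominate)
and in write mode only for add/delete. The identifiers below (node,
node_list, list_rwlock) are illustrative stand-ins, not tgt code:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct node {
	uint64_t id;
	struct node *next;
};

static struct node *node_list;
static pthread_rwlock_t list_rwlock = PTHREAD_RWLOCK_INITIALIZER;

/* Lookups take the lock in read mode, so concurrent walkers no
 * longer serialize against each other as they did with the mutex. */
static struct node *node_lookup(uint64_t id)
{
	struct node *n;

	pthread_rwlock_rdlock(&list_rwlock);
	for (n = node_list; n; n = n->next)
		if (n->id == id)
			break;
	pthread_rwlock_unlock(&list_rwlock);
	return n;
}

/* The rare list modifications take the lock in write mode,
 * excluding all readers and other writers. */
static int node_add(uint64_t id)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;
	n->id = id;
	pthread_rwlock_wrlock(&list_rwlock);
	n->next = node_list;
	node_list = n;
	pthread_rwlock_unlock(&list_rwlock);
	return 0;
}

int main(void)
{
	node_add(42);
	return node_lookup(42) ? 0 : 1;
}
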
Index: tgt-thread/usr/target.c
===================================================================
--- tgt-thread.orig/usr/target.c
+++ tgt-thread/usr/target.c
@@ -91,14 +91,14 @@ static struct it_nexus *it_nexus_lookup(
if (!target)
return NULL;
- pthread_mutex_lock(&target->it_nexus_lock);
+ pthread_rwlock_rdlock(&target->it_nexus_rwlock);
list_for_each_entry(itn, &target->it_nexus_list, nexus_siblings) {
if (itn->itn_id == itn_id) {
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
return itn;
}
}
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
return NULL;
}
@@ -220,7 +220,7 @@ void ua_sense_add_other_it_nexus(uint64_
struct it_nexus_lu_info *itn_lu;
int ret;
- pthread_mutex_lock(&lu->tgt->it_nexus_lock);
+ pthread_rwlock_rdlock(&lu->tgt->it_nexus_rwlock);
list_for_each_entry(itn, &lu->tgt->it_nexus_list, nexus_siblings) {
if (itn->itn_id == itn_id)
@@ -238,7 +238,7 @@ void ua_sense_add_other_it_nexus(uint64_
lu->lun, itn_id);
}
}
- pthread_mutex_unlock(&lu->tgt->it_nexus_lock);
+ pthread_rwlock_unlock(&lu->tgt->it_nexus_rwlock);
}
int it_nexus_create(int tid, uint64_t itn_id, int host_no, char *info)
@@ -291,9 +291,9 @@ int it_nexus_create(int tid, uint64_t it
for (i = 0; i < ARRAY_SIZE(itn->cmd_hash_list); i++)
INIT_LIST_HEAD(&itn->cmd_hash_list[i]);
- pthread_mutex_lock(&target->it_nexus_lock);
+ pthread_rwlock_wrlock(&target->it_nexus_rwlock);
list_add_tail(&itn->nexus_siblings, &target->it_nexus_list);
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
return 0;
out:
@@ -322,9 +322,9 @@ int it_nexus_destroy(int tid, uint64_t i
it_nexus_del_lu_info(itn);
- pthread_mutex_lock(&itn->nexus_target->it_nexus_lock);
+ pthread_rwlock_wrlock(&itn->nexus_target->it_nexus_rwlock);
list_del(&itn->nexus_siblings);
- pthread_mutex_unlock(&itn->nexus_target->it_nexus_lock);
+ pthread_rwlock_unlock(&itn->nexus_target->it_nexus_rwlock);
free(itn);
return 0;
}
@@ -581,7 +581,7 @@ int tgt_device_create(int tid, int dev_t
}
list_add_tail(&lu->device_siblings, &pos->device_siblings);
- pthread_mutex_lock(&target->it_nexus_lock);
+ pthread_rwlock_rdlock(&target->it_nexus_rwlock);
list_for_each_entry(itn, &target->it_nexus_list, nexus_siblings) {
itn_lu = zalloc(sizeof(*itn_lu));
if (!itn_lu)
@@ -601,7 +601,7 @@ int tgt_device_create(int tid, int dev_t
ASC_REPORTED_LUNS_DATA_HAS_CHANGED);
}
}
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
if (backing && !path)
lu->dev_type_template.lu_offline(lu);
@@ -658,7 +658,7 @@ int tgt_device_destroy(int tid, uint64_t
if (lu->bst->bs_exit)
lu->bst->bs_exit(lu);
- pthread_mutex_lock(&target->it_nexus_lock);
+ pthread_rwlock_rdlock(&target->it_nexus_rwlock);
list_for_each_entry(itn, &target->it_nexus_list, nexus_siblings) {
list_for_each_entry_safe(itn_lu, next, &itn->it_nexus_lu_info_list,
lu_info_siblings) {
@@ -668,7 +668,7 @@ int tgt_device_destroy(int tid, uint64_t
}
}
}
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
list_del(&lu->device_siblings);
@@ -679,7 +679,7 @@ int tgt_device_destroy(int tid, uint64_t
free(lu);
- pthread_mutex_lock(&target->it_nexus_lock);
+ pthread_rwlock_rdlock(&target->it_nexus_rwlock);
list_for_each_entry(itn, &target->it_nexus_list, nexus_siblings) {
list_for_each_entry(itn_lu, &itn->it_nexus_lu_info_list,
lu_info_siblings) {
@@ -688,7 +688,7 @@ int tgt_device_destroy(int tid, uint64_t
ASC_REPORTED_LUNS_DATA_HAS_CHANGED);
}
}
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
return 0;
}
@@ -1112,7 +1112,7 @@ static int abort_task_set(struct mgmt_re
eprintf("found %" PRIx64 " %d\n", tag, all);
- pthread_mutex_lock(&target->it_nexus_lock);
+ pthread_rwlock_rdlock(&target->it_nexus_rwlock);
list_for_each_entry(itn, &target->it_nexus_list, nexus_siblings) {
for (i = 0; i < ARRAY_SIZE(itn->cmd_hash_list); i++) {
struct list_head *list = &itn->cmd_hash_list[i];
@@ -1128,7 +1128,7 @@ static int abort_task_set(struct mgmt_re
}
}
}
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
return count;
}
@@ -1186,7 +1186,7 @@ enum mgmt_req_result target_mgmt_request
if (mreq->busy)
send = 0;
- pthread_mutex_lock(&target->it_nexus_lock);
+ pthread_rwlock_rdlock(&target->it_nexus_rwlock);
list_for_each_entry(itn, &target->it_nexus_list, nexus_siblings) {
list_for_each_entry(itn_lu, &itn->it_nexus_lu_info_list,
lu_info_siblings) {
@@ -1201,7 +1201,7 @@ enum mgmt_req_result target_mgmt_request
}
}
}
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
break;
case LOGICAL_UNIT_RESET:
lun = scsi_get_devid(target->lid, lun_buf);
@@ -1210,7 +1210,7 @@ enum mgmt_req_result target_mgmt_request
if (mreq->busy)
send = 0;
- pthread_mutex_lock(&target->it_nexus_lock);
+ pthread_rwlock_rdlock(&target->it_nexus_rwlock);
list_for_each_entry(itn, &target->it_nexus_list, nexus_siblings) {
list_for_each_entry(itn_lu, &itn->it_nexus_lu_info_list,
lu_info_siblings) {
@@ -1220,7 +1220,7 @@ enum mgmt_req_result target_mgmt_request
}
}
}
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
break;
default:
err = -EINVAL;
@@ -1728,7 +1728,7 @@ int tgt_target_show_all(char *buf, int r
shprintf(total, buf, rest, _TAB1 "I_T nexus information:\n");
- pthread_mutex_lock(&target->it_nexus_lock);
+ pthread_rwlock_rdlock(&target->it_nexus_rwlock);
mutex_held = 1;
list_for_each_entry(nexus, &target->it_nexus_list, nexus_siblings) {
shprintf(total, buf, rest, _TAB2 "I_T nexus: %" PRIu64 "\n",
@@ -1737,7 +1737,7 @@ int tgt_target_show_all(char *buf, int r
shprintf(total, buf, rest, "%s", nexus->info);
}
mutex_held = 0;
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
shprintf(total, buf, rest, _TAB1 "LUN information:\n");
list_for_each_entry(lu, &target->device_list, device_siblings)
@@ -1792,7 +1792,7 @@ int tgt_target_show_all(char *buf, int r
return total;
overflow:
if (mutex_held)
- pthread_mutex_unlock(&target->it_nexus_lock);
+ pthread_rwlock_unlock(&target->it_nexus_rwlock);
return max;
}
@@ -1888,7 +1888,7 @@ int tgt_target_create(int lld, int tid,
INIT_LIST_HEAD(&target->acl_list);
INIT_LIST_HEAD(&target->it_nexus_list);
- pthread_mutex_init(&target->it_nexus_lock, NULL);
+ pthread_rwlock_init(&target->it_nexus_rwlock, NULL);
tgt_device_create(tid, TYPE_RAID, 0, NULL, 0);
@@ -1928,6 +1928,7 @@ int tgt_target_destroy(int lld_no, int t
if (tgt_drivers[lld_no]->target_destroy)
tgt_drivers[lld_no]->target_destroy(tid);
+ pthread_rwlock_destroy(&target->it_nexus_rwlock);
list_del(&target->target_siblings);
list_for_each_entry_safe(acl, tmp, &target->acl_list, aclent_list) {
Index: tgt-thread/usr/target.h
===================================================================
--- tgt-thread.orig/usr/target.h
+++ tgt-thread/usr/target.h
@@ -34,7 +34,7 @@ struct target {
struct list_head it_nexus_list;
- pthread_mutex_t it_nexus_lock;
+ pthread_rwlock_t it_nexus_rwlock;
struct backingstore_template *bst;
--
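
Usage note: the patch pairs the existing pthread_rwlock_init() in
tgt_target_create() with a new pthread_rwlock_destroy() in
tgt_target_destroy(). A minimal sketch of that lifecycle, with my_target
as an illustrative stand-in for struct target:

#include <pthread.h>

struct my_target {
	pthread_rwlock_t it_nexus_rwlock;
};

static int my_target_create(struct my_target *t)
{
	/* NULL attributes request the default rwlock, matching the
	 * pthread_rwlock_init(..., NULL) call in the patch. */
	return pthread_rwlock_init(&t->it_nexus_rwlock, NULL);
}

static void my_target_destroy(struct my_target *t)
{
	/* Must not be called while any thread still holds the lock. */
	pthread_rwlock_destroy(&t->it_nexus_rwlock);
}

int main(void)
{
	struct my_target t;

	if (my_target_create(&t))
		return 1;
	my_target_destroy(&t);
	return 0;
}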