[lustre-devel] [PATCH 01/39] lustre: ldlm: page discard speedup

James Simmons jsimmons at infradead.org
Thu Jan 21 09:16:24 PST 2021


From: Alexander Zarochentsev <c17826 at cray.com>

Improve check_and_discard_cb() by caching the negative result of a
DLM lock lookup, avoiding excessive osc_dlmlock_at_pgoff() calls
while scanning pages that no lock covers.
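
A minimal user-space sketch of the idea (hypothetical names, not the
Lustre API): pages are visited in ascending index order; when a lookup
finds that no lock covers anything below some index, that boundary is
cached (like oti_ng_index below) so later pages inside the hole are
discarded without repeating the lookup:

    #include <stdbool.h>
    #include <stdio.h>

    #define NPAGES 20UL

    /* Stand-in for the DLM lookup with OSC_DAP_FL_RIGHT semantics:
     * report the first lock at or to the right of @index, if any.
     * Here a single lock is pretended to cover pages [10, 15].
     */
    static bool lookup_lock_right(unsigned long index,
                                  unsigned long *start, unsigned long *end)
    {
            if (index <= 15) {
                    *start = 10;
                    *end = 15;
                    return true;
            }
            return false;
    }

    int main(void)
    {
            unsigned long ng_index = 0;     /* pages below this have no lock */
            unsigned long fn_index = 0;     /* pages below this are covered */
            unsigned long index, lookups = 0;

            for (index = 0; index < NPAGES; index++) {
                    bool discard = false;

                    if (index < ng_index) {
                            discard = true;         /* cached negative result */
                    } else if (index >= fn_index) {
                            unsigned long start, end;

                            lookups++;
                            if (!lookup_lock_right(index, &start, &end)) {
                                    ng_index = NPAGES;   /* nothing to the right */
                                    discard = true;
                            } else if (index < start) {
                                    ng_index = start;    /* hole up to @start */
                                    discard = true;
                            } else {
                                    fn_index = end + 1;  /* covered up to @end */
                            }
                    }
                    printf("page %2lu: %s\n", index,
                           discard ? "discard" : "keep");
            }
            printf("%lu lookups for %lu pages\n", lookups, NPAGES);
            return 0;
    }

Without the cached negative boundary, every page in the uncovered
ranges would trigger its own lookup; with it, the scan above issues
only three.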

HPE-bug-id: LUS-6432
WC-bug-id: https://jira.whamcloud.com/browse/LU-11290
Lustre-commit: 0f48cd0b9856fe ("LU-11290 ldlm: page discard speedup")
Signed-off-by: Alexander Zarochentsev <c17826 at cray.com>
Reviewed-on: https://review.whamcloud.com/39327
Reviewed-by: Vitaly Fertman <vitaly.fertman at hpe.com>
Reviewed-by: Andrew Perepechko <andrew.perepechko at hpe.com>
Reviewed-by: Oleg Drokin <green at whamcloud.com>
Signed-off-by: James Simmons <jsimmons at infradead.org>
---
 fs/lustre/include/lustre_dlm.h |  1 +
 fs/lustre/include/lustre_osc.h |  5 +++++
 fs/lustre/ldlm/ldlm_lock.c     | 16 +++++++++-----
 fs/lustre/osc/osc_cache.c      | 48 +++++++++++++++++++++++++++++++-----------
 fs/lustre/osc/osc_lock.c       |  3 +++
 5 files changed, 56 insertions(+), 17 deletions(-)

diff --git a/fs/lustre/include/lustre_dlm.h b/fs/lustre/include/lustre_dlm.h
index f056c2d..e4c95a2 100644
--- a/fs/lustre/include/lustre_dlm.h
+++ b/fs/lustre/include/lustre_dlm.h
@@ -858,6 +858,7 @@ enum ldlm_match_flags {
 	LDLM_MATCH_UNREF	= BIT(0),
 	LDLM_MATCH_AST		= BIT(1),
 	LDLM_MATCH_AST_ANY	= BIT(2),
+	LDLM_MATCH_RIGHT	= BIT(3),
 };
 
 /**
diff --git a/fs/lustre/include/lustre_osc.h b/fs/lustre/include/lustre_osc.h
index ef5237b..e7bf392 100644
--- a/fs/lustre/include/lustre_osc.h
+++ b/fs/lustre/include/lustre_osc.h
@@ -186,6 +186,7 @@ struct osc_thread_info {
 	 */
 	pgoff_t			oti_next_index;
 	pgoff_t			oti_fn_index; /* first non-overlapped index */
+	pgoff_t			oti_ng_index; /* negative lock caching */
 	struct cl_sync_io	oti_anchor;
 	struct cl_req_attr	oti_req_attr;
 	struct lu_buf		oti_ladvise_buf;
@@ -248,6 +249,10 @@ enum osc_dap_flags {
 	 * check ast data is present, requested to cancel cb
 	 */
 	OSC_DAP_FL_AST	     = BIT(2),
+	/**
+	 * look at right region for the desired lock
+	 */
+	OSC_DAP_FL_RIGHT     = BIT(3),
 };
 
 /*
diff --git a/fs/lustre/ldlm/ldlm_lock.c b/fs/lustre/ldlm/ldlm_lock.c
index 56f1550..b7ce0bb 100644
--- a/fs/lustre/ldlm/ldlm_lock.c
+++ b/fs/lustre/ldlm/ldlm_lock.c
@@ -1093,8 +1093,9 @@ static bool lock_matches(struct ldlm_lock *lock, void *vdata)
 
 	switch (lock->l_resource->lr_type) {
 	case LDLM_EXTENT:
-		if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
-		    lpol->l_extent.end < data->lmd_policy->l_extent.end)
+		if (!(data->lmd_match & LDLM_MATCH_RIGHT) &&
+		    (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
+		     lpol->l_extent.end < data->lmd_policy->l_extent.end))
 			return false;
 
 		if (unlikely(match == LCK_GROUP) &&
@@ -1160,10 +1161,17 @@ static bool lock_matches(struct ldlm_lock *lock, void *vdata)
 struct ldlm_lock *search_itree(struct ldlm_resource *res,
 			       struct ldlm_match_data *data)
 {
+	struct ldlm_extent ext = {
+		.start	= data->lmd_policy->l_extent.start,
+		.end	= data->lmd_policy->l_extent.end
+	};
 	int idx;
 
 	data->lmd_lock = NULL;
 
+	if (data->lmd_match & LDLM_MATCH_RIGHT)
+		ext.end = OBD_OBJECT_EOF;
+
 	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
 		struct ldlm_interval_tree *tree = &res->lr_itree[idx];
 
@@ -1173,9 +1181,7 @@ struct ldlm_lock *search_itree(struct ldlm_resource *res,
 		if (!(tree->lit_mode & *data->lmd_mode))
 			continue;
 
-		ldlm_extent_search(&tree->lit_root,
-				   data->lmd_policy->l_extent.start,
-				   data->lmd_policy->l_extent.end,
+		ldlm_extent_search(&tree->lit_root, ext.start, ext.end,
 				   lock_matches, data);
 		if (data->lmd_lock)
 			return data->lmd_lock;
diff --git a/fs/lustre/osc/osc_cache.c b/fs/lustre/osc/osc_cache.c
index ddf6fb1..d511ece 100644
--- a/fs/lustre/osc/osc_cache.c
+++ b/fs/lustre/osc/osc_cache.c
@@ -3207,28 +3207,51 @@ static bool check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
 {
 	struct osc_thread_info *info = osc_env_info(env);
 	struct osc_object *osc = cbdata;
+	struct cl_page *page = ops->ops_cl.cpl_page;
 	pgoff_t index;
+	bool discard = false;
 
 	index = osc_index(ops);
-	if (index >= info->oti_fn_index) {
+	/* negative lock caching */
+	if (index < info->oti_ng_index) {
+		discard = true;
+	} else if (index >= info->oti_fn_index) {
 		struct ldlm_lock *tmp;
-		struct cl_page *page = ops->ops_cl.cpl_page;
 
 		/* refresh non-overlapped index */
 		tmp = osc_dlmlock_at_pgoff(env, osc, index,
-					   OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_AST);
+					   OSC_DAP_FL_TEST_LOCK |
+					   OSC_DAP_FL_AST | OSC_DAP_FL_RIGHT);
 		if (tmp) {
 			u64 end = tmp->l_policy_data.l_extent.end;
-			/* Cache the first-non-overlapped index so as to skip
-			 * all pages within [index, oti_fn_index). This is safe
-			 * because if tmp lock is canceled, it will discard
-			 * these pages.
-			 */
-			info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
-			if (end == OBD_OBJECT_EOF)
-				info->oti_fn_index = CL_PAGE_EOF;
+			u64 start = tmp->l_policy_data.l_extent.start;
+
+			/* no lock covering this page */
+			if (index < cl_index(osc2cl(osc), start)) {
+				/* no lock at @index, first lock at @start */
+				info->oti_ng_index = cl_index(osc2cl(osc),
+							      start);
+				discard = true;
+			} else {
+				/* Cache the first-non-overlapped index so as to
+				 * skip all pages within [index, oti_fn_index).
+				 * This is safe because if tmp lock is canceled,
+				 * it will discard these pages.
+				 */
+				info->oti_fn_index = cl_index(osc2cl(osc),
+							      end + 1);
+				if (end == OBD_OBJECT_EOF)
+					info->oti_fn_index = CL_PAGE_EOF;
+			}
 			LDLM_LOCK_PUT(tmp);
-		} else if (cl_page_own(env, io, page) == 0) {
+		} else {
+			info->oti_ng_index = CL_PAGE_EOF;
+			discard = true;
+		}
+	}
+
+	if (discard) {
+		if (cl_page_own(env, io, page) == 0) {
 			/* discard the page */
 			cl_page_discard(env, io, page);
 			cl_page_disown(env, io, page);
@@ -3292,6 +3315,7 @@ int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
 	cb = discard ? osc_discard_cb : check_and_discard_cb;
 	info->oti_fn_index = start;
 	info->oti_next_index = start;
+	info->oti_ng_index = 0;
 
 	osc_page_gang_lookup(env, io, osc,
 			     info->oti_next_index, end, cb, osc);
diff --git a/fs/lustre/osc/osc_lock.c b/fs/lustre/osc/osc_lock.c
index 7bfcbfb..536142f2 100644
--- a/fs/lustre/osc/osc_lock.c
+++ b/fs/lustre/osc/osc_lock.c
@@ -1282,6 +1282,9 @@ struct ldlm_lock *osc_obj_dlmlock_at_pgoff(const struct lu_env *env,
 	if (dap_flags & OSC_DAP_FL_CANCELING)
 		match_flags |= LDLM_MATCH_UNREF;
 
+	if (dap_flags & OSC_DAP_FL_RIGHT)
+		match_flags |= LDLM_MATCH_RIGHT;
+
 	/*
 	 * It is fine to match any group lock since there could be only one
 	 * with a uniq gid and it conflicts with all other lock modes too
-- 
1.8.3.1
