[lustre-devel] [PATCH 11/18] lnet: RDMA infrastructure updates

James Simmons jsimmons at infradead.org
Mon Jul 19 05:32:06 PDT 2021


From: Amir Shehata <ashehata at whamcloud.com>

Add infrastructure to force RDMA even for payloads smaller than 4K.
Add infrastructure to extract the first page of a payload, which is
useful for determining the type of the payload to be transmitted.
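
For context, a minimal sketch of the intended semantics of the new
msg_rdma_force flag (illustration only, not code from this patch; the
struct and helper below are simplified stand-ins for the real
kib_msg/lnet_msg definitions): a payload that would normally fit in an
IMMEDIATE send is still pushed down the RDMA path when the flag is set,
which mirrors the two o2iblnd checks changed below.

/* Illustration only -- not part of the patch.  Simplified stand-ins
 * for the real kib_msg/lnet_msg definitions; IBLND_MSG_SIZE is 4 KiB
 * in o2iblnd. */
#include <stdbool.h>
#include <stddef.h>

#define IBLND_MSG_SIZE	(4 << 10)

struct example_msg {
	size_t	payload_nob;	/* payload size in bytes */
	bool	rdma_force;	/* mirrors lnet_msg::msg_rdma_force */
};

/* Decide whether a PUT/REPLY payload can be carried in an IMMEDIATE
 * message.  With this patch applied, a message with the force flag set
 * takes the RDMA path even when the payload would otherwise fit. */
static bool send_as_immediate(const struct example_msg *msg, size_t hdr_nob)
{
	size_t nob = hdr_nob + msg->payload_nob;

	return nob <= IBLND_MSG_SIZE && !msg->rdma_force;
}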

WC-bug-id: https://jira.whamcloud.com/browse/LU-14798
Lustre-commit: 7ac839837c1c6cd1f ("LU-14798 lnet: RMDA infrastructure updates")
Signed-off-by: Amir Shehata <ashehata at whamcloud.com>
Lustre-change: https://review.whamcloud.com/37453
Reviewed-by: Andreas Dilger <adilger at whamcloud.com>
Reviewed-by: Wang Shilong <wshilong at ddn.com>
Whamcloud-bug-id: EX-773
Reviewed-on: https://review.whamcloud.com/44109
Reviewed-by: Wang Shilong <wshilong at whamcloud.com>
Signed-off-by: James Simmons <jsimmons at infradead.org>
---
 include/linux/lnet/lib-lnet.h       |  1 +
 include/linux/lnet/lib-types.h      |  2 ++
 net/lnet/klnds/o2iblnd/o2iblnd_cb.c |  4 ++--
 net/lnet/lnet/lib-md.c              | 29 +++++++++++++++++++++++------
 4 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/include/linux/lnet/lib-lnet.h b/include/linux/lnet/lib-lnet.h
index 6b9e926..f56ecab 100644
--- a/include/linux/lnet/lib-lnet.h
+++ b/include/linux/lnet/lib-lnet.h
@@ -711,6 +711,7 @@ void lnet_copy_kiov2iter(struct iov_iter *to,
 void lnet_md_unlink(struct lnet_libmd *md);
 void lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_event *ev);
 struct page *lnet_kvaddr_to_page(unsigned long vaddr);
+struct page *lnet_get_first_page(struct lnet_libmd *md, unsigned int offset);
 int lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset);
 
 unsigned int lnet_get_lnd_timeout(void);
diff --git a/include/linux/lnet/lib-types.h b/include/linux/lnet/lib-types.h
index 64d7472..e951e02 100644
--- a/include/linux/lnet/lib-types.h
+++ b/include/linux/lnet/lib-types.h
@@ -122,6 +122,8 @@ struct lnet_msg {
 	enum lnet_msg_hstatus	msg_health_status;
 	/* This is a recovery message */
 	bool			msg_recovery;
+	/* force an RDMA even if the message size is < 4K */
+	bool			msg_rdma_force;
 	/* the number of times a transmission has been retried */
 	int			msg_retry_count;
 	/* flag to indicate that we do not want to resend this message */
diff --git a/net/lnet/klnds/o2iblnd/o2iblnd_cb.c b/net/lnet/klnds/o2iblnd/o2iblnd_cb.c
index ec0d05a..c66acc51 100644
--- a/net/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/net/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1553,7 +1553,7 @@ static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
 
 		/* is the REPLY message too small for RDMA? */
 		nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
-		if (nob <= IBLND_MSG_SIZE)
+		if (nob <= IBLND_MSG_SIZE && !lntmsg->msg_rdma_force)
 			break;		/* send IMMEDIATE */
 
 		tx = kiblnd_get_idle_tx(ni, target.nid);
@@ -1599,7 +1599,7 @@ static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
 	case LNET_MSG_PUT:
 		/* Is the payload small enough not to need RDMA? */
 		nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
-		if (nob <= IBLND_MSG_SIZE)
+		if (nob <= IBLND_MSG_SIZE && !lntmsg->msg_rdma_force)
 			break;			/* send IMMEDIATE */
 
 		tx = kiblnd_get_idle_tx(ni, target.nid);
diff --git a/net/lnet/lnet/lib-md.c b/net/lnet/lnet/lib-md.c
index fbee4e0..affa921 100644
--- a/net/lnet/lnet/lib-md.c
+++ b/net/lnet/lnet/lib-md.c
@@ -87,9 +87,9 @@ struct page *lnet_kvaddr_to_page(unsigned long vaddr)
 }
 EXPORT_SYMBOL(lnet_kvaddr_to_page);
 
-int lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset)
+struct page *
+lnet_get_first_page(struct lnet_libmd *md, unsigned int offset)
 {
-	int cpt = CFS_CPT_ANY;
 	unsigned int niov;
 	struct bio_vec *kiov;
 
@@ -102,7 +102,7 @@ int lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset)
 		md = lnet_handle2md(&md->md_bulk_handle);
 
 	if (!md || md->md_niov == 0)
-		return CFS_CPT_ANY;
+		return NULL;
 
 	kiov = md->md_kiov;
 	niov = md->md_niov;
@@ -113,12 +113,29 @@ int lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset)
 		kiov++;
 		if (niov == 0) {
 			CERROR("offset %d goes beyond iov\n", offset);
-			goto out;
+			return NULL;
 		}
 	}
 
-	cpt = cfs_cpt_of_node(lnet_cpt_table(),
-			      page_to_nid(kiov->bv_page));
+	return kiov->bv_page;
+}
+
+int
+lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset)
+{
+	struct page *page;
+	int cpt = CFS_CPT_ANY;
+
+	page = lnet_get_first_page(md, offset);
+	if (!page) {
+		CDEBUG(D_NET,
+		       "Couldn't resolve first page of md %p with offset %u\n",
+		       md, offset);
+		goto out;
+	}
+
+	cpt = cfs_cpt_of_node(lnet_cpt_table(), page_to_nid(page));
+
 out:
 	return cpt;
 }
-- 
1.8.3.1
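
Usage note (a hypothetical sketch, not part of the patch): a caller
that wants to key off the first page of an MD's payload, e.g. to decide
whether to set msg_rdma_force, might look roughly like the following.
Only lnet_get_first_page() and msg_rdma_force come from this patch;
example_maybe_force_rdma() and page_needs_rdma() are made-up names used
purely for illustration.

/* Hypothetical caller, for illustration only. */
#include <linux/lnet/lib-lnet.h>	/* lnet_get_first_page(), struct lnet_libmd */
#include <linux/lnet/lib-types.h>	/* struct lnet_msg */

/* Placeholder predicate: a real caller would test whatever property of
 * the page it cares about (e.g. memory that must not be copied into an
 * immediate buffer). */
static bool page_needs_rdma(struct page *page)
{
	return false;
}

static void example_maybe_force_rdma(struct lnet_msg *msg,
				     struct lnet_libmd *md,
				     unsigned int offset)
{
	struct page *page;

	page = lnet_get_first_page(md, offset);
	if (page && page_needs_rdma(page))
		msg->msg_rdma_force = true;
}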


