diff --git a/block/blk-core.c b/block/blk-core.c
index c21a16e9fdf96e3344bcbbec2f9b98d8c7022f06..1645a1e54a37bee22be9973325adb142e11b9d82 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2520,7 +2520,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 		 * bypass a potential scheduler on the bottom device for
 		 * insert.
 		 */
-		return blk_mq_request_direct_issue(rq);
+		return blk_mq_request_issue_directly(rq);
 	}
 
 	spin_lock_irqsave(q->queue_lock, flags);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 74a4f237ba91ac15e1411e7b8d41eb1948c20ff2..0fc6c95e5a29b85e0f6de2857270bf4d1aadd8c1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1785,7 +1785,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	 * RCU or SRCU read lock is needed before checking quiesced flag.
 	 *
 	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
-	 * blk_mq_request_direct_issue(), and return BLK_STS_OK to caller,
+	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
 	 * and avoid driver to try to dispatch again.
 	 */
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
@@ -1833,7 +1833,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	hctx_unlock(hctx, srcu_idx);
 }
 
-blk_status_t blk_mq_request_direct_issue(struct request *rq)
+blk_status_t blk_mq_request_issue_directly(struct request *rq)
 {
 	blk_status_t ret;
 	int srcu_idx;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index e3ebc93646ca58c4887abb66ce22661092efb1a6..88c558f718190f88e123bba0444b0c50b296e572 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -75,7 +75,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 				struct list_head *list);
 
 /* Used by blk_insert_cloned_request() to issue request directly */
-blk_status_t blk_mq_request_direct_issue(struct request *rq);
+blk_status_t blk_mq_request_issue_directly(struct request *rq);
 
 /*
  * CPU -> queue mappings
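
The hunks above truncate blk_mq_request_issue_directly() right after its local declarations, so for orientation here is a minimal sketch of how the renamed helper plausibly reads in full. The hctx_lock()/hctx_unlock() pair and the call to __blk_mq_try_issue_directly() are visible in the hunks; the blk_mq_map_queue() lookup and the discarded blk_qc_t cookie are assumptions reconstructed from nearby code, not confirmed by this patch:

	blk_status_t blk_mq_request_issue_directly(struct request *rq)
	{
		blk_status_t ret;
		int srcu_idx;
		blk_qc_t unused_cookie;	/* assumed: this caller has no use for the poll cookie */
		struct blk_mq_ctx *ctx = rq->mq_ctx;
		/* assumed lookup of the hw queue serving this ctx */
		struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

		/*
		 * hctx_lock() takes the RCU or SRCU read lock that
		 * __blk_mq_try_issue_directly() requires before it checks the
		 * stopped/quiesced flags (see the comment in the blk-mq.c hunk).
		 */
		hctx_lock(hctx, &srcu_idx);
		ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
		hctx_unlock(hctx, srcu_idx);

		return ret;
	}

The final 'true' is the 'bypass_insert' flag referred to in the blk-mq.c comment: it requests direct issue with no fallback to a scheduler insert, except on a stopped or quiesced queue, where it is ignored and BLK_STS_OK is returned so the driver is not asked to dispatch again.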