diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0430926426fe3401d0493e193f9c5587c3634add..8dfe62786cd5fa5eae77afed283fcff238070835 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -65,19 +65,12 @@ static bool blkcg_policy_enabled(struct request_queue *q,
 	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
-/**
- * blkg_free - free a blkg
- * @blkg: blkg to free
- *
- * Free @blkg which may be partially allocated.
- */
-static void blkg_free(struct blkcg_gq *blkg)
+static void blkg_free_workfn(struct work_struct *work)
 {
+	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+					     free_work);
 	int i;
 
-	if (!blkg)
-		return;
-
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
@@ -89,6 +82,25 @@ static void blkg_free(struct blkcg_gq *blkg)
 	kfree(blkg);
 }
 
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
+{
+	if (!blkg)
+		return;
+
+	/*
+	 * Both ->pd_free_fn() and the request queue's release handler
+	 * may sleep, so free the blkg from a work item instead.
+	 */
+	INIT_WORK(&blkg->free_work, blkg_free_workfn);
+	schedule_work(&blkg->free_work);
+}
+
 static void __blkg_release(struct rcu_head *rcu)
 {
 	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
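
blkg_free() can be reached from atomic context (under a spin lock or
from an RCU callback), while both ->pd_free_fn() and the request
queue's release handler may sleep, so the actual freeing is deferred to
process context via a work item. Below is a minimal sketch of that
defer-to-work pattern; the names (my_obj, my_obj_free) are hypothetical
and only illustrate the idiom, not the kernel code itself.

struct my_obj {
	struct work_struct free_work;
	/* ... payload ... */
};

static void my_obj_free_workfn(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, free_work);

	/* Runs in process context, so sleeping operations are safe here. */
	kfree(obj);
}

/* Safe to call from atomic context, e.g. an RCU callback. */
static void my_obj_free(struct my_obj *obj)
{
	INIT_WORK(&obj->free_work, my_obj_free_workfn);
	schedule_work(&obj->free_work);
}
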
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 11f49f78db32bcd8a5525bda5d09e8b414e57d04..df9cfe4ca5328e48d7590ec74935712fada4ed02 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -280,7 +280,6 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 
 		task_lock(task);
 		if (task->flags & PF_EXITING) {
-			err = -ESRCH;
 			kmem_cache_free(iocontext_cachep, ioc);
 			goto out;
 		}
@@ -292,7 +291,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 	task->io_context->ioprio = ioprio;
 out:
 	task_unlock(task);
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(set_task_ioprio);
 
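With the -ESRCH assignment dropped, racing with an exiting task becomes
a benign no-op: the freshly allocated io_context is discarded and 0 is
returned, while earlier failures (the permission check,
security_task_setioprio(), allocation) still return their own errors.
From userspace, a wrapper like the hypothetical sketch below would now
see ioprio_set() succeed instead of failing with ESRCH in that window
(the constants come from the kernel's ioprio ABI):

#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_CLASS_BE		2
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(class, data) \
	(((class) << IOPRIO_CLASS_SHIFT) | (data))

/* Hypothetical helper: set a best-effort I/O priority on a task. */
static int set_be_ioprio(pid_t pid, int level)
{
	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, pid,
		       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, level));
}
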
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e6f24fa4a4c227f699aac582f1900e7dc7c5a22f..ed3ed86f7dd2426ed8d31ccaa6660f1189d352c3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4462,21 +4462,28 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	return true;
 }
 
-static void blk_mq_elv_switch_back(struct list_head *head,
-		struct request_queue *q)
+static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
+						struct request_queue *q)
 {
 	struct blk_mq_qe_pair *qe;
-	struct elevator_type *t = NULL;
 
 	list_for_each_entry(qe, head, node)
-		if (qe->q == q) {
-			t = qe->type;
-			break;
-		}
+		if (qe->q == q)
+			return qe;
 
-	if (!t)
-		return;
+	return NULL;
+}
 
+static void blk_mq_elv_switch_back(struct list_head *head,
+				  struct request_queue *q)
+{
+	struct blk_mq_qe_pair *qe;
+	struct elevator_type *t;
+
+	qe = blk_lookup_qe_pair(head, q);
+	if (!qe)
+		return;
+	t = qe->type;
 	list_del(&qe->node);
 	kfree(qe);
 
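The blk-mq change is behavior-preserving: the open-coded list walk
moves into blk_lookup_qe_pair(), which returns the matching
blk_mq_qe_pair (or NULL), separating lookup from teardown. The helper
follows the usual find-by-key pattern over a list_head, sketched below
with hypothetical types:

struct qe_pair {
	struct list_head node;
	void *key;
};

/* Return the entry whose key matches @key, or NULL if none does. */
static struct qe_pair *qe_lookup(struct list_head *head, void *key)
{
	struct qe_pair *qe;

	list_for_each_entry(qe, head, node)
		if (qe->key == key)
			return qe;

	return NULL;
}
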
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 2eb01becde8c412c11a7ffedd8a04bd8e41521ac..7e44eccc676dd270da538fa116a4a350984b64b4 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -101,9 +101,6 @@ u64 wbt_default_latency_nsec(struct request_queue *);
 
 #else
 
-static inline void wbt_track(struct request *rq, enum wbt_flags flags)
-{
-}
 static inline int wbt_init(struct request_queue *q)
 {
 	return -EINVAL;
diff --git a/block/genhd.c b/block/genhd.c
index c9a4fc90d3e9018a4eaf691ad7cbe8433461b7ab..b8b6759d670f01dc3584b8c4fda150a0c03579f1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -335,7 +335,7 @@ int blk_alloc_ext_minor(void)
 {
 	int idx;
 
-	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
+	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
 	if (idx == -ENOSPC)
 		return -EBUSY;
 	return idx;
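
ida_alloc_range(ida, min, max, gfp) allocates from the inclusive range
[min, max], so passing NR_EXT_DEVT as the maximum permitted
NR_EXT_DEVT + 1 distinct ids (0 through NR_EXT_DEVT); with
NR_EXT_DEVT - 1 the pool is exactly NR_EXT_DEVT ids. In general, to
hand out exactly N ids starting at 0:

idx = ida_alloc_range(&ida, 0, N - 1, GFP_KERNEL);	/* ids 0..N-1 */
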
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index f2ad8ed8f777cf6df31796fe52fa1eeeb861027b..652cd05b0924c3d687716a0f4bb42e5c7f8d331e 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -95,7 +95,10 @@ struct blkcg_gq {
 
 	spinlock_t			async_bio_lock;
 	struct bio_list			async_bios;
-	struct work_struct		async_bio_work;
+	union {
+		struct work_struct	async_bio_work;
+		struct work_struct	free_work;
+	};
 
 	atomic_t			use_delay;
 	atomic64_t			delay_nsec;
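
The union keeps struct blkcg_gq at its previous size by letting
free_work reuse the storage of async_bio_work. That relies on the two
work items never being in flight at the same time: async_bio_work is
used while the blkg is live, and free_work is initialized only on the
final release path, once the async work can no longer run. A minimal
sketch of the idiom, with hypothetical names:

struct obj {
	union {
		struct work_struct live_work;	/* used while active */
		struct work_struct free_work;	/* used only at release */
	};
};
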
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index dffeb8281c2d9de474f777f577f02175122bf42e..8f5a86e210b90c43c5246eaa121b2048d0351066 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -174,7 +174,7 @@ static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
 static inline void sbitmap_free(struct sbitmap *sb)
 {
 	free_percpu(sb->alloc_hint);
-	kfree(sb->map);
+	kvfree(sb->map);
 	sb->map = NULL;
 }
 
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 2eb3de18ded3ea7dea97494d16aae13a2107e852..ae4fd4de9ebe781603e27c4c0a3a67a5ba0477df 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -110,7 +110,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 		sb->alloc_hint = NULL;
 	}
 
-	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
+	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
 	if (!sb->map) {
 		free_percpu(sb->alloc_hint);
 		return -ENOMEM;
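
The two sbitmap hunks are one logical fix: sb->map holds map_nr words
and can get large for high-depth bitmaps, and a big physically
contiguous kcalloc_node() can fail under memory fragmentation, so the
allocation moves to kvzalloc_node(), which falls back to vmalloc for
large sizes. kvmalloc-family memory must be released with kvfree(),
which handles both kmalloc- and vmalloc-backed pointers, hence the
matching change in sbitmap_free(). The pairing, as a hypothetical
sketch:

u64 *buf = kvzalloc_node(nr * sizeof(*buf), GFP_KERNEL, node);
if (!buf)
	return -ENOMEM;
/* ... use buf ... */
kvfree(buf);	/* correct for either backing allocation */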