--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2264,7 +2264,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 	blk_queue_exit(q);
 	return BLK_QC_T_NONE;
 }
-EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */
 
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		     unsigned int hctx_idx)
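
Dropping the export is safe because the block core can already reach
blk_mq_submit_bio() on its own: when a gendisk provides no ->submit_bio
handler, bios are fed straight into blk-mq. A simplified sketch of that
dispatch, modeled on __submit_bio() in block/blk-core.c as of this kernel
(blk-crypto and error handling omitted):

    static blk_qc_t __submit_bio(struct bio *bio)
    {
            struct gendisk *disk = bio->bi_disk;
            blk_qc_t ret;

            /* No ->submit_bio: the disk is driven by blk-mq directly. */
            if (!disk->fops->submit_bio)
                    return blk_mq_submit_bio(bio); /* consumes the queue ref */

            ret = disk->fops->submit_bio(bio);
            blk_queue_exit(disk->queue); /* drop the ref taken at entry */
            return ret;
    }

Once request-based dm stops installing a ->submit_bio handler (see the dm.c
hunks below), nothing outside the block core calls blk_mq_submit_bio() and
the export can go.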
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1762,18 +1762,6 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
 	int srcu_idx;
 	struct dm_table *map;
 
-	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
-		/*
-		 * We are called with a live reference on q_usage_counter, but
-		 * that one will be released as soon as we return. Grab an
-		 * extra one as blk_mq_submit_bio expects to be able to consume
-		 * a reference (which lives until the request is freed in case a
-		 * request is allocated).
-		 */
-		percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
-		return blk_mq_submit_bio(bio);
-	}
-
 	map = dm_get_live_table(md, &srcu_idx);
 
 	/* if we're suspended, we have to queue this io for later */
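
The branch deleted here existed only to patch up reference counting: the
core drops its q_usage_counter reference when ->submit_bio returns, while
blk_mq_submit_bio() expects to own one for the lifetime of any request it
allocates, hence the extra percpu_ref_get(). With request-based devices no
longer routed through dm_submit_bio() at all, the single reference taken at
submission time flows directly into blk-mq. A hedged sketch of the old
versus new flow, with names as in this kernel:

    /*
     * Before (request-based dm, simplified):
     *
     *   __submit_bio()              // holds one q_usage_counter ref
     *     dm_submit_bio()
     *       percpu_ref_get(...)     // extra ref for blk-mq to own
     *       blk_mq_submit_bio()     // keeps its ref until the request is freed
     *   blk_queue_exit()            // original ref dropped on return
     *
     * After: the device has no ->submit_bio, so __submit_bio() tail-calls
     * blk_mq_submit_bio() and hands over the one reference it holds.
     */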
@@ -1843,6 +1831,7 @@ static int next_free_minor(int *minor)
 }
 
 static const struct block_device_operations dm_blk_dops;
+static const struct block_device_operations dm_rq_blk_dops;
 static const struct dax_operations dm_dax_ops;
 
 static void dm_wq_work(struct work_struct *work);
@@ -2242,9 +2231,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 
 	switch (type) {
 	case DM_TYPE_REQUEST_BASED:
+		md->disk->fops = &dm_rq_blk_dops;
 		r = dm_mq_init_request_queue(md, t);
 		if (r) {
-			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
+			DMERR("Cannot initialize queue for request-based dm mapped device");
 			return r;
 		}
 		break;
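
The new assignment is an override: every mapped device starts life
bio-based. For reference (a hedged sketch from this kernel's
drivers/md/dm.c, surrounding initialization omitted), alloc_dev() installs
the default ops when the gendisk is created:

    /* In alloc_dev(), unchanged by this patch (sketch): */
    md->disk->fops = &dm_blk_dops;	/* bio-based by default */
    md->disk->private_data = md;

Placing the override ahead of dm_mq_init_request_queue() keeps the ops
table consistent with the queue type before any I/O can be submitted.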
@@ -3227,6 +3217,15 @@ static const struct block_device_operations dm_blk_dops = {
 	.owner = THIS_MODULE
 };
 
+static const struct block_device_operations dm_rq_blk_dops = {
+	.open = dm_blk_open,
+	.release = dm_blk_close,
+	.ioctl = dm_blk_ioctl,
+	.getgeo = dm_blk_getgeo,
+	.pr_ops = &dm_pr_ops,
+	.owner = THIS_MODULE
+};
+
 static const struct dax_operations dm_dax_ops = {
 	.direct_access = dm_dax_direct_access,
 	.dax_supported = dm_dax_supported,
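
The new table is deliberately a copy of dm_blk_dops minus ->submit_bio
(and the bio-based-only ->report_zones): the missing member is the whole
mechanism, since the block core treats a NULL ->submit_bio as "send bios to
blk-mq". For contrast, the bio-based table defined just above reads roughly
as follows in this kernel (quoted from memory, so fields may differ
slightly):

    static const struct block_device_operations dm_blk_dops = {
            .submit_bio = dm_submit_bio,	/* bio-based entry point */
            .open = dm_blk_open,
            .release = dm_blk_close,
            .ioctl = dm_blk_ioctl,
            .getgeo = dm_blk_getgeo,
            .report_zones = dm_blk_report_zones,
            .pr_ops = &dm_pr_ops,
            .owner = THIS_MODULE
    };

Keeping two static tables, rather than clearing ->submit_bio at runtime,
lets both remain const.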