@@ -12,6 +12,7 @@
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <uapi/linux/io_uring.h>
#include "blk.h"
static struct kmem_cache *bip_slab;
@@ -252,7 +253,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
goto free_bip;
}
- bip->bip_flags |= BIP_COPY_USER;
+ bip->bip_flags |= BIP_INTEGRITY_USER | BIP_COPY_USER;
bip->bip_iter.bi_sector = seed;
bip->bip_vcnt = nr_vecs;
bip->bio_iter = bio->bi_iter;
@@ -274,6 +275,7 @@ static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
return PTR_ERR(bip);
memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
+ bip->bip_flags |= BIP_INTEGRITY_USER;
bip->bip_iter.bi_sector = seed;
bip->bip_iter.bi_size = len;
bip->bip_vcnt = nr_vecs;
@@ -310,6 +312,47 @@ static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
return nr_bvecs;
}
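+/*
+ * Carry the user-requested integrity checks (guard/app/ref tag) and the
+ * application tag value from the uio_meta descriptor over to the bio's
+ * integrity payload.
+ */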
+static void bio_uio_meta_to_bip(struct bio *bio, struct uio_meta *meta)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+
+ if (meta->flags & INTEGRITY_CHK_GUARD)
+ bip->bip_flags |= BIP_CHECK_GUARD;
+ if (meta->flags & INTEGRITY_CHK_APPTAG)
+ bip->bip_flags |= BIP_CHECK_APPTAG;
+ if (meta->flags & INTEGRITY_CHK_REFTAG)
+ bip->bip_flags |= BIP_CHECK_REFTAG;
+
+ bip->app_tag = meta->app_tag;
+}
+
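+/*
+ * Pin the user integrity buffer described by @meta for @bio. Only the part
+ * of the iterator that corresponds to the data in @bio is mapped; on
+ * success the caller's iterator is advanced past the consumed bytes.
+ */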
+int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
+{
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ unsigned int integrity_bytes;
+ int ret;
+ struct iov_iter it;
+
+ if (!bi)
+ return -EINVAL;
+ /*
+ * The original meta iterator can be bigger; process only the
+ * integrity info that corresponds to the current data buffer.
+ */
+ it = meta->iter;
+ integrity_bytes = bio_integrity_bytes(bi, bio_sectors(bio));
+ if (it.count < integrity_bytes)
+ return -EINVAL;
+
+ it.count = integrity_bytes;
+ ret = bio_integrity_map_user(bio, &it, 0);
+ if (!ret) {
+ bio_uio_meta_to_bip(bio, meta);
+ iov_iter_advance(&meta->iter, integrity_bytes);
+ }
+ return ret;
+}
+
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter,
u32 seed)
{
@@ -154,6 +154,9 @@ static void blkdev_bio_end_io(struct bio *bio)
}
}
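+ /* Release the user-mapped integrity buffer, if any */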
+ if (bio_integrity(bio) && (dio->iocb->ki_flags & IOCB_HAS_META))
+ bio_integrity_unmap_user(bio);
+
if (should_dirty) {
bio_check_pages_dirty(bio);
} else {
@@ -231,6 +234,16 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
}
bio->bi_opf |= REQ_NOWAIT;
}
+ if (!is_sync && unlikely(iocb->ki_flags & IOCB_HAS_META)) {
+ ret = bio_integrity_map_iter(bio, iocb->private);
+ if (unlikely(ret)) {
+ bio_release_pages(bio, false);
+ bio_clear_flag(bio, BIO_REFFED);
+ bio_put(bio);
+ blk_finish_plug(&plug);
+ return ret;
+ }
+ }
if (is_read) {
if (dio->flags & DIO_SHOULD_DIRTY)
@@ -288,6 +301,9 @@ static void blkdev_bio_end_io_async(struct bio *bio)
ret = blk_status_to_errno(bio->bi_status);
}
+ if (bio_integrity(bio) && (iocb->ki_flags & IOCB_HAS_META))
+ bio_integrity_unmap_user(bio);
+
iocb->ki_complete(iocb, ret);
if (dio->flags & DIO_SHOULD_DIRTY) {
@@ -348,6 +364,15 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
task_io_account_write(bio->bi_iter.bi_size);
}
+ if (unlikely(iocb->ki_flags & IOCB_HAS_META)) {
+ ret = bio_integrity_map_iter(bio, iocb->private);
+ WRITE_ONCE(iocb->private, NULL);
+ if (unlikely(ret)) {
+ bio_put(bio);
+ return ret;
+ }
+ }
+
if (iocb->ki_flags & IOCB_ATOMIC)
bio->bi_opf |= REQ_ATOMIC;
@@ -139,6 +139,8 @@ static void t10_pi_type1_prepare(struct request *rq)
/* Already remapped? */
if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
break;
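+ /* Don't remap tags in user-owned integrity buffers */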
+ if (bip->bip_flags & BIP_INTEGRITY_USER)
+ break;
bip_for_each_vec(iv, bip, iter) {
unsigned int j;
@@ -188,6 +190,8 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
struct bio_vec iv;
struct bvec_iter iter;
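+ /* Leave user-owned integrity buffers untouched on completion */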
+ if (bip->bip_flags & BIP_INTEGRITY_USER)
+ break;
bip_for_each_vec(iv, bip, iter) {
unsigned int j;
void *p;
@@ -313,6 +317,8 @@ static void ext_pi_type1_prepare(struct request *rq)
/* Already remapped? */
if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
break;
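+ /* Don't remap tags in user-owned integrity buffers */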
+ if (bip->bip_flags & BIP_INTEGRITY_USER)
+ break;
bip_for_each_vec(iv, bip, iter) {
unsigned int j;
@@ -14,6 +14,9 @@ enum bip_flags {
BIP_CHECK_GUARD = 1 << 6,
BIP_CHECK_REFTAG = 1 << 7,
BIP_CHECK_APPTAG = 1 << 8,
+ BIP_INTEGRITY_USER = 1 << 9, /* Integrity payload is user address */
};
struct bio_integrity_payload {
@@ -24,6 +27,7 @@ struct bio_integrity_payload {
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned short bip_flags; /* control flags */
+ u16 app_tag; /* application tag value */
struct bvec_iter bio_iter; /* for rewinding parent bio */
@@ -44,7 +48,8 @@ struct uio_meta {
#define BIP_CLONE_FLAGS (BIP_MAPPED_INTEGRITY | BIP_CTRL_NOCHECK | \
BIP_DISK_NOCHECK | BIP_IP_CHECKSUM | \
- BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG)
+ BIP_CHECK_GUARD | BIP_CHECK_REFTAG | \
+ BIP_CHECK_APPTAG | BIP_INTEGRITY_USER)
#ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -89,6 +94,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
unsigned int offset);
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter, u32 seed);
+int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta);
void bio_integrity_unmap_user(struct bio *bio);
bool bio_integrity_prep(struct bio *bio);
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
@@ -120,6 +126,11 @@ static inline int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter,
return -EINVAL;
}
+static inline int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
+{
+ return -EINVAL;
+}
+
static inline void bio_integrity_unmap_user(struct bio *bio)
{
}