[RFC,5/5] block/nvme: Align iov's va and size on host page size

Message ID 20201015115252.15582-6-eric.auger@redhat.com
State Accepted
Commit 9e13d598843cca1cdab7b7bdcb9cc0868ebf7fed
Series NVMe passthrough: Support 64kB page host

Commit Message

Eric Auger Oct. 15, 2020, 11:52 a.m. UTC
Make sure each iov's virtual address and size are aligned to the
host page size, as required when mapping them for DMA.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
 block/nvme.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
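
For context, QEMU_ALIGN_UP() rounds a value up to the next multiple of
the given alignment, so with a 64kB host page a 4kB iov length is padded
to one full host page before being handed to qemu_vfio_dma_map(). A
minimal standalone sketch of that arithmetic (the two macros are
reproduced from QEMU's include/qemu/osdep.h; the 64kB page size is an
assumed example, not something the patch hard-codes):

    #include <stdio.h>
    #include <stddef.h>

    /* Reproduced from QEMU's include/qemu/osdep.h. */
    #define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
    #define QEMU_ALIGN_UP(n, m)   QEMU_ALIGN_DOWN((n) + (m) - 1, (m))

    int main(void)
    {
        size_t host_page_size = 64 * 1024; /* assumed 64kB host page */
        size_t iov_len = 4096;             /* a 4kB guest buffer */

        /* 4096 rounds up to 65536: one whole host page gets mapped. */
        printf("mapped length: %zu\n",
               QEMU_ALIGN_UP(iov_len, host_page_size));
        return 0;
    }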

Comments

Philippe Mathieu-Daudé Oct. 20, 2020, 11:33 a.m. UTC | #1
On 10/15/20 1:52 PM, Eric Auger wrote:
> Make sure each iov's virtual address and size are aligned to the
> host page size, as required when mapping them for DMA.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>

Patch

diff --git a/block/nvme.c b/block/nvme.c
index be8ec48bf2..45807ed110 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -978,11 +978,12 @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
     for (i = 0; i < qiov->niov; ++i) {
         bool retry = true;
         uint64_t iova;
+        size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
+                                   qemu_real_host_page_size);
 try_map:
         r = qemu_vfio_dma_map(s->vfio,
                               qiov->iov[i].iov_base,
-                              qiov->iov[i].iov_len,
-                              true, &iova);
+                              len, true, &iova);
         if (r == -ENOMEM && retry) {
             retry = false;
             trace_nvme_dma_flush_queue_wait(s);
@@ -1126,8 +1127,8 @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs,
     BDRVNVMeState *s = bs->opaque;
 
     for (i = 0; i < qiov->niov; ++i) {
-        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
-            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
+        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, qemu_real_host_page_size) ||
+            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
             trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                       qiov->iov[i].iov_len, s->page_size);
             return false;
@@ -1143,7 +1144,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     int r;
     uint8_t *buf = NULL;
     QEMUIOVector local_qiov;
-
+    size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
     assert(QEMU_IS_ALIGNED(offset, s->page_size));
     assert(QEMU_IS_ALIGNED(bytes, s->page_size));
     assert(bytes <= s->max_transfer);
@@ -1151,7 +1152,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
         return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
     }
     trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
-    buf = qemu_try_memalign(s->page_size, bytes);
+    buf = qemu_try_memalign(qemu_real_host_page_size, len);
 
     if (!buf) {
         return -ENOMEM;
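
As a closing note on the last hunk: when nvme_qiov_aligned() rejects the
qiov, nvme_co_prw() falls back to a bounce buffer, which this patch now
allocates with host-page alignment and a host-page-rounded length, so
the buffer itself always satisfies the constraint checked above. A
hedged standalone sketch of that allocation pattern, with
posix_memalign() and sysconf() standing in for QEMU's
qemu_try_memalign() and qemu_real_host_page_size:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    #define ALIGN_UP(n, m) (((n) + (m) - 1) / (m) * (m))

    int main(void)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE); /* host page size */
        size_t bytes = 12345;                    /* an unaligned request */
        size_t len = ALIGN_UP(bytes, page);      /* rounded-up length */
        void *buf = NULL;

        /* Both the address and the length are page-aligned now, so
         * the buffer can be DMA-mapped in whole host pages. */
        if (posix_memalign(&buf, page, len) != 0) {
            return 1; /* mirrors the -ENOMEM path in nvme_co_prw() */
        }
        printf("buf=%p len=%zu\n", buf, len);
        free(buf);
        return 0;
    }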