@@ -826,7 +826,8 @@ static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
{
void *buf;
- buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir);
+ buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir,
+ MEMTXATTRS_UNSPECIFIED);
return buf;
}
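A side note, not part of the patch: device models that go through the pci_dma_map()/pci_dma_unmap() wrappers need no change, since the wrapper now supplies the attributes itself. A hypothetical caller, with dev, desc_addr and size as placeholders:

/* Hypothetical PCI device code; dev, desc_addr and size are placeholders. */
dma_addr_t plen = size;
void *buf = pci_dma_map(dev, desc_addr, &plen, DMA_DIRECTION_FROM_DEVICE);
if (buf) {
    /* ... fill up to 'plen' bytes for the guest to read ... */
    pci_dma_unmap(dev, buf, plen, DMA_DIRECTION_FROM_DEVICE, plen);
}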
@@ -202,16 +202,17 @@ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
* @addr: address within that address space
* @len: pointer to length of buffer; updated on return
* @dir: indicates the transfer direction
+ * @attrs: memory transaction attributes
*/
static inline void *dma_memory_map(AddressSpace *as,
dma_addr_t addr, dma_addr_t *len,
- DMADirection dir)
+ DMADirection dir, MemTxAttrs attrs)
{
hwaddr xlen = *len;
void *p;
p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
- MEMTXATTRS_UNSPECIFIED);
+ attrs);
*len = xlen;
return p;
}
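For orientation, and not part of the patch itself, a caller of the updated helper follows the usual map/access/unmap pattern. The sketch below assumes an AddressSpace *as, a guest address gpa and a 4 KiB transfer, all placeholders:

/* Sketch only: 'as', 'gpa' and the 4 KiB length are placeholders. */
dma_addr_t len = 4096;
void *buf = dma_memory_map(as, gpa, &len,
                           DMA_DIRECTION_TO_DEVICE,    /* device reads guest memory */
                           MEMTXATTRS_UNSPECIFIED);    /* no specific transaction attributes */
if (buf) {
    /* ... consume up to 'len' bytes; the mapping may be shorter than requested ... */
    dma_memory_unmap(as, buf, len, DMA_DIRECTION_TO_DEVICE, 0);
}

Call sites with no particular transaction attributes simply pass MEMTXATTRS_UNSPECIFIED, which is the mechanical change applied to each caller in the hunks that follow.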
@@ -143,7 +143,8 @@ static void dma_blk_cb(void *opaque, int ret)
while (dbs->sg_cur_index < dbs->sg->nsg) {
cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
- mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
+ mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir,
+ MEMTXATTRS_UNSPECIFIED);
/*
* Make reads deterministic in icount mode. Windows sometimes issues
* disk read requests with overlapping SGs. It leads
@@ -649,7 +649,9 @@ int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
uint32_t l = le32_to_cpu(ents[i].length);
hwaddr len = l;
(*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
- a, &len, DMA_DIRECTION_TO_DEVICE);
+ a, &len,
+ DMA_DIRECTION_TO_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
(*iov)[i].iov_len = len;
if (addr) {
(*addr)[i] = a;
@@ -1054,7 +1056,9 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
hwaddr len = res->iov[i].iov_len;
res->iov[i].iov_base =
dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
- res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);
+ res->addrs[i], &len,
+ DMA_DIRECTION_TO_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
/* Clean up the half-a-mapping we just created... */
@@ -372,7 +372,8 @@ static ssize_t gpadl_iter_io(GpadlIter *iter, void *buf, uint32_t len)
maddr = (iter->gpadl->gfns[idx] << TARGET_PAGE_BITS) | off_in_page;
- iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir);
+ iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir,
+ MEMTXATTRS_UNSPECIFIED);
if (mlen != pgleft) {
dma_memory_unmap(iter->as, iter->map, mlen, iter->dir, 0);
iter->map = NULL;
@@ -489,7 +490,8 @@ int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
goto err;
}
- iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir);
+ iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir,
+ MEMTXATTRS_UNSPECIFIED);
if (!l) {
ret = -EFAULT;
goto err;
@@ -565,7 +567,7 @@ static vmbus_ring_buffer *ringbuf_map_hdr(VMBusRingBufCommon *ringbuf)
dma_addr_t mlen = sizeof(*rb);
rb = dma_memory_map(ringbuf->as, ringbuf->rb_addr, &mlen,
- DMA_DIRECTION_FROM_DEVICE);
+ DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED);
if (mlen != sizeof(*rb)) {
dma_memory_unmap(ringbuf->as, rb, mlen,
DMA_DIRECTION_FROM_DEVICE, 0);
@@ -249,7 +249,8 @@ static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
}
- *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE);
+ *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
if (len < wanted) {
dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
*ptr = NULL;
@@ -938,7 +939,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
/* map PRDT */
if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
- DMA_DIRECTION_TO_DEVICE))){
+ DMA_DIRECTION_TO_DEVICE,
+ MEMTXATTRS_UNSPECIFIED))) {
trace_ahci_populate_sglist_no_map(ad->hba, ad->port_no);
return -1;
}
@@ -1299,7 +1301,8 @@ static int handle_cmd(AHCIState *s, int port, uint8_t slot)
tbl_addr = le64_to_cpu(cmd->tbl_addr);
cmd_len = 0x80;
cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
- DMA_DIRECTION_FROM_DEVICE);
+ DMA_DIRECTION_FROM_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
if (!cmd_fis) {
trace_handle_cmd_badfis(s, port);
return -1;
@@ -36,7 +36,8 @@ int usb_packet_map(USBPacket *p, QEMUSGList *sgl)
while (len) {
dma_addr_t xlen = len;
- mem = dma_memory_map(sgl->as, base, &xlen, dir);
+ mem = dma_memory_map(sgl->as, base, &xlen, dir,
+ MEMTXATTRS_UNSPECIFIED);
if (!mem) {
goto err;
}
@@ -1320,7 +1320,8 @@ static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
is_write ?
DMA_DIRECTION_FROM_DEVICE :
- DMA_DIRECTION_TO_DEVICE);
+ DMA_DIRECTION_TO_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
if (!iov[num_sg].iov_base) {
virtio_error(vdev, "virtio: bogus descriptor or out of resources");
goto out;
@@ -1369,7 +1370,8 @@ static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
sg[i].iov_base = dma_memory_map(vdev->dma_as,
addr[i], &len, is_write ?
DMA_DIRECTION_FROM_DEVICE :
- DMA_DIRECTION_TO_DEVICE);
+ DMA_DIRECTION_TO_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
if (!sg[i].iov_base) {
error_report("virtio: error trying to map MMIO memory");
exit(1);