From patchwork Wed Jun 20 13:06:17 2018
X-Patchwork-Submitter: Peter Maydell
X-Patchwork-Id: 139323
From: Peter Maydell
To: qemu-arm@nongnu.org, qemu-devel@nongnu.org
Cc: patches@linaro.org, Paolo Bonzini, Richard Henderson
Subject: [PATCH 1/3] tcg: Support MMU protection regions smaller than TARGET_PAGE_SIZE
Date: Wed, 20 Jun 2018 14:06:17 +0100
Message-Id: <20180620130619.11362-2-peter.maydell@linaro.org>
In-Reply-To: <20180620130619.11362-1-peter.maydell@linaro.org>
References: <20180620130619.11362-1-peter.maydell@linaro.org>

Add support for MMU protection regions that are smaller than TARGET_PAGE_SIZE. We do this by marking the TLB entry for those pages with a flag TLB_RECHECK. This flag causes us to always take the slow-path for accesses. In the slow path we can then special case them to always call tlb_fill() again, so we have the correct information for the exact address being accessed.
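As a rough standalone illustration of the mechanism (not part of the patch; TARGET_PAGE_BITS of 10 and the bit positions are assumed example values): a flag bit stored in the low, sub-page-offset bits of a TLB entry's address field makes "tlb_addr & ~TARGET_PAGE_MASK" nonzero, and that is the test which routes an access away from the direct RAM fast path and onto the slow path, where tlb_fill() can be repeated.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 10                      /* assumed example value */
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(uint32_t)(TARGET_PAGE_SIZE - 1))

/* Flag bits live below TARGET_PAGE_BITS, in offset bits that a
 * page-aligned TLB entry never uses (illustrative positions only). */
#define TLB_MMIO     (1u << (TARGET_PAGE_BITS - 3))
#define TLB_RECHECK  (1u << (TARGET_PAGE_BITS - 4))

/* Any flag bit below the page boundary forces the access off the
 * direct-RAM fast path. */
static bool needs_slow_path(uint32_t tlb_addr)
{
    return (tlb_addr & ~TARGET_PAGE_MASK) != 0;
}

int main(void)
{
    uint32_t plain   = 0x00004000u & TARGET_PAGE_MASK;  /* ordinary RAM page */
    uint32_t recheck = plain | TLB_RECHECK;             /* sub-page region   */

    printf("plain:   slow path? %d\n", needs_slow_path(plain));   /* prints 0 */
    printf("recheck: slow path? %d\n", needs_slow_path(recheck)); /* prints 1 */
    return 0;
}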
This change allows us to handle reading and writing from small regions; we cannot deal with execution from the small region. Signed-off-by: Peter Maydell --- accel/tcg/softmmu_template.h | 24 ++++--- include/exec/cpu-all.h | 5 +- accel/tcg/cputlb.c | 131 +++++++++++++++++++++++++++++------ 3 files changed, 130 insertions(+), 30 deletions(-) -- 2.17.1 diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h index 239ea6692b4..c47591c9709 100644 --- a/accel/tcg/softmmu_template.h +++ b/accel/tcg/softmmu_template.h @@ -98,10 +98,12 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, size_t mmu_idx, size_t index, target_ulong addr, - uintptr_t retaddr) + uintptr_t retaddr, + bool recheck) { CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index]; - return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, DATA_SIZE); + return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck, + DATA_SIZE); } #endif @@ -138,7 +140,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ - res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr); + res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr, + tlb_addr & TLB_RECHECK); res = TGT_LE(res); return res; } @@ -205,7 +208,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ - res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr); + res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr, + tlb_addr & TLB_RECHECK); res = TGT_BE(res); return res; } @@ -259,10 +263,12 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env, size_t mmu_idx, size_t index, DATA_TYPE val, target_ulong addr, - uintptr_t retaddr) + uintptr_t retaddr, + bool recheck) { CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index]; - return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, DATA_SIZE); + return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, + recheck, DATA_SIZE); } void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, @@ -298,7 +304,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ val = TGT_LE(val); - glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr); + glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, + retaddr, tlb_addr & TLB_RECHECK); return; } @@ -375,7 +382,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ val = TGT_BE(val); - glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr); + glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr, + tlb_addr & TLB_RECHECK); return; } diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 7fa726b8e36..7338f57062f 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -330,11 +330,14 @@ CPUArchState *cpu_copy(CPUArchState *env); #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2)) /* Set if TLB entry is an IO callback. 
*/ #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3)) +/* Set if TLB entry must have MMU lookup repeated for every access */ +#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4)) /* Use this mask to check interception with an alignment mask * in a TCG backend. */ -#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO) +#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \ + | TLB_RECHECK) void dump_exec_info(FILE *f, fprintf_function cpu_fprintf); void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf); diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 0a721bb9c40..d893452295f 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -621,27 +621,42 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, target_ulong code_address; uintptr_t addend; CPUTLBEntry *te, *tv, tn; - hwaddr iotlb, xlat, sz; + hwaddr iotlb, xlat, sz, paddr_page; + target_ulong vaddr_page; unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE; int asidx = cpu_asidx_from_attrs(cpu, attrs); assert_cpu_is_self(cpu); - assert(size >= TARGET_PAGE_SIZE); - if (size != TARGET_PAGE_SIZE) { - tlb_add_large_page(env, vaddr, size); - } - sz = size; - section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz, - attrs, &prot); + if (size < TARGET_PAGE_SIZE) { + sz = TARGET_PAGE_SIZE; + } else { + if (size > TARGET_PAGE_SIZE) { + tlb_add_large_page(env, vaddr, size); + } + sz = size; + } + vaddr_page = vaddr & TARGET_PAGE_MASK; + paddr_page = paddr & TARGET_PAGE_MASK; + + section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, + &xlat, &sz, attrs, &prot); assert(sz >= TARGET_PAGE_SIZE); tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx " prot=%x idx=%d\n", vaddr, paddr, prot, mmu_idx); - address = vaddr; - if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) { + address = vaddr_page; + if (size < TARGET_PAGE_SIZE) { + /* + * Slow-path the TLB entries; we will repeat the MMU check and TLB + * fill on every access. + */ + address |= TLB_RECHECK; + } + if (!memory_region_is_ram(section->mr) && + !memory_region_is_romd(section->mr)) { /* IO memory case */ address |= TLB_MMIO; addend = 0; @@ -651,10 +666,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, } code_address = address; - iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat, - prot, &address); + iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page, + paddr_page, xlat, prot, &address); - index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); te = &env->tlb_table[mmu_idx][index]; /* do not discard the translation in te, evict it into a victim tlb */ tv = &env->tlb_v_table[mmu_idx][vidx]; @@ -670,18 +685,18 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, * TARGET_PAGE_BITS, and either * + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM) * + the offset within section->mr of the page base (otherwise) - * We subtract the vaddr (which is page aligned and thus won't + * We subtract the vaddr_page (which is page aligned and thus won't * disturb the low bits) to give an offset which can be added to the * (non-page-aligned) vaddr of the eventual memory access to get * the MemoryRegion offset for the access. Note that the vaddr we * subtract here is that of the page base, and not the same as the * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 
*/ - env->iotlb[mmu_idx][index].addr = iotlb - vaddr; + env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page; env->iotlb[mmu_idx][index].attrs = attrs; /* Now calculate the new entry */ - tn.addend = addend - vaddr; + tn.addend = addend - vaddr_page; if (prot & PAGE_READ) { tn.addr_read = address; } else { @@ -702,7 +717,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, tn.addr_write = address | TLB_MMIO; } else if (memory_region_is_ram(section->mr) && cpu_physical_memory_is_clean( - memory_region_get_ram_addr(section->mr) + xlat)) { + memory_region_get_ram_addr(section->mr) + xlat)) { tn.addr_write = address | TLB_NOTDIRTY; } else { tn.addr_write = address; @@ -775,7 +790,8 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, int mmu_idx, - target_ulong addr, uintptr_t retaddr, int size) + target_ulong addr, uintptr_t retaddr, + bool recheck, int size) { CPUState *cpu = ENV_GET_CPU(env); hwaddr mr_offset; @@ -785,6 +801,29 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, bool locked = false; MemTxResult r; + if (recheck) { + /* + * This is a TLB_RECHECK access, where the MMU protection + * covers a smaller range than a target page, and we must + * repeat the MMU check here. This tlb_fill() call might + * longjump out if this access should cause a guest exception. + */ + int index; + target_ulong tlb_addr; + + tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); + + index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + tlb_addr = env->tlb_table[mmu_idx][index].addr_read; + if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { + /* RAM access */ + uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend; + + return ldn_p((void *)haddr, size); + } + /* Fall through for handling IO accesses */ + } + section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); mr = section->mr; mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; @@ -819,7 +858,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, int mmu_idx, uint64_t val, target_ulong addr, - uintptr_t retaddr, int size) + uintptr_t retaddr, bool recheck, int size) { CPUState *cpu = ENV_GET_CPU(env); hwaddr mr_offset; @@ -828,6 +867,30 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, bool locked = false; MemTxResult r; + if (recheck) { + /* + * This is a TLB_RECHECK access, where the MMU protection + * covers a smaller range than a target page, and we must + * repeat the MMU check here. This tlb_fill() call might + * longjump out if this access should cause a guest exception. 
+ */ + int index; + target_ulong tlb_addr; + + tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); + + index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { + /* RAM access */ + uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend; + + stn_p((void *)haddr, size, val); + return; + } + /* Fall through for handling IO accesses */ + } + section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); mr = section->mr; mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; @@ -911,6 +974,32 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); } } + + if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) { + /* + * This is a TLB_RECHECK access, where the MMU protection + * covers a smaller range than a target page, and we must + * repeat the MMU check here. This tlb_fill() call might + * longjump out if this access should cause a guest exception. + */ + int index; + target_ulong tlb_addr; + + tlb_fill(cpu, addr, 0, MMU_INST_FETCH, mmu_idx, 0); + + index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + tlb_addr = env->tlb_table[mmu_idx][index].addr_code; + if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { + /* RAM access. We can't handle this, so for now just stop */ + cpu_abort(cpu, "Unable to handle guest executing from RAM within " + "a small MPU region at 0x" TARGET_FMT_lx, addr); + } + /* + * Fall through to handle IO accesses (which will almost certainly + * also result in failure) + */ + } + iotlbentry = &env->iotlb[mmu_idx][index]; section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); mr = section->mr; @@ -1019,8 +1108,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK; } - /* Notice an IO access */ - if (unlikely(tlb_addr & TLB_MMIO)) { + /* Notice an IO access or a needs-MMU-lookup access */ + if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) { /* There's really nothing that can be done to support this apart from stop-the-world. 
*/ goto stop_the_world;

From patchwork Wed Jun 20 13:06:18 2018
X-Patchwork-Submitter: Peter Maydell
X-Patchwork-Id: 139326
From: Peter Maydell
To: qemu-arm@nongnu.org, qemu-devel@nongnu.org
Cc: patches@linaro.org, Paolo Bonzini, Richard Henderson
Subject: [PATCH 2/3] target/arm: Set page (region) size in get_phys_addr_pmsav7()
Date: Wed, 20 Jun 2018 14:06:18 +0100
Message-Id: <20180620130619.11362-3-peter.maydell@linaro.org>
In-Reply-To: <20180620130619.11362-1-peter.maydell@linaro.org>
References: <20180620130619.11362-1-peter.maydell@linaro.org>

We want to handle small MPU region sizes for ARMv7M. To do this, make get_phys_addr_pmsav7() set the page size to the region size if it is less than TARGET_PAGE_SIZE, rather than working only in TARGET_PAGE_SIZE chunks.
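As a rough standalone illustration of the effect (not the patch itself; map_page() is a stand-in for tlb_set_page_with_attrs(), and TARGET_PAGE_BITS of 10 is an assumed example value): when the region covers at least a full page the addresses are page-aligned as before, while a smaller region is passed through with its exact addresses and size so the core TLB code can flag the entry for per-access rechecks.

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 10                      /* assumed example value */
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(uint32_t)(TARGET_PAGE_SIZE - 1))

/* Stand-in for tlb_set_page_with_attrs(); just reports what would be mapped. */
static void map_page(uint32_t vaddr, uint32_t paddr, uint32_t size)
{
    printf("map vaddr=0x%08x paddr=0x%08x size=%u\n", vaddr, paddr, size);
}

/* Full-page (or larger) regions are page-aligned as before; sub-page
 * regions keep their exact addresses and small size. */
static void fill_tlb(uint32_t address, uint32_t phys_addr, uint32_t page_size)
{
    if (page_size >= TARGET_PAGE_SIZE) {
        address   &= TARGET_PAGE_MASK;
        phys_addr &= TARGET_PAGE_MASK;
    }
    map_page(address, phys_addr, page_size);
}

int main(void)
{
    fill_tlb(0x20000123u, 0x20000123u, TARGET_PAGE_SIZE); /* normal page        */
    fill_tlb(0x20000123u, 0x20000123u, 256);              /* 256-byte MPU region */
    return 0;
}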
Since the core TCG code can't handle execution from small MPU regions, we strip the exec permission from them so that any execution attempts will cause an MPU exception, rather than allowing it to end up with a cpu_abort() in get_page_addr_code(). (The previous code's intention was to make any small page be treated as having no permissions, but unfortunately errors in the implementation meant that it didn't behave that way. It's possible that some binaries using small regions were accidentally working with our old behaviour and won't now.) Signed-off-by: Peter Maydell --- target/arm/helper.c | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) -- 2.17.1 diff --git a/target/arm/helper.c b/target/arm/helper.c index 1248d84e6fa..a7edeb66633 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -9596,6 +9596,7 @@ static inline bool m_is_system_region(CPUARMState *env, uint32_t address) static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, int *prot, + target_ulong *page_size, ARMMMUFaultInfo *fi) { ARMCPU *cpu = arm_env_get_cpu(env); @@ -9603,6 +9604,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, bool is_user = regime_is_user(env, mmu_idx); *phys_ptr = address; + *page_size = TARGET_PAGE_SIZE; *prot = 0; if (regime_translation_disabled(env, mmu_idx) || @@ -9675,16 +9677,12 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, rsize++; } } - if (rsize < TARGET_PAGE_BITS) { - qemu_log_mask(LOG_UNIMP, - "DRSR[%d]: No support for MPU (sub)region size of" - " %" PRIu32 " bytes. Minimum is %d.\n", - n, (1 << rsize), TARGET_PAGE_SIZE); - continue; - } if (srdis) { continue; } + if (rsize < TARGET_PAGE_BITS) { + *page_size = 1 << rsize; + } break; } @@ -9765,6 +9763,17 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, fi->type = ARMFault_Permission; fi->level = 1; + /* + * Core QEMU code can't handle execution from small pages yet, so + * don't try it. This way we'll get an MPU exception, rather than + * eventually causing QEMU to exit in get_page_addr_code(). + */ + if (*page_size < TARGET_PAGE_SIZE && (*prot & PAGE_EXEC)) { + qemu_log_mask(LOG_UNIMP, + "MPU: No support for execution from regions " + "smaller than 1K\n"); + *prot &= ~PAGE_EXEC; + } return !(*prot & (1 << access_type)); } @@ -10334,7 +10343,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address, } else if (arm_feature(env, ARM_FEATURE_V7)) { /* PMSAv7 */ ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, - phys_ptr, prot, fi); + phys_ptr, prot, page_size, fi); } else { /* Pre-v7 MPU */ ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, @@ -10396,9 +10405,15 @@ bool arm_tlb_fill(CPUState *cs, vaddr address, core_to_arm_mmu_idx(env, mmu_idx), &phys_addr, &attrs, &prot, &page_size, fi, NULL); if (!ret) { - /* Map a single [sub]page. */ - phys_addr &= TARGET_PAGE_MASK; - address &= TARGET_PAGE_MASK; + /* + * Map a single [sub]page. Regions smaller than our declared + * target page size are handled specially, so for those we + * pass in the exact addresses.
+ */ + if (page_size >= TARGET_PAGE_SIZE) { + phys_addr &= TARGET_PAGE_MASK; + address &= TARGET_PAGE_MASK; + } tlb_set_page_with_attrs(cs, address, phys_addr, attrs, prot, mmu_idx, page_size); return 0;

From patchwork Wed Jun 20 13:06:19 2018
X-Patchwork-Submitter: Peter Maydell
X-Patchwork-Id: 139325
From: Peter Maydell
To: qemu-arm@nongnu.org, qemu-devel@nongnu.org
Cc: patches@linaro.org, Paolo Bonzini, Richard Henderson
Subject: [PATCH 3/3] target/arm: Handle small regions in get_phys_addr_pmsav8()
Date: Wed, 20 Jun 2018 14:06:19 +0100
Message-Id: <20180620130619.11362-4-peter.maydell@linaro.org>
In-Reply-To: <20180620130619.11362-1-peter.maydell@linaro.org>
References: <20180620130619.11362-1-peter.maydell@linaro.org>

Allow ARMv8M to handle small MPU and SAU region sizes, by making get_phys_addr_pmsav8() set the page size to 1 if the MPU or SAU region covers less than a TARGET_PAGE_SIZE.
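For illustration only (not part of the patch; TARGET_PAGE_BITS of 10 is an assumed example value), a standalone sketch of the subpage test this relies on: a region hit counts as a subpage hit when the region's base/limit do not cover the whole target page containing the address being translated.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 10                      /* assumed example value */
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(uint32_t)(TARGET_PAGE_SIZE - 1))

/* True if [base, limit] does not cover the whole page containing address. */
static bool region_is_subpage(uint32_t address, uint32_t base, uint32_t limit)
{
    uint32_t page_base  = address & TARGET_PAGE_MASK;
    uint32_t page_limit = page_base + (TARGET_PAGE_SIZE - 1);

    return base > page_base || limit < page_limit;
}

int main(void)
{
    /* Page 0x20000400..0x200007ff with the assumed 1K page size. */
    printf("%d\n", region_is_subpage(0x20000410u, 0x20000400u, 0x200007ffu)); /* 0 */
    printf("%d\n", region_is_subpage(0x20000410u, 0x20000400u, 0x2000041fu)); /* 1 */
    return 0;
}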
We choose to use a size of 1 because it makes no difference to the core code, and avoids having to track both the base and limit for SAU and MPU and then convert into an artificially restricted "page size" that the core code will then ignore. Since the core TCG code can't handle execution from small MPU regions, we strip the exec permission from them so that any execution attempts will cause an MPU exception, rather than allowing it to end up with a cpu_abort() in get_page_addr_code(). (The previous code's intention was to make any small page be treated as having no permissions, but unfortunately errors in the implementation meant that it didn't behave that way. It's possible that some binaries using small regions were accidentally working with our old behaviour and won't now.) We also retain an existing bug, where we ignored the possibility that the SAU region might not cover the entire page, in the case of executable regions. This is necessary because some currently-working guest code images rely on being able to execute from addresses which are covered by a page-sized MPU region but a smaller SAU region. We can remove this workaround if we ever support execution from small regions. Signed-off-by: Peter Maydell --- target/arm/helper.c | 78 ++++++++++++++++++++++++++++++++------------- 1 file changed, 55 insertions(+), 23 deletions(-) -- 2.17.1 diff --git a/target/arm/helper.c b/target/arm/helper.c index a7edeb66633..3c6a4c565b1 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -41,6 +41,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, /* Security attributes for an address, as returned by v8m_security_lookup. */ typedef struct V8M_SAttributes { + bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */ bool ns; bool nsc; uint8_t sregion; @@ -9804,6 +9805,8 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address, int r; bool idau_exempt = false, idau_ns = true, idau_nsc = true; int idau_region = IREGION_NOTVALID; + uint32_t addr_page_base = address & TARGET_PAGE_MASK; + uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); if (cpu->idau) { IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); @@ -9841,6 +9844,9 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address, uint32_t limit = env->sau.rlar[r] | 0x1f; if (base <= address && limit >= address) { + if (base > addr_page_base || limit < addr_page_limit) { + sattrs->subpage = true; + } if (sattrs->srvalid) { /* If we hit in more than one region then we must report * as Secure, not NS-Callable, with no valid region @@ -9880,13 +9886,16 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address, static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, - int *prot, ARMMMUFaultInfo *fi, uint32_t *mregion) + int *prot, bool *is_subpage, + ARMMMUFaultInfo *fi, uint32_t *mregion) { /* Perform a PMSAv8 MPU lookup (without also doing the SAU check * that a full phys-to-virt translation does). * mregion is (if not NULL) set to the region number which matched, * or -1 if no region number is returned (MPU off, address did not * hit a region, address hit in multiple regions). + * We set is_subpage to true if the region hit doesn't cover the + * entire TARGET_PAGE the address is within. 
*/ ARMCPU *cpu = arm_env_get_cpu(env); bool is_user = regime_is_user(env, mmu_idx); @@ -9894,7 +9903,10 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, int n; int matchregion = -1; bool hit = false; + uint32_t addr_page_base = address & TARGET_PAGE_MASK; + uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); + *is_subpage = false; *phys_ptr = address; *prot = 0; if (mregion) { @@ -9932,6 +9944,10 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, continue; } + if (base > addr_page_base || limit < addr_page_limit) { + *is_subpage = true; + } + if (hit) { /* Multiple regions match -- always a failure (unlike * PMSAv7 where highest-numbered-region wins) @@ -9943,23 +9959,6 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, matchregion = n; hit = true; - - if (base & ~TARGET_PAGE_MASK) { - qemu_log_mask(LOG_UNIMP, - "MPU_RBAR[%d]: No support for MPU region base" - "address of 0x%" PRIx32 ". Minimum alignment is " - "%d\n", - n, base, TARGET_PAGE_BITS); - continue; - } - if ((limit + 1) & ~TARGET_PAGE_MASK) { - qemu_log_mask(LOG_UNIMP, - "MPU_RBAR[%d]: No support for MPU region limit" - "address of 0x%" PRIx32 ". Minimum alignment is " - "%d\n", - n, limit, TARGET_PAGE_BITS); - continue; - } } } @@ -9995,6 +9994,18 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, fi->type = ARMFault_Permission; fi->level = 1; + /* + * Core QEMU code can't handle execution from small pages yet, so + * don't try it. This means any attempted execution will generate + * an MPU exception, rather than eventually causing QEMU to exit in + * get_page_addr_code(). + */ + if (*is_subpage && (*prot & PAGE_EXEC)) { + qemu_log_mask(LOG_UNIMP, + "MPU: No support for execution from regions " + "smaller than 1K\n"); + *prot &= ~PAGE_EXEC; + } return !(*prot & (1 << access_type)); } @@ -10002,10 +10013,13 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, - int *prot, ARMMMUFaultInfo *fi) + int *prot, target_ulong *page_size, + ARMMMUFaultInfo *fi) { uint32_t secure = regime_is_secure(env, mmu_idx); V8M_SAttributes sattrs = {}; + bool ret; + bool mpu_is_subpage; if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); @@ -10033,6 +10047,7 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, } else { fi->type = ARMFault_QEMU_SFault; } + *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; *phys_ptr = address; *prot = 0; return true; @@ -10055,6 +10070,7 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). */ fi->type = ARMFault_QEMU_SFault; + *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; *phys_ptr = address; *prot = 0; return true; @@ -10062,8 +10078,22 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, } } - return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, - txattrs, prot, fi, NULL); + ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, + txattrs, prot, &mpu_is_subpage, fi, NULL); + /* + * TODO: this is a temporary hack to ignore the fact that the SAU region + * is smaller than a page if this is an executable region. 
We never + * supported small MPU regions, but we did (accidentally) allow small + * SAU regions, and if we now made small SAU regions not be executable + * then this would break previously working guest code. We can't + * remove this until/unless we implement support for execution from + * small regions. + */ + if (*prot & PAGE_EXEC) { + sattrs.subpage = false; + } + *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; + return ret; } static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, @@ -10339,7 +10369,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address, if (arm_feature(env, ARM_FEATURE_V8)) { /* PMSAv8 */ ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, - phys_ptr, attrs, prot, fi); + phys_ptr, attrs, prot, page_size, fi); } else if (arm_feature(env, ARM_FEATURE_V7)) { /* PMSAv7 */ ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, @@ -10757,6 +10787,7 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) uint32_t mregion; bool targetpriv; bool targetsec = env->v7m.secure; + bool is_subpage; /* Work out what the security state and privilege level we're * interested in is... @@ -10786,7 +10817,8 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) if (arm_current_el(env) != 0 || alt) { /* We can ignore the return value as prot is always set */ pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, - &phys_addr, &attrs, &prot, &fi, &mregion); + &phys_addr, &attrs, &prot, &is_subpage, + &fi, &mregion); if (mregion == -1) { mrvalid = false; mregion = 0;