diff mbox series

[v5,17/55] plugins: implement helpers for resolving hwaddr

Message ID 20191014104948.4291-18-alex.bennee@linaro.org
State New
Headers show
Series Support for TCG plugins | expand

Commit Message

Alex Bennée Oct. 14, 2019, 10:49 a.m. UTC
We need to keep a local per-cpu copy of the data as other threads may
be running. We use an automatically growing array and re-use the space
for subsequent queries.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>


---
v5
  - use TLS instead of racy GArray
  - add more commentary regarding success
  - error_report if we fail
---
 accel/tcg/cputlb.c      | 35 +++++++++++++++++++++++++++++++++++
 include/exec/exec-all.h | 20 ++++++++++++++++++++
 include/qemu/plugin.h   |  6 ++++++
 plugins/api.c           | 34 +++++++++++++++++++++++++++++++++-
 4 files changed, 94 insertions(+), 1 deletion(-)

-- 
2.20.1

Comments

Richard Henderson Oct. 14, 2019, 3:45 p.m. UTC | #1
On 10/14/19 3:49 AM, Alex Bennée wrote:
> +bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,

> +                       bool is_store, struct qemu_plugin_hwaddr *data)

> +{

> +    CPUArchState *env = cpu->env_ptr;

> +    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);

> +    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

> +

> +    if (likely(tlb_hit(tlb_addr, addr))) {

> +        if (tlb_addr & TLB_MMIO) {

> +            data->hostaddr = 0;

> +            data->is_io = true;

> +            /* XXX: lookup device */

> +        } else {

> +            data->hostaddr = addr + tlbe->addend;

> +            data->is_io = false;


...

>  uint64_t qemu_plugin_hwaddr_to_raddr(const struct qemu_plugin_hwaddr *haddr)

>  {

> +#ifdef CONFIG_SOFTMMU

> +    ram_addr_t ram_addr = 0;

> +

> +    if (haddr && !haddr->is_io) {

> +        ram_addr = qemu_ram_addr_from_host((void *) haddr->hostaddr);

> +        if (ram_addr == RAM_ADDR_INVALID) {


So, did you want the host address or the ram_addr?

If you really only want the ram_addr then you can get
that directly from the (io)tlb:

    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLB *tlb = &cpu_neg(cpu)->tlb;
    CPUIOTLBEntry *iotlbentry = &tlb->d[mmu_idx].iotlb[index];

    data->raddr = addr + iotlbentry->addr;

That said, what you have works.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>



r~
Peter Maydell Oct. 14, 2019, 3:54 p.m. UTC | #2
On Mon, 14 Oct 2019 at 12:25, Alex Bennée <alex.bennee@linaro.org> wrote:
>

> We need to keep a local per-cpu copy of the data as other threads may

> be running. We use an automatically growing array and re-use the space

> for subsequent queries.

>



> +#ifdef CONFIG_SOFTMMU

> +static __thread struct qemu_plugin_hwaddr hwaddr_info;

> +

> +struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,

> +                                                  uint64_t vaddr)

> +{

> +    CPUState *cpu = current_cpu;

> +    unsigned int mmu_idx = info >> TRACE_MEM_MMU_SHIFT;

> +

> +    if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,

> +                           info & TRACE_MEM_ST, &hwaddr_info)) {

> +        error_report("invalid use of qemu_plugin_get_hwaddr");

> +        return NULL;

> +    }

> +

> +    return &hwaddr_info;

> +}


Apologies for dropping into the middle of this patchset, but
this API looks a bit odd. A hwaddr alone isn't a complete
definition of an access -- you need an (AddressSpace, hwaddr)
tuple for that. So this API looks like it doesn't really cope
with things like TrustZone ?

>  uint64_t qemu_plugin_hwaddr_to_raddr(const struct qemu_plugin_hwaddr *haddr)

>  {

> +#ifdef CONFIG_SOFTMMU

> +    ram_addr_t ram_addr = 0;

> +

> +    if (haddr && !haddr->is_io) {

> +        ram_addr = qemu_ram_addr_from_host((void *) haddr->hostaddr);

> +        if (ram_addr == RAM_ADDR_INVALID) {

> +            error_report("Bad ram pointer %"PRIx64"", haddr->hostaddr);

> +            abort();

> +        }

> +    }

> +    return ram_addr;

> +#else

>      return 0;

> +#endif

>  }


This looks odd to see in the plugin API -- ramaddrs should
be a QEMU internal concept, shouldn't they?

thanks
-- PMM
Alex Bennée Oct. 14, 2019, 4:34 p.m. UTC | #3
Peter Maydell <peter.maydell@linaro.org> writes:

> On Mon, 14 Oct 2019 at 12:25, Alex Bennée <alex.bennee@linaro.org> wrote:

>>

>> We need to keep a local per-cpu copy of the data as other threads may

>> be running. We use an automatically growing array and re-use the space

>> for subsequent queries.

>>

>

>

>> +#ifdef CONFIG_SOFTMMU

>> +static __thread struct qemu_plugin_hwaddr hwaddr_info;

>> +

>> +struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,

>> +                                                  uint64_t vaddr)

>> +{

>> +    CPUState *cpu = current_cpu;

>> +    unsigned int mmu_idx = info >> TRACE_MEM_MMU_SHIFT;

>> +

>> +    if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,

>> +                           info & TRACE_MEM_ST, &hwaddr_info)) {

>> +        error_report("invalid use of qemu_plugin_get_hwaddr");

>> +        return NULL;

>> +    }

>> +

>> +    return &hwaddr_info;

>> +}

>

> Apologies for dropping into the middle of this patchset, but

> this API looks a bit odd. A hwaddr alone isn't a complete

> definition of an access -- you need an (AddressSpace, hwaddr)

> tuple for that. So this API looks like it doesn't really cope

> with things like TrustZone ?


Aren't hwaddrs unique across the bus? Or is this because you would have
two address buses (secure and non-secure) with different address lines to
different chips?

But surely we have all the information we need because we've hooked the
two things that QEMU's softmmu code knows. The mmu_idx and the vaddr
with which the slow path can figure out what it needs.

>>  uint64_t qemu_plugin_hwaddr_to_raddr(const struct qemu_plugin_hwaddr *haddr)

>>  {

>> +#ifdef CONFIG_SOFTMMU

>> +    ram_addr_t ram_addr = 0;

>> +

>> +    if (haddr && !haddr->is_io) {

>> +        ram_addr = qemu_ram_addr_from_host((void *) haddr->hostaddr);

>> +        if (ram_addr == RAM_ADDR_INVALID) {

>> +            error_report("Bad ram pointer %"PRIx64"", haddr->hostaddr);

>> +            abort();

>> +        }

>> +    }

>> +    return ram_addr;

>> +#else

>>      return 0;

>> +#endif

>>  }

>

> This looks odd to see in the plugin API -- ramaddrs should

> be a QEMU internal concept, shouldn't they?


Hmm maybe.. I guess it's a special case of device offset. Do you want to
drop this part for now?

>

> thanks

> -- PMM



--
Alex Bennée
Peter Maydell Oct. 14, 2019, 4:56 p.m. UTC | #4
On Mon, 14 Oct 2019 at 17:34, Alex Bennée <alex.bennee@linaro.org> wrote:
> Peter Maydell <peter.maydell@linaro.org> writes:

> > Apologies for dropping into the middle of this patchset, but

> > this API looks a bit odd. A hwaddr alone isn't a complete

> > definition of an access -- you need an (AddressSpace, hwaddr)

> > tuple for that. So this API looks like it doesn't really cope

> > with things like TrustZone ?

>

> Aren't hwaddr's unique across the bus? Or is this because you would have

> two address buses (secure and non-secure) with different address lines to

> different chips?


Architecturally, there are two completely separate physical
address spaces for Secure and NonSecure.
In actual Arm hardware, what you get is a bus/interconnect
where effectively the 'S/NS' line is an extra address bit.
The interconnect can set things up however it likes, but the
most common setup is "most devices mapped at the same address
in both S and NS", aka "most decode ignores the S/NS signal",
with some "this device is only present in S". There's no inherent
reason you couldn't have "address A is device X in S and device
Y in NS", though.

> But surely we have all the information we need because we've hooked the

> two things that QEMU's softmmu code knows. The mmu_idx and the vaddr

> with which the slow path can figure out what it needs.


I think the slowpath uses the MemTxAttrs rather than the mmu_idx:
iotlb_to_section() calls cpu_asidx_from_attrs() and then pulls the
AddressSpace out of cpu->cpu_ases[].

From the point of view of a plugin, it probably wants
the (hwaddr, MemTxAttrs) for the access.

> > This looks odd to see in the plugin API -- ramaddrs should

> > be a QEMU internal concept, shouldn't they?

>

> Hmm maybe.. I guess it's a special case of device offset. Do you want to

> drop this part for now?


I'd need to go and look at the overall plugin-facing API to
be sure, but in general my expectation would be that the
whole API should be in terms of guest-architectural concepts,
with no QEMU-isms like ramaddrs leaking through.

thanks
-- PMM
diff mbox series

Patch

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index b587d910f8..2937dac7a5 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1247,6 +1247,41 @@  void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
     return (void *)((uintptr_t)addr + entry->addend);
 }
 
+
+#ifdef CONFIG_PLUGIN
+/*
+ * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
+ * This should be a hot path as we will have just looked this path up
+ * in the softmmu lookup code (or helper). We don't handle re-fills or
+ * checking the victim table. This is purely informational.
+ *
+ * This should never fail as the memory access being instrumented
+ * should have just filled the TLB.
+ */
+
+bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
+                       bool is_store, struct qemu_plugin_hwaddr *data)
+{
+    CPUArchState *env = cpu->env_ptr;
+    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
+    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
+
+    if (likely(tlb_hit(tlb_addr, addr))) {
+        if (tlb_addr & TLB_MMIO) {
+            data->hostaddr = 0;
+            data->is_io = true;
+            /* XXX: lookup device */
+        } else {
+            data->hostaddr = addr + tlbe->addend;
+            data->is_io = false;
+        }
+        return true;
+    }
+    return false;
+}
+
+#endif
+
 /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
  * operations, or io operations to proceed.  Return the host address.  */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index ba2f501f0f..47a071fee0 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -261,6 +261,20 @@  void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   hwaddr paddr, int prot,
                   int mmu_idx, target_ulong size);
+
+/**
+ * tlb_plugin_lookup: query last TLB lookup
+ * @cpu: cpu environment
+ *
+ * This function can be used directly after a memory operation to
+ * query information about the access. It is used by the plugin
+ * infrastructure to expose more information about the address.
+ *
+ * It would only fail if not called from an instrumented memory access
+ * which would be an abuse of the API.
+ */
+bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
+                       bool is_store, struct qemu_plugin_hwaddr *data);
 #else
 static inline void tlb_init(CPUState *cpu)
 {
@@ -310,6 +324,12 @@  static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                        uint16_t idxmap)
 {
 }
+static inline bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr,
+                                     int mmu_idx, bool is_store,
+                                     struct qemu_plugin_hwaddr *data)
+{
+    return false;
+}
 #endif
 void *probe_access(CPUArchState *env, target_ulong addr, int size,
                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
index 3c46a24166..657345df60 100644
--- a/include/qemu/plugin.h
+++ b/include/qemu/plugin.h
@@ -182,6 +182,12 @@  struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb)
     return insn;
 }
 
+struct qemu_plugin_hwaddr {
+    uint64_t hostaddr;
+    bool is_io;
+};
+
+
 #ifdef CONFIG_PLUGIN
 
 void qemu_plugin_vcpu_init_hook(CPUState *cpu);
diff --git a/plugins/api.c b/plugins/api.c
index 3de05719a8..fbacd78df6 100644
--- a/plugins/api.c
+++ b/plugins/api.c
@@ -39,7 +39,7 @@ 
 #include "cpu.h"
 #include "sysemu/sysemu.h"
 #include "tcg/tcg.h"
-#include "trace/mem-internal.h" /* mem_info macros */
+#include "exec/exec-all.h"
 #include "plugin.h"
 #ifndef CONFIG_USER_ONLY
 #include "hw/boards.h"
@@ -240,11 +240,30 @@  bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info)
  * Virtual Memory queries
  */
 
+#ifdef CONFIG_SOFTMMU
+static __thread struct qemu_plugin_hwaddr hwaddr_info;
+
+struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
+                                                  uint64_t vaddr)
+{
+    CPUState *cpu = current_cpu;
+    unsigned int mmu_idx = info >> TRACE_MEM_MMU_SHIFT;
+
+    if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,
+                           info & TRACE_MEM_ST, &hwaddr_info)) {
+        error_report("invalid use of qemu_plugin_get_hwaddr");
+        return NULL;
+    }
+
+    return &hwaddr_info;
+}
+#else
 struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
                                                   uint64_t vaddr)
 {
     return NULL;
 }
+#endif
 
 bool qemu_plugin_hwaddr_is_io(struct qemu_plugin_hwaddr *hwaddr)
 {
@@ -253,7 +272,20 @@  bool qemu_plugin_hwaddr_is_io(struct qemu_plugin_hwaddr *hwaddr)
 
 uint64_t qemu_plugin_hwaddr_to_raddr(const struct qemu_plugin_hwaddr *haddr)
 {
+#ifdef CONFIG_SOFTMMU
+    ram_addr_t ram_addr = 0;
+
+    if (haddr && !haddr->is_io) {
+        ram_addr = qemu_ram_addr_from_host((void *) haddr->hostaddr);
+        if (ram_addr == RAM_ADDR_INVALID) {
+            error_report("Bad ram pointer %"PRIx64"", haddr->hostaddr);
+            abort();
+        }
+    }
+    return ram_addr;
+#else
     return 0;
+#endif
 }
 
 /*