
[1/7] hw/arm_mptimer.c: Turn ARM MPcore private timers into qdev devices

Message ID 1323103220-1636-2-git-send-email-peter.maydell@linaro.org
State Accepted

Commit Message

Peter Maydell Dec. 5, 2011, 4:40 p.m. UTC
Turn the ARM MPcore private timer/watchdog blocks into separate
qdev devices. This will allow us to share them neatly between
11MPCore and A9MPcore.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 Makefile.target  |    1 +
 hw/arm_mptimer.c |  332 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 hw/mpcore.c      |  184 +++++-------------------------
 3 files changed, 365 insertions(+), 152 deletions(-)
 create mode 100644 hw/arm_mptimer.c
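
For context, a minimal sketch of how a containing MPCore model instantiates
the split-out device, mirroring the hw/mpcore.c changes further down in this
patch (num_cpu stands in for the container's CPU count):

    /* Sketch only: mirrors mpcore_priv_init() below.  The timer/watchdog
     * block is created by qdev name, told how many CPUs it serves, and its
     * MMIO regions and IRQ lines are then mapped by the container model.
     */
    DeviceState *mptimer = qdev_create(NULL, "arm_mptimer");
    qdev_prop_set_uint32(mptimer, "num-cpu", num_cpu);
    qdev_init_nofail(mptimer);
    /* Region 0: timer for the current core, region 1: watchdog for the
     * current core, regions 2..(2 * num_cpu + 1): the per-CPU blocks.
     */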

Comments

Evgeny Voevodin Jan. 13, 2012, 7:18 a.m. UTC | #1
On 12/05/2011 08:40 PM, Peter Maydell wrote:
> -/* Return conversion factor from mpcore timer ticks to qemu timer ticks.  */
> -static inline uint32_t mpcore_timer_scale(mpcore_timer_state *s)
> -{
> -    return (((s->control>>  8)&  0xff) + 1) * 10;
> -}

Dear Peter, could you please explain why this conversion is used to derive
QEMU ticks from MPCore timer ticks, and in particular why it multiplies by 10?
AFAIK QEMU's timer ticks are derived from the host's time by default. The Cortex
documentation also says the MPCore timer tick interval should be calculated
this way: ((prescaler + 1) * (load + 1)) / freq
Later, mpcore_timer_reload uses this code to update the QEMU tick:

> -    s->tick += (int64_t)s->count * mpcore_timer_scale(s);

I see that the equation has effectively become: count * (prescaler + 1) * 10
As I understand it, this means the ARM core's internal timer runs (ideally)
at a tenth of the host's frequency.
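
Concretely, here is what the scale appears to imply, assuming (as the code
seems to) that vm_clock counts nanoseconds; control and count stand in for
tb->control and tb->count:

    /* prescaler comes from bits [15:8] of the Control register, as in
     * timerblock_scale() above.  One timer decrement then takes
     * (prescaler + 1) * 10 ns, i.e. 10 ns per tick at prescaler == 0,
     * which corresponds to a nominal 100 MHz input clock (1 / 10 ns).
     */
    uint32_t prescaler = (control >> 8) & 0xff;
    int64_t ns_per_tick = (int64_t)(prescaler + 1) * 10;
    int64_t ns_until_expiry = (int64_t)count * ns_per_tick; /* added to tb->tick */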

Patch

diff --git a/Makefile.target b/Makefile.target
index a111521..39b2e5a 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -344,6 +344,7 @@  obj-arm-y = integratorcp.o versatilepb.o arm_pic.o arm_timer.o
 obj-arm-y += arm_boot.o pl011.o pl031.o pl050.o pl080.o pl110.o pl181.o pl190.o
 obj-arm-y += versatile_pci.o
 obj-arm-y += realview_gic.o realview.o arm_sysctl.o arm11mpcore.o a9mpcore.o
+obj-arm-y += arm_mptimer.o
 obj-arm-y += armv7m.o armv7m_nvic.o stellaris.o pl022.o stellaris_enet.o
 obj-arm-y += pl061.o
 obj-arm-y += arm-semi.o
diff --git a/hw/arm_mptimer.c b/hw/arm_mptimer.c
new file mode 100644
index 0000000..455a0aa
--- /dev/null
+++ b/hw/arm_mptimer.c
@@ -0,0 +1,332 @@ 
+/*
+ * Private peripheral timer/watchdog blocks for ARM 11MPCore and A9MP
+ *
+ * Copyright (c) 2006-2007 CodeSourcery.
+ * Copyright (c) 2011 Linaro Limited
+ * Written by Paul Brook, Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "sysbus.h"
+#include "qemu-timer.h"
+
+/* This device implements the per-cpu private timer and watchdog block
+ * which is used in both the ARM11MPCore and Cortex-A9MP.
+ */
+
+#define MAX_CPUS 4
+
+/* State of a single timer or watchdog block */
+typedef struct {
+    uint32_t count;
+    uint32_t load;
+    uint32_t control;
+    uint32_t status;
+    int64_t tick;
+    QEMUTimer *timer;
+    qemu_irq irq;
+    MemoryRegion iomem;
+} timerblock;
+
+typedef struct {
+    SysBusDevice busdev;
+    uint32_t num_cpu;
+    timerblock timerblock[MAX_CPUS * 2];
+    MemoryRegion iomem[2];
+} arm_mptimer_state;
+
+static inline int get_current_cpu(arm_mptimer_state *s)
+{
+    if (cpu_single_env->cpu_index >= s->num_cpu) {
+        hw_error("arm_mptimer: num-cpu %d but this cpu is %d!\n",
+                 s->num_cpu, cpu_single_env->cpu_index);
+    }
+    return cpu_single_env->cpu_index;
+}
+
+static inline void timerblock_update_irq(timerblock *tb)
+{
+    qemu_set_irq(tb->irq, tb->status);
+}
+
+/* Return conversion factor from mpcore timer ticks to qemu timer ticks.  */
+static inline uint32_t timerblock_scale(timerblock *tb)
+{
+    return (((tb->control >> 8) & 0xff) + 1) * 10;
+}
+
+static void timerblock_reload(timerblock *tb, int restart)
+{
+    if (tb->count == 0) {
+        return;
+    }
+    if (restart) {
+        tb->tick = qemu_get_clock_ns(vm_clock);
+    }
+    tb->tick += (int64_t)tb->count * timerblock_scale(tb);
+    qemu_mod_timer(tb->timer, tb->tick);
+}
+
+static void timerblock_tick(void *opaque)
+{
+    timerblock *tb = (timerblock *)opaque;
+    tb->status = 1;
+    if (tb->control & 2) {
+        tb->count = tb->load;
+        timerblock_reload(tb, 0);
+    } else {
+        tb->count = 0;
+    }
+    timerblock_update_irq(tb);
+}
+
+static uint64_t timerblock_read(void *opaque, target_phys_addr_t addr,
+                                unsigned size)
+{
+    timerblock *tb = (timerblock *)opaque;
+    int64_t val;
+    addr &= 0x1f;
+    switch (addr) {
+    case 0: /* Load */
+        return tb->load;
+    case 4: /* Counter.  */
+        if (((tb->control & 1) == 0) || (tb->count == 0)) {
+            return 0;
+        }
+        /* Slow and ugly, but hopefully won't happen too often.  */
+        val = tb->tick - qemu_get_clock_ns(vm_clock);
+        val /= timerblock_scale(tb);
+        if (val < 0) {
+            val = 0;
+        }
+        return val;
+    case 8: /* Control.  */
+        return tb->control;
+    case 12: /* Interrupt status.  */
+        return tb->status;
+    default:
+        return 0;
+    }
+}
+
+static void timerblock_write(void *opaque, target_phys_addr_t addr,
+                             uint64_t value, unsigned size)
+{
+    timerblock *tb = (timerblock *)opaque;
+    int64_t old;
+    addr &= 0x1f;
+    switch (addr) {
+    case 0: /* Load */
+        tb->load = value;
+        /* Fall through.  */
+    case 4: /* Counter.  */
+        if ((tb->control & 1) && tb->count) {
+            /* Cancel the previous timer.  */
+            qemu_del_timer(tb->timer);
+        }
+        tb->count = value;
+        if (tb->control & 1) {
+            timerblock_reload(tb, 1);
+        }
+        break;
+    case 8: /* Control.  */
+        old = tb->control;
+        tb->control = value;
+        if (((old & 1) == 0) && (value & 1)) {
+            if (tb->count == 0 && (tb->control & 2)) {
+                tb->count = tb->load;
+            }
+            timerblock_reload(tb, 1);
+        }
+        break;
+    case 12: /* Interrupt status.  */
+        tb->status &= ~value;
+        timerblock_update_irq(tb);
+        break;
+    }
+}
+
+/* Wrapper functions to implement the "read timer/watchdog for
+ * the current CPU" memory regions.
+ */
+static uint64_t arm_thistimer_read(void *opaque, target_phys_addr_t addr,
+                                   unsigned size)
+{
+    arm_mptimer_state *s = (arm_mptimer_state *)opaque;
+    int id = get_current_cpu(s);
+    return timerblock_read(&s->timerblock[id * 2], addr, size);
+}
+
+static void arm_thistimer_write(void *opaque, target_phys_addr_t addr,
+                                uint64_t value, unsigned size)
+{
+    arm_mptimer_state *s = (arm_mptimer_state *)opaque;
+    int id = get_current_cpu(s);
+    timerblock_write(&s->timerblock[id * 2], addr, value, size);
+}
+
+static uint64_t arm_thiswdog_read(void *opaque, target_phys_addr_t addr,
+                                  unsigned size)
+{
+    arm_mptimer_state *s = (arm_mptimer_state *)opaque;
+    int id = get_current_cpu(s);
+    return timerblock_read(&s->timerblock[id * 2 + 1], addr, size);
+}
+
+static void arm_thiswdog_write(void *opaque, target_phys_addr_t addr,
+                               uint64_t value, unsigned size)
+{
+    arm_mptimer_state *s = (arm_mptimer_state *)opaque;
+    int id = get_current_cpu(s);
+    timerblock_write(&s->timerblock[id * 2 + 1], addr, value, size);
+}
+
+static const MemoryRegionOps arm_thistimer_ops = {
+    .read = arm_thistimer_read,
+    .write = arm_thistimer_write,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const MemoryRegionOps arm_thiswdog_ops = {
+    .read = arm_thiswdog_read,
+    .write = arm_thiswdog_write,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const MemoryRegionOps timerblock_ops = {
+    .read = timerblock_read,
+    .write = timerblock_write,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void timerblock_reset(timerblock *tb)
+{
+    tb->count = 0;
+    tb->load = 0;
+    tb->control = 0;
+    tb->status = 0;
+    tb->tick = 0;
+}
+
+static void arm_mptimer_reset(DeviceState *dev)
+{
+    arm_mptimer_state *s =
+        FROM_SYSBUS(arm_mptimer_state, sysbus_from_qdev(dev));
+    int i;
+    /* We reset every timer in the array, not just the ones we're using,
+     * because vmsave will look at every array element.
+     */
+    for (i = 0; i < ARRAY_SIZE(s->timerblock); i++) {
+        timerblock_reset(&s->timerblock[i]);
+    }
+}
+
+static int arm_mptimer_init(SysBusDevice *dev)
+{
+    arm_mptimer_state *s = FROM_SYSBUS(arm_mptimer_state, dev);
+    int i;
+    if (s->num_cpu < 1 || s->num_cpu > MAX_CPUS) {
+        hw_error("%s: num-cpu must be between 1 and %d\n", __func__, MAX_CPUS);
+    }
+    /* We implement one timer and one watchdog block per CPU, and
+     * expose multiple MMIO regions:
+     *  * region 0 is "timer for this core"
+     *  * region 1 is "watchdog for this core"
+     *  * region 2 is "timer for core 0"
+     *  * region 3 is "watchdog for core 0"
+     *  * region 4 is "timer for core 1"
+     *  * region 5 is "watchdog for core 1"
+     * and so on.
+     * The outgoing interrupt lines are
+     *  * timer for core 0
+     *  * watchdog for core 0
+     *  * timer for core 1
+     *  * watchdog for core 1
+     * and so on.
+     */
+    memory_region_init_io(&s->iomem[0], &arm_thistimer_ops, s,
+                          "arm_mptimer_timer", 0x20);
+    sysbus_init_mmio(dev, &s->iomem[0]);
+    memory_region_init_io(&s->iomem[1], &arm_thiswdog_ops, s,
+                          "arm_mptimer_wdog", 0x20);
+    sysbus_init_mmio(dev, &s->iomem[1]);
+    for (i = 0; i < (s->num_cpu * 2); i++) {
+        timerblock *tb = &s->timerblock[i];
+        tb->timer = qemu_new_timer_ns(vm_clock, timerblock_tick, tb);
+        sysbus_init_irq(dev, &tb->irq);
+        memory_region_init_io(&tb->iomem, &timerblock_ops, tb,
+                              "arm_mptimer_timerblock", 0x20);
+        sysbus_init_mmio(dev, &tb->iomem);
+    }
+
+    return 0;
+}
+
+static const VMStateDescription vmstate_timerblock = {
+    .name = "arm_mptimer_timerblock",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(count, timerblock),
+        VMSTATE_UINT32(load, timerblock),
+        VMSTATE_UINT32(control, timerblock),
+        VMSTATE_UINT32(status, timerblock),
+        VMSTATE_INT64(tick, timerblock),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_arm_mptimer = {
+    .name = "arm_mptimer",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_STRUCT_ARRAY(timerblock, arm_mptimer_state, (MAX_CPUS * 2),
+                             1, vmstate_timerblock, timerblock),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static SysBusDeviceInfo arm_mptimer_info = {
+    .init = arm_mptimer_init,
+    .qdev.name = "arm_mptimer",
+    .qdev.size = sizeof(arm_mptimer_state),
+    .qdev.vmsd = &vmstate_arm_mptimer,
+    .qdev.reset = arm_mptimer_reset,
+    .qdev.no_user = 1,
+    .qdev.props = (Property[]) {
+        DEFINE_PROP_UINT32("num-cpu", arm_mptimer_state, num_cpu, 0),
+        DEFINE_PROP_END_OF_LIST()
+    }
+};
+
+static void arm_mptimer_register_devices(void)
+{
+    sysbus_register_withprop(&arm_mptimer_info);
+}
+
+device_init(arm_mptimer_register_devices)
diff --git a/hw/mpcore.c b/hw/mpcore.c
index 4357d12..3d64609 100644
--- a/hw/mpcore.c
+++ b/hw/mpcore.c
@@ -22,135 +22,18 @@  gic_get_current_cpu(void)
 
 /* MPCore private memory region.  */
 
-typedef struct {
-    uint32_t count;
-    uint32_t load;
-    uint32_t control;
-    uint32_t status;
-    uint32_t old_status;
-    int64_t tick;
-    QEMUTimer *timer;
-    struct mpcore_priv_state *mpcore;
-    int id; /* Encodes both timer/watchdog and CPU.  */
-} mpcore_timer_state;
-
 typedef struct mpcore_priv_state {
     gic_state gic;
     uint32_t scu_control;
     int iomemtype;
-    mpcore_timer_state timer[8];
+    uint32_t old_timer_status[8];
     uint32_t num_cpu;
+    qemu_irq *timer_irq;
     MemoryRegion iomem;
     MemoryRegion container;
+    DeviceState *mptimer;
 } mpcore_priv_state;
 
-/* Per-CPU Timers.  */
-
-static inline void mpcore_timer_update_irq(mpcore_timer_state *s)
-{
-    if (s->status & ~s->old_status) {
-        gic_set_pending_private(&s->mpcore->gic, s->id >> 1, 29 + (s->id & 1));
-    }
-    s->old_status = s->status;
-}
-
-/* Return conversion factor from mpcore timer ticks to qemu timer ticks.  */
-static inline uint32_t mpcore_timer_scale(mpcore_timer_state *s)
-{
-    return (((s->control >> 8) & 0xff) + 1) * 10;
-}
-
-static void mpcore_timer_reload(mpcore_timer_state *s, int restart)
-{
-    if (s->count == 0)
-        return;
-    if (restart)
-        s->tick = qemu_get_clock_ns(vm_clock);
-    s->tick += (int64_t)s->count * mpcore_timer_scale(s);
-    qemu_mod_timer(s->timer, s->tick);
-}
-
-static void mpcore_timer_tick(void *opaque)
-{
-    mpcore_timer_state *s = (mpcore_timer_state *)opaque;
-    s->status = 1;
-    if (s->control & 2) {
-        s->count = s->load;
-        mpcore_timer_reload(s, 0);
-    } else {
-        s->count = 0;
-    }
-    mpcore_timer_update_irq(s);
-}
-
-static uint32_t mpcore_timer_read(mpcore_timer_state *s, int offset)
-{
-    int64_t val;
-    switch (offset) {
-    case 0: /* Load */
-        return s->load;
-        /* Fall through.  */
-    case 4: /* Counter.  */
-        if (((s->control & 1) == 0) || (s->count == 0))
-            return 0;
-        /* Slow and ugly, but hopefully won't happen too often.  */
-        val = s->tick - qemu_get_clock_ns(vm_clock);
-        val /= mpcore_timer_scale(s);
-        if (val < 0)
-            val = 0;
-        return val;
-    case 8: /* Control.  */
-        return s->control;
-    case 12: /* Interrupt status.  */
-        return s->status;
-    default:
-        return 0;
-    }
-}
-
-static void mpcore_timer_write(mpcore_timer_state *s, int offset,
-                               uint32_t value)
-{
-    int64_t old;
-    switch (offset) {
-    case 0: /* Load */
-        s->load = value;
-        /* Fall through.  */
-    case 4: /* Counter.  */
-        if ((s->control & 1) && s->count) {
-            /* Cancel the previous timer.  */
-            qemu_del_timer(s->timer);
-        }
-        s->count = value;
-        if (s->control & 1) {
-            mpcore_timer_reload(s, 1);
-        }
-        break;
-    case 8: /* Control.  */
-        old = s->control;
-        s->control = value;
-        if (((old & 1) == 0) && (value & 1)) {
-            if (s->count == 0 && (s->control & 2))
-                s->count = s->load;
-            mpcore_timer_reload(s, 1);
-        }
-        break;
-    case 12: /* Interrupt status.  */
-        s->status &= ~value;
-        mpcore_timer_update_irq(s);
-        break;
-    }
-}
-
-static void mpcore_timer_init(mpcore_priv_state *mpcore,
-                              mpcore_timer_state *s, int id)
-{
-    s->id = id;
-    s->mpcore = mpcore;
-    s->timer = qemu_new_timer_ns(vm_clock, mpcore_timer_tick, s);
-}
-
-
 /* Per-CPU private memory mapped IO.  */
 
 static uint64_t mpcore_priv_read(void *opaque, target_phys_addr_t offset,
@@ -185,20 +68,6 @@  static uint64_t mpcore_priv_read(void *opaque, target_phys_addr_t offset,
             }
         }
         return gic_cpu_read(&s->gic, id, offset & 0xff);
-    } else if (offset < 0xb00) {
-        /* Timers.  */
-        if (offset < 0x700) {
-            id = gic_get_current_cpu();
-        } else {
-            id = (offset - 0x700) >> 8;
-            if (id >= s->num_cpu) {
-                return 0;
-            }
-        }
-        id <<= 1;
-        if (offset & 0x20)
-          id++;
-        return mpcore_timer_read(&s->timer[id], offset & 0xf);
     }
 bad_reg:
     hw_error("mpcore_priv_read: Bad offset %x\n", (int)offset);
@@ -233,20 +102,6 @@  static void mpcore_priv_write(void *opaque, target_phys_addr_t offset,
         if (id < s->num_cpu) {
             gic_cpu_write(&s->gic, id, offset & 0xff, value);
         }
-    } else if (offset < 0xb00) {
-        /* Timers.  */
-        if (offset < 0x700) {
-            id = gic_get_current_cpu();
-        } else {
-            id = (offset - 0x700) >> 8;
-        }
-        if (id < s->num_cpu) {
-            id <<= 1;
-            if (offset & 0x20)
-              id++;
-            mpcore_timer_write(&s->timer[id], offset & 0xf, value);
-        }
-        return;
     }
     return;
 bad_reg:
@@ -259,25 +114,50 @@  static const MemoryRegionOps mpcore_priv_ops = {
     .endianness = DEVICE_NATIVE_ENDIAN,
 };
 
+static void mpcore_timer_irq_handler(void *opaque, int irq, int level)
+{
+    mpcore_priv_state *s = (mpcore_priv_state *)opaque;
+    if (level && !s->old_timer_status[irq]) {
+        gic_set_pending_private(&s->gic, irq >> 1, 29 + (irq & 1));
+    }
+    s->old_timer_status[irq] = level;
+}
+
 static void mpcore_priv_map_setup(mpcore_priv_state *s)
 {
+    int i;
+    SysBusDevice *busdev = sysbus_from_qdev(s->mptimer);
     memory_region_init(&s->container, "mpcode-priv-container", 0x2000);
     memory_region_init_io(&s->iomem, &mpcore_priv_ops, s, "mpcode-priv",
                           0x1000);
     memory_region_add_subregion(&s->container, 0, &s->iomem);
+    /* Add the regions for timer and watchdog for "current CPU" and
+     * for each specific CPU.
+     */
+    s->timer_irq = qemu_allocate_irqs(mpcore_timer_irq_handler,
+                                      s, (s->num_cpu + 1) * 2);
+    for (i = 0; i < (s->num_cpu + 1) * 2; i++) {
+        /* Timers at 0x600, 0x700, ...; watchdogs at 0x620, 0x720, ... */
+        target_phys_addr_t offset = 0x600 + (i >> 1) * 0x100 + (i & 1) * 0x20;
+        memory_region_add_subregion(&s->container, offset,
+                                    sysbus_mmio_get_region(busdev, i));
+    }
     memory_region_add_subregion(&s->container, 0x1000, &s->gic.iomem);
+    /* Wire up the interrupt from each watchdog and timer. */
+    for (i = 0; i < s->num_cpu * 2; i++) {
+        sysbus_connect_irq(busdev, i, s->timer_irq[i]);
+    }
 }
 
 static int mpcore_priv_init(SysBusDevice *dev)
 {
     mpcore_priv_state *s = FROM_SYSBUSGIC(mpcore_priv_state, dev);
-    int i;
 
     gic_init(&s->gic, s->num_cpu);
+    s->mptimer = qdev_create(NULL, "arm_mptimer");
+    qdev_prop_set_uint32(s->mptimer, "num-cpu", s->num_cpu);
+    qdev_init_nofail(s->mptimer);
     mpcore_priv_map_setup(s);
     sysbus_init_mmio(dev, &s->container);
-    for (i = 0; i < s->num_cpu * 2; i++) {
-        mpcore_timer_init(s, &s->timer[i], i);
-    }
     return 0;
 }