
[v7,06/12] migration/dirtyrate: Record hash results for each sampled page

Message ID 1599661096-127913-7-git-send-email-zhengchuan@huawei.com
State New
Series *** A Method for evaluating dirty page rate ***

Commit Message

Zheng Chuan Sept. 9, 2020, 2:18 p.m. UTC
Record hash results for each sampled page; crc32 is used to calculate
the hash of each sampled region of TARGET_PAGE_SIZE bytes.

Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
Reviewed-by: David Edmondson <david.edmondson@oracle.com>
---
 migration/dirtyrate.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 125 insertions(+)
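
For background, a minimal standalone sketch of how zlib's crc32() is seeded
and applied to one page-sized sample; the helper name and parameters here are
illustrative and are not part of the patch:

    #include <zlib.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: a seed of 0 starts a fresh checksum, and crc32()
     * then covers page[0..page_size), matching how the patch hashes each
     * TARGET_PAGE_SIZE-sized sample. */
    static uint32_t hash_one_page(const unsigned char *page, size_t page_size)
    {
        return (uint32_t)crc32(0, page, page_size);
    }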

Comments

Li Qiang Sept. 10, 2020, 1:51 p.m. UTC | #1
Chuan Zheng <zhengchuan@huawei.com> wrote on Wed, Sep 9, 2020 at 10:14 PM:
>
> Record hash results for each sampled page; crc32 is used to calculate
> the hash of each sampled region of TARGET_PAGE_SIZE bytes.
>
> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> Reviewed-by: David Edmondson <david.edmondson@oracle.com>
> ---
>  migration/dirtyrate.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 125 insertions(+)
>
> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> index d56cd93..bc87269 100644
> --- a/migration/dirtyrate.c
> +++ b/migration/dirtyrate.c
> @@ -10,6 +10,7 @@
>   * See the COPYING file in the top-level directory.
>   */
>
> +#include <zlib.h>
>  #include "qemu/osdep.h"
>  #include "qapi/error.h"
>  #include "cpu.h"
> @@ -68,6 +69,130 @@ static void update_dirtyrate(uint64_t msec)
>      DirtyStat.dirty_rate = dirtyrate;
>  }
>
> +/*
> + * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
> + * in ramblock, which starts from ramblock base address.
> + */
> +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
> +                                      uint64_t vfn)
> +{
> +    uint32_t crc;
> +
> +    crc = crc32(0, (info->ramblock_addr +
> +                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);
> +
> +    return crc;
> +}
> +
> +static int save_ramblock_hash(struct RamblockDirtyInfo *info)
> +{
> +    unsigned int sample_pages_count;
> +    int i;
> +    GRand *rand;
> +
> +    sample_pages_count = info->sample_pages_count;
> +
> +    /* ramblock size less than one page, return success to skip this ramblock */
> +    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
> +        return 0;
> +    }
> +
> +    info->hash_result = g_try_malloc0_n(sample_pages_count,
> +                                        sizeof(uint32_t));
> +    if (!info->hash_result) {
> +        return -1;
> +    }
> +
> +    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
> +                                            sizeof(uint64_t));
> +    if (!info->sample_page_vfn) {
> +        g_free(info->hash_result);
> +        return -1;
> +    }
> +
> +    rand  = g_rand_new();
> +    for (i = 0; i < sample_pages_count; i++) {
> +        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
> +                                                    info->ramblock_pages - 1);
> +        info->hash_result[i] = get_ramblock_vfn_hash(info,
> +                                                     info->sample_page_vfn[i]);
> +    }
> +    g_rand_free(rand);
> +
> +    return 0;
> +}
> +
> +static void get_ramblock_dirty_info(RAMBlock *block,
> +                                    struct RamblockDirtyInfo *info,
> +                                    struct DirtyRateConfig *config)
> +{
> +    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
> +
> +    /* Right shift 30 bits to calc ramblock size in GB */
> +    info->sample_pages_count = (qemu_ram_get_used_length(block) *
> +                                sample_pages_per_gigabytes) >> 30;
> +    /* Right shift TARGET_PAGE_BITS to calc page count */
> +    info->ramblock_pages = qemu_ram_get_used_length(block) >>
> +                           TARGET_PAGE_BITS;
> +    info->ramblock_addr = qemu_ram_get_host_addr(block);
> +    strcpy(info->idstr, qemu_ram_get_idstr(block));
> +}
> +
> +static struct RamblockDirtyInfo *
> +alloc_ramblock_dirty_info(int *block_index,
> +                          struct RamblockDirtyInfo *block_dinfo)
> +{
> +    struct RamblockDirtyInfo *info = NULL;
> +    int index = *block_index;
> +
> +    if (!block_dinfo) {
> +        index = 0;
> +        block_dinfo = g_try_new(struct RamblockDirtyInfo, 1);
> +    } else {
> +        index++;
> +        block_dinfo = g_try_realloc(block_dinfo, (index + 1) *
> +                                    sizeof(struct RamblockDirtyInfo));
> +    }
> +    if (!block_dinfo) {
> +        return NULL;

What if this case happens? The 'index' has already been increased, but
the allocation failed.

> +    }
> +
> +    info = &block_dinfo[index];
> +    *block_index = index;
> +    memset(info, 0, sizeof(struct RamblockDirtyInfo));
> +
> +    return block_dinfo;
> +}
> +
> +static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
> +                                     struct DirtyRateConfig config,
> +                                     int *block_index)
> +{
> +    struct RamblockDirtyInfo *info = NULL;
> +    struct RamblockDirtyInfo *dinfo = NULL;
> +    RAMBlock *block = NULL;
> +    int index = 0;
> +
> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
> +        dinfo = alloc_ramblock_dirty_info(&index, dinfo);

Here, for every migratable block, you call 'alloc_ramblock_dirty_info'.
This also complicates 'alloc_ramblock_dirty_info' itself:
1. you need to differentiate the first element from the others.
2. you need to use two out parameters, which can be confusing.

Could we allocate this array in one go? That may take two iterations
over the ram block list, but I think it would make the code simpler
and cleaner.

Thanks,
Li Qiang

> +        if (dinfo == NULL) {
> +            return -1;
> +        }
> +        info = &dinfo[index];
> +        get_ramblock_dirty_info(block, info, &config);
> +        if (save_ramblock_hash(info) < 0) {
> +            *block_dinfo = dinfo;
> +            *block_index = index;

As in the first comment, the 'index' here does not seem right.


Thanks,
Li Qiang
> +            return -1;
> +        }
> +    }
> +
> +    *block_dinfo = dinfo;
> +    *block_index = index;
> +
> +    return 0;
> +}
> +
>  static void calculate_dirtyrate(struct DirtyRateConfig config)
>  {
>      /* todo */
> --
> 1.8.3.1
>
>
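
A minimal sketch of the one-shot allocation suggested in the comment above;
the helper name, the counting pass, and the g_try_new0() call are assumptions
for illustration, not code from the posted series:

    /* Count migratable ramblocks in a first pass, then allocate the whole
     * RamblockDirtyInfo array with a single call, so no per-block realloc
     * or index bookkeeping is needed. */
    static struct RamblockDirtyInfo *
    alloc_ramblock_dirty_info_once(int *block_count)
    {
        struct RamblockDirtyInfo *dinfo;
        RAMBlock *block;
        int count = 0;

        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            count++;
        }
        if (count == 0) {
            return NULL;
        }

        dinfo = g_try_new0(struct RamblockDirtyInfo, count);
        if (!dinfo) {
            return NULL;
        }

        *block_count = count;
        return dinfo;
    }
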
Zheng Chuan Sept. 13, 2020, 2:59 a.m. UTC | #2
On 2020/9/10 21:51, Li Qiang wrote:
> Chuan Zheng <zhengchuan@huawei.com> wrote on Wed, Sep 9, 2020 at 10:14 PM:
>>
>> Record hash results for each sampled page; crc32 is used to calculate
>> the hash of each sampled region of TARGET_PAGE_SIZE bytes.
>>
>> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
>> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
>> Reviewed-by: David Edmondson <david.edmondson@oracle.com>
>> ---
>>  migration/dirtyrate.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 125 insertions(+)
>>
>> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
>> index d56cd93..bc87269 100644
>> --- a/migration/dirtyrate.c
>> +++ b/migration/dirtyrate.c
>> @@ -10,6 +10,7 @@
>>   * See the COPYING file in the top-level directory.
>>   */
>>
>> +#include <zlib.h>
>>  #include "qemu/osdep.h"
>>  #include "qapi/error.h"
>>  #include "cpu.h"
>> @@ -68,6 +69,130 @@ static void update_dirtyrate(uint64_t msec)
>>      DirtyStat.dirty_rate = dirtyrate;
>>  }
>>
>> +/*
>> + * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
>> + * in ramblock, which starts from ramblock base address.
>> + */
>> +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
>> +                                      uint64_t vfn)
>> +{
>> +    uint32_t crc;
>> +
>> +    crc = crc32(0, (info->ramblock_addr +
>> +                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);
>> +
>> +    return crc;
>> +}
>> +
>> +static int save_ramblock_hash(struct RamblockDirtyInfo *info)
>> +{
>> +    unsigned int sample_pages_count;
>> +    int i;
>> +    GRand *rand;
>> +
>> +    sample_pages_count = info->sample_pages_count;
>> +
>> +    /* ramblock size less than one page, return success to skip this ramblock */
>> +    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
>> +        return 0;
>> +    }
>> +
>> +    info->hash_result = g_try_malloc0_n(sample_pages_count,
>> +                                        sizeof(uint32_t));
>> +    if (!info->hash_result) {
>> +        return -1;
>> +    }
>> +
>> +    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
>> +                                            sizeof(uint64_t));
>> +    if (!info->sample_page_vfn) {
>> +        g_free(info->hash_result);
>> +        return -1;
>> +    }
>> +
>> +    rand  = g_rand_new();
>> +    for (i = 0; i < sample_pages_count; i++) {
>> +        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
>> +                                                    info->ramblock_pages - 1);
>> +        info->hash_result[i] = get_ramblock_vfn_hash(info,
>> +                                                     info->sample_page_vfn[i]);
>> +    }
>> +    g_rand_free(rand);
>> +
>> +    return 0;
>> +}
>> +
>> +static void get_ramblock_dirty_info(RAMBlock *block,
>> +                                    struct RamblockDirtyInfo *info,
>> +                                    struct DirtyRateConfig *config)
>> +{
>> +    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
>> +
>> +    /* Right shift 30 bits to calc ramblock size in GB */
>> +    info->sample_pages_count = (qemu_ram_get_used_length(block) *
>> +                                sample_pages_per_gigabytes) >> 30;
>> +    /* Right shift TARGET_PAGE_BITS to calc page count */
>> +    info->ramblock_pages = qemu_ram_get_used_length(block) >>
>> +                           TARGET_PAGE_BITS;
>> +    info->ramblock_addr = qemu_ram_get_host_addr(block);
>> +    strcpy(info->idstr, qemu_ram_get_idstr(block));
>> +}
>> +
>> +static struct RamblockDirtyInfo *
>> +alloc_ramblock_dirty_info(int *block_index,
>> +                          struct RamblockDirtyInfo *block_dinfo)
>> +{
>> +    struct RamblockDirtyInfo *info = NULL;
>> +    int index = *block_index;
>> +
>> +    if (!block_dinfo) {
>> +        index = 0;
>> +        block_dinfo = g_try_new(struct RamblockDirtyInfo, 1);
>> +    } else {
>> +        index++;
>> +        block_dinfo = g_try_realloc(block_dinfo, (index + 1) *
>> +                                    sizeof(struct RamblockDirtyInfo));
>> +    }
>> +    if (!block_dinfo) {
>> +        return NULL;
> 
> What if this case happens? The 'index' has already been increased, but
> the allocation failed.
> 
>> +    }
>> +
>> +    info = &block_dinfo[index];
>> +    *block_index = index;
>> +    memset(info, 0, sizeof(struct RamblockDirtyInfo));
>> +
>> +    return block_dinfo;
>> +}
>> +
>> +static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
>> +                                     struct DirtyRateConfig config,
>> +                                     int *block_index)
>> +{
>> +    struct RamblockDirtyInfo *info = NULL;
>> +    struct RamblockDirtyInfo *dinfo = NULL;
>> +    RAMBlock *block = NULL;
>> +    int index = 0;
>> +
>> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
>> +        dinfo = alloc_ramblock_dirty_info(&index, dinfo);
> 
> Here, for every migratable block, you call 'alloc_ramblock_dirty_info'.
> This also complicates 'alloc_ramblock_dirty_info' itself:
> 1. you need to differentiate the first element from the others.
> 2. you need to use two out parameters, which can be confusing.
> 
> Could we allocate this array in one go? That may take two iterations
> over the ram block list, but I think it would make the code simpler
> and cleaner.
> 
> Thanks,
> Li Qiang
> 
Hi, Qiang.
Thank you for your review.
I am not sure if I fully understand what you mean :)
You mean we first record the total count in a first iteration over
the ram block list, and then allocate the array in one go?

>> +        if (dinfo == NULL) {
>> +            return -1;
>> +        }
>> +        info = &dinfo[index];
>> +        get_ramblock_dirty_info(block, info, &config);
>> +        if (save_ramblock_hash(info) < 0) {
>> +            *block_dinfo = dinfo;
>> +            *block_index = index;
> 
> As in the first comment, the 'index' here does not seem right.
> 
> 
> Thanks,
> Li Qiang
>> +            return -1;
>> +        }
>> +    }
>> +
>> +    *block_dinfo = dinfo;
>> +    *block_index = index;
>> +
>> +    return 0;
>> +}
>> +
>>  static void calculate_dirtyrate(struct DirtyRateConfig config)
>>  {
>>      /* todo */
>> --
>> 1.8.3.1
>>
>>
> 
> .
>
Li Qiang Sept. 14, 2020, 11:25 a.m. UTC | #3
Zheng Chuan <zhengchuan@huawei.com> wrote on Sun, Sep 13, 2020 at 10:59 AM:
>
>
>
> On 2020/9/10 21:51, Li Qiang wrote:
> > Chuan Zheng <zhengchuan@huawei.com> wrote on Wed, Sep 9, 2020 at 10:14 PM:
> >>
> >> Record hash results for each sampled page; crc32 is used to calculate
> >> the hash of each sampled region of TARGET_PAGE_SIZE bytes.
> >>
> >> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> >> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> >> Reviewed-by: David Edmondson <david.edmondson@oracle.com>
> >> ---
> >>  migration/dirtyrate.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++++
> >>  1 file changed, 125 insertions(+)
> >>
> >> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> >> index d56cd93..bc87269 100644
> >> --- a/migration/dirtyrate.c
> >> +++ b/migration/dirtyrate.c
> >> @@ -10,6 +10,7 @@
> >>   * See the COPYING file in the top-level directory.
> >>   */
> >>
> >> +#include <zlib.h>
> >>  #include "qemu/osdep.h"
> >>  #include "qapi/error.h"
> >>  #include "cpu.h"
> >> @@ -68,6 +69,130 @@ static void update_dirtyrate(uint64_t msec)
> >>      DirtyStat.dirty_rate = dirtyrate;
> >>  }
> >>
> >> +/*
> >> + * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
> >> + * in ramblock, which starts from ramblock base address.
> >> + */
> >> +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
> >> +                                      uint64_t vfn)
> >> +{
> >> +    uint32_t crc;
> >> +
> >> +    crc = crc32(0, (info->ramblock_addr +
> >> +                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);
> >> +
> >> +    return crc;
> >> +}
> >> +
> >> +static int save_ramblock_hash(struct RamblockDirtyInfo *info)
> >> +{
> >> +    unsigned int sample_pages_count;
> >> +    int i;
> >> +    GRand *rand;
> >> +
> >> +    sample_pages_count = info->sample_pages_count;
> >> +
> >> +    /* ramblock size less than one page, return success to skip this ramblock */
> >> +    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
> >> +        return 0;
> >> +    }
> >> +
> >> +    info->hash_result = g_try_malloc0_n(sample_pages_count,
> >> +                                        sizeof(uint32_t));
> >> +    if (!info->hash_result) {
> >> +        return -1;
> >> +    }
> >> +
> >> +    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
> >> +                                            sizeof(uint64_t));
> >> +    if (!info->sample_page_vfn) {
> >> +        g_free(info->hash_result);
> >> +        return -1;
> >> +    }
> >> +
> >> +    rand  = g_rand_new();
> >> +    for (i = 0; i < sample_pages_count; i++) {
> >> +        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
> >> +                                                    info->ramblock_pages - 1);
> >> +        info->hash_result[i] = get_ramblock_vfn_hash(info,
> >> +                                                     info->sample_page_vfn[i]);
> >> +    }
> >> +    g_rand_free(rand);
> >> +
> >> +    return 0;
> >> +}
> >> +
> >> +static void get_ramblock_dirty_info(RAMBlock *block,
> >> +                                    struct RamblockDirtyInfo *info,
> >> +                                    struct DirtyRateConfig *config)
> >> +{
> >> +    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
> >> +
> >> +    /* Right shift 30 bits to calc ramblock size in GB */
> >> +    info->sample_pages_count = (qemu_ram_get_used_length(block) *
> >> +                                sample_pages_per_gigabytes) >> 30;
> >> +    /* Right shift TARGET_PAGE_BITS to calc page count */
> >> +    info->ramblock_pages = qemu_ram_get_used_length(block) >>
> >> +                           TARGET_PAGE_BITS;
> >> +    info->ramblock_addr = qemu_ram_get_host_addr(block);
> >> +    strcpy(info->idstr, qemu_ram_get_idstr(block));
> >> +}
> >> +
> >> +static struct RamblockDirtyInfo *
> >> +alloc_ramblock_dirty_info(int *block_index,
> >> +                          struct RamblockDirtyInfo *block_dinfo)
> >> +{
> >> +    struct RamblockDirtyInfo *info = NULL;
> >> +    int index = *block_index;
> >> +
> >> +    if (!block_dinfo) {
> >> +        index = 0;
> >> +        block_dinfo = g_try_new(struct RamblockDirtyInfo, 1);
> >> +    } else {
> >> +        index++;
> >> +        block_dinfo = g_try_realloc(block_dinfo, (index + 1) *
> >> +                                    sizeof(struct RamblockDirtyInfo));
> >> +    }
> >> +    if (!block_dinfo) {
> >> +        return NULL;
> >
> > What if this case happens? The 'index' has already been increased, but
> > the allocation failed.
> >
> >> +    }
> >> +
> >> +    info = &block_dinfo[index];
> >> +    *block_index = index;
> >> +    memset(info, 0, sizeof(struct RamblockDirtyInfo));
> >> +
> >> +    return block_dinfo;
> >> +}
> >> +
> >> +static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
> >> +                                     struct DirtyRateConfig config,
> >> +                                     int *block_index)
> >> +{
> >> +    struct RamblockDirtyInfo *info = NULL;
> >> +    struct RamblockDirtyInfo *dinfo = NULL;
> >> +    RAMBlock *block = NULL;
> >> +    int index = 0;
> >> +
> >> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
> >> +        dinfo = alloc_ramblock_dirty_info(&index, dinfo);
> >
> > Here, for every migratable block, you call 'alloc_ramblock_dirty_info'.
> > This also complicates 'alloc_ramblock_dirty_info' itself:
> > 1. you need to differentiate the first element from the others.
> > 2. you need to use two out parameters, which can be confusing.
> >
> > Could we allocate this array in one go? That may take two iterations
> > over the ram block list, but I think it would make the code simpler
> > and cleaner.
> >
> > Thanks,
> > Li Qiang
> >
> Hi, Qiang.
> Thank you for your review.
> I am not sure if I fully understand what you mean :)
> You mean we first record the total count in a first iteration over
> the ram block list, and then allocate the array in one go?

Hi Chuan,

Yes, this is what I mean.
I have just seen your new patches and will review them as soon as possible.

Thanks,
Li Qiang

>
> >> +        if (dinfo == NULL) {
> >> +            return -1;
> >> +        }
> >> +        info = &dinfo[index];
> >> +        get_ramblock_dirty_info(block, info, &config);
> >> +        if (save_ramblock_hash(info) < 0) {
> >> +            *block_dinfo = dinfo;
> >> +            *block_index = index;
> >
> > As in the first comment, the 'index' here does not seem right.
> >
> >
> > Thanks,
> > Li Qiang
> >> +            return -1;
> >> +        }
> >> +    }
> >> +
> >> +    *block_dinfo = dinfo;
> >> +    *block_index = index;
> >> +
> >> +    return 0;
> >> +}
> >> +
> >>  static void calculate_dirtyrate(struct DirtyRateConfig config)
> >>  {
> >>      /* todo */
> >> --
> >> 1.8.3.1
> >>
> >>
> >
> > .
> >

Patch

diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index d56cd93..bc87269 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -10,6 +10,7 @@ 
  * See the COPYING file in the top-level directory.
  */
 
+#include <zlib.h>
 #include "qemu/osdep.h"
 #include "qapi/error.h"
 #include "cpu.h"
@@ -68,6 +69,130 @@  static void update_dirtyrate(uint64_t msec)
     DirtyStat.dirty_rate = dirtyrate;
 }
 
+/*
+ * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
+ * in ramblock, which starts from ramblock base address.
+ */
+static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
+                                      uint64_t vfn)
+{
+    uint32_t crc;
+
+    crc = crc32(0, (info->ramblock_addr +
+                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);
+
+    return crc;
+}
+
+static int save_ramblock_hash(struct RamblockDirtyInfo *info)
+{
+    unsigned int sample_pages_count;
+    int i;
+    GRand *rand;
+
+    sample_pages_count = info->sample_pages_count;
+
+    /* ramblock size less than one page, return success to skip this ramblock */
+    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
+        return 0;
+    }
+
+    info->hash_result = g_try_malloc0_n(sample_pages_count,
+                                        sizeof(uint32_t));
+    if (!info->hash_result) {
+        return -1;
+    }
+
+    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
+                                            sizeof(uint64_t));
+    if (!info->sample_page_vfn) {
+        g_free(info->hash_result);
+        return -1;
+    }
+
+    rand  = g_rand_new();
+    for (i = 0; i < sample_pages_count; i++) {
+        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
+                                                    info->ramblock_pages - 1);
+        info->hash_result[i] = get_ramblock_vfn_hash(info,
+                                                     info->sample_page_vfn[i]);
+    }
+    g_rand_free(rand);
+
+    return 0;
+}
+
+static void get_ramblock_dirty_info(RAMBlock *block,
+                                    struct RamblockDirtyInfo *info,
+                                    struct DirtyRateConfig *config)
+{
+    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
+
+    /* Right shift 30 bits to calc ramblock size in GB */
+    info->sample_pages_count = (qemu_ram_get_used_length(block) *
+                                sample_pages_per_gigabytes) >> 30;
+    /* Right shift TARGET_PAGE_BITS to calc page count */
+    info->ramblock_pages = qemu_ram_get_used_length(block) >>
+                           TARGET_PAGE_BITS;
+    info->ramblock_addr = qemu_ram_get_host_addr(block);
+    strcpy(info->idstr, qemu_ram_get_idstr(block));
+}
+
+static struct RamblockDirtyInfo *
+alloc_ramblock_dirty_info(int *block_index,
+                          struct RamblockDirtyInfo *block_dinfo)
+{
+    struct RamblockDirtyInfo *info = NULL;
+    int index = *block_index;
+
+    if (!block_dinfo) {
+        index = 0;
+        block_dinfo = g_try_new(struct RamblockDirtyInfo, 1);
+    } else {
+        index++;
+        block_dinfo = g_try_realloc(block_dinfo, (index + 1) *
+                                    sizeof(struct RamblockDirtyInfo));
+    }
+    if (!block_dinfo) {
+        return NULL;
+    }
+
+    info = &block_dinfo[index];
+    *block_index = index;
+    memset(info, 0, sizeof(struct RamblockDirtyInfo));
+
+    return block_dinfo;
+}
+
+static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
+                                     struct DirtyRateConfig config,
+                                     int *block_index)
+{
+    struct RamblockDirtyInfo *info = NULL;
+    struct RamblockDirtyInfo *dinfo = NULL;
+    RAMBlock *block = NULL;
+    int index = 0;
+
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+        dinfo = alloc_ramblock_dirty_info(&index, dinfo);
+        if (dinfo == NULL) {
+            return -1;
+        }
+        info = &dinfo[index];
+        get_ramblock_dirty_info(block, info, &config);
+        if (save_ramblock_hash(info) < 0) {
+            *block_dinfo = dinfo;
+            *block_index = index;
+            return -1;
+        }
+    }
+
+    *block_dinfo = dinfo;
+    *block_index = index;
+
+    return 0;
+}
+
 static void calculate_dirtyrate(struct DirtyRateConfig config)
 {
     /* todo */