
[v2,9/9] arm64: KVM: vgic: deal with GIC sub-page alignment

Message ID 1403169693-13982-10-git-send-email-marc.zyngier@arm.com
State New

Commit Message

Marc Zyngier June 19, 2014, 9:21 a.m. UTC
The GIC CPU interface is always 4k aligned. If the host is using
64k pages, it is critical to place the guest's GICC interface at the
same relative alignment as the host's GICV. Failure to do so makes it
impossible for the guest to handle interrupts.

Add a KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET attribute for the VGIC, allowing
userspace to retrieve the GICV offset within a page. It then becomes
trivial to adjust the guest's GICC base address.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm/include/uapi/asm/kvm.h   | 1 +
 arch/arm64/include/uapi/asm/kvm.h | 1 +
 virt/kvm/arm/vgic.c               | 7 +++++++
 3 files changed, 9 insertions(+)
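
A rough sketch of how userspace could consume the new attribute (the
adjust_gicc_base() helper and its parameters are illustrative, not part
of the patch; the plumbing is the standard KVM_GET_DEVICE_ATTR path):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Query the host GICV offset within its page and place the guest GICC
     * at the same offset, assuming a 64K host page size.  vgic_fd is a
     * device fd from KVM_CREATE_DEVICE(KVM_DEV_TYPE_ARM_VGIC_V2).
     */
    static int adjust_gicc_base(int vgic_fd, uint64_t *gicc_base)
    {
        uint32_t offset = 0;
        struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET,
            .attr  = 0,
            .addr  = (uint64_t)(unsigned long)&offset,
        };

        if (ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr))
            return -1;

        /* keep the guest GICC at the host GICV's offset within the 64K page */
        *gicc_base = (*gicc_base & ~0xffffULL) | offset;
        return 0;
    }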

Comments

Joel Schopp June 24, 2014, 7:28 p.m. UTC | #1
On 06/19/2014 04:21 AM, Marc Zyngier wrote:
> The GIC CPU interface is always 4k aligned. If the host is using
> 64k pages, it is critical to place the guest's GICC interface at the
> same relative alignment as the host's GICV. Failure to do so results
> in an impossibility for the guest to deal with interrupts.
>
> Add a KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET attribute for the VGIC, allowing
> userspace to retrieve the GICV offset in a page. It becomes then trivial
> to adjust the GICC base address for the guest.

Does this mean there is a corresponding patch for qemu?

>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>   arch/arm/include/uapi/asm/kvm.h   | 1 +
>   arch/arm64/include/uapi/asm/kvm.h | 1 +
>   virt/kvm/arm/vgic.c               | 7 +++++++
>   3 files changed, 9 insertions(+)
>
> diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
> index 8b51c1a..056b782 100644
> --- a/arch/arm/include/uapi/asm/kvm.h
> +++ b/arch/arm/include/uapi/asm/kvm.h
> @@ -174,6 +174,7 @@ struct kvm_arch_memory_slot {
>   #define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
>   #define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
>   #define KVM_DEV_ARM_VGIC_GRP_NR_IRQS	3
> +#define KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET 4
>   
>   /* KVM_IRQ_LINE irq field index values */
>   #define KVM_ARM_IRQ_TYPE_SHIFT		24
> diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
> index b5cd6ed..5513de4 100644
> --- a/arch/arm64/include/uapi/asm/kvm.h
> +++ b/arch/arm64/include/uapi/asm/kvm.h
> @@ -160,6 +160,7 @@ struct kvm_arch_memory_slot {
>   #define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
>   #define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
>   #define KVM_DEV_ARM_VGIC_GRP_NR_IRQS	3
> +#define KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET 4
>   
>   /* KVM_IRQ_LINE irq field index values */
>   #define KVM_ARM_IRQ_TYPE_SHIFT		24
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index b0cd417..68ac9c6 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -2228,6 +2228,12 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
>   		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
>   		break;
>   	}
> +	case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET: {
> +		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
> +		u32 val = vgic->vcpu_base & ~PAGE_MASK;
> +		r = put_user(val, uaddr);
> +		break;
> +	}
>   
>   	}
>   
> @@ -2265,6 +2271,7 @@ static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
>   		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
>   		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
>   	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
> +	case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET:
>   		return 0;
>   	}
>   	return -ENXIO;
Peter Maydell June 24, 2014, 10:28 p.m. UTC | #2
On 24 June 2014 20:28, Joel Schopp <joel.schopp@amd.com> wrote:
>
> On 06/19/2014 04:21 AM, Marc Zyngier wrote:
>>
>> The GIC CPU interface is always 4k aligned. If the host is using
>> 64k pages, it is critical to place the guest's GICC interface at the
>> same relative alignment as the host's GICV. Failure to do so results
>> in an impossibility for the guest to deal with interrupts.
>>
>> Add a KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET attribute for the VGIC, allowing
>> userspace to retrieve the GICV offset in a page. It becomes then trivial
>> to adjust the GICC base address for the guest.
>
>
> Does this mean there is a corresponding patch for qemu?

Not as far as I know. It's a bit awkward on the QEMU end because
we really want to provide the guest a consistent memory map
regardless of the host CPU. So at best we'd probably use it to
say "sorry, can't run on this CPU/host kernel".

(That said, if you think you can make QEMU usefully use the
information and want to write a QEMU patch I'm not averse
to the idea.)

kvmtool is probably better placed to take advantage of it since
it takes more of a "deal with what the host provides you"
philosophy.

thanks
-- PMM
Joel Schopp June 25, 2014, 2:56 p.m. UTC | #3
On 06/24/2014 05:28 PM, Peter Maydell wrote:
> On 24 June 2014 20:28, Joel Schopp <joel.schopp@amd.com> wrote:
>> On 06/19/2014 04:21 AM, Marc Zyngier wrote:
>>> The GIC CPU interface is always 4k aligned. If the host is using
>>> 64k pages, it is critical to place the guest's GICC interface at the
>>> same relative alignment as the host's GICV. Failure to do so results
>>> in an impossibility for the guest to deal with interrupts.
>>>
>>> Add a KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET attribute for the VGIC, allowing
>>> userspace to retrieve the GICV offset in a page. It becomes then trivial
>>> to adjust the GICC base address for the guest.
>>
>> Does this mean there is a corresponding patch for qemu?
> Not as far as I know. It's a bit awkward on the QEMU end because
> we really want to provide the guest a consistent memory map
> regardless of the host CPU. So at best we'd probably use it to
> say "sorry, can't run on this CPU/host kernel".
I think most arm64 servers are going to run with 64k pages.  It seems 
like a major problem to have qemu not work on these systems.

>
> (That said, if you think you can make QEMU usefully use the
> information and want to write a QEMU patch I'm not averse
> to the idea.)
I'll have to think about this approach some more, but I'm not opposed to 
doing the work if I conclude it's the right thing to do.

>
> kvmtool is probably better placed to take advantage of it since
> it takes more of a "deal with what the host provides you"
> philosophy.
kvmtool is fun as a play toy, but in the real world nobody is building 
clouds using kvmtool; they use kvm with qemu.

>
> thanks
> -- PMM
Marc Zyngier June 25, 2014, 3 p.m. UTC | #4
On 25/06/14 15:56, Joel Schopp wrote:
> 
> On 06/24/2014 05:28 PM, Peter Maydell wrote:
>> On 24 June 2014 20:28, Joel Schopp <joel.schopp@amd.com> wrote:
>>> On 06/19/2014 04:21 AM, Marc Zyngier wrote:
>>>> The GIC CPU interface is always 4k aligned. If the host is using
>>>> 64k pages, it is critical to place the guest's GICC interface at the
>>>> same relative alignment as the host's GICV. Failure to do so results
>>>> in an impossibility for the guest to deal with interrupts.
>>>>
>>>> Add a KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET attribute for the VGIC, allowing
>>>> userspace to retrieve the GICV offset in a page. It becomes then trivial
>>>> to adjust the GICC base address for the guest.
>>>
>>> Does this mean there is a corresponding patch for qemu?
>> Not as far as I know. It's a bit awkward on the QEMU end because
>> we really want to provide the guest a consistent memory map
>> regardless of the host CPU. So at best we'd probably use it to
>> say "sorry, can't run on this CPU/host kernel".
> I think most arm64 servers are going to run with 64k pages.  It seems 
> like a major problem to have qemu not work on these systems.

How many of them will have the GICC *not* 64kB aligned?

>>
>> (That said, if you think you can make QEMU usefully use the
>> information and want to write a QEMU patch I'm not averse
>> to the idea.)
> I'll have to think about this approach some more, but I'm not opposed to 
> doing the work if I thought it was the right thing to do.
> 
>>
>> kvmtool is probably better placed to take advantage of it since
>> it takes more of a "deal with what the host provides you"
>> philosophy.
> kvmtool is fun as a play toy, but in the real world nobody is building 
> clouds using kvmtool, they use kvm with qemu.

A play toy? Hmmm. Do you realise that most of KVM on arm64 has been
written using this play toy?

	M.
Joel Schopp June 25, 2014, 3:09 p.m. UTC | #5
On 06/25/2014 10:00 AM, Marc Zyngier wrote:
> On 25/06/14 15:56, Joel Schopp wrote:
>> On 06/24/2014 05:28 PM, Peter Maydell wrote:
>>> On 24 June 2014 20:28, Joel Schopp <joel.schopp@amd.com> wrote:
>>>> On 06/19/2014 04:21 AM, Marc Zyngier wrote:
>>>>> The GIC CPU interface is always 4k aligned. If the host is using
>>>>> 64k pages, it is critical to place the guest's GICC interface at the
>>>>> same relative alignment as the host's GICV. Failure to do so results
>>>>> in an impossibility for the guest to deal with interrupts.
>>>>>
>>>>> Add a KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET attribute for the VGIC, allowing
>>>>> userspace to retrieve the GICV offset in a page. It becomes then trivial
>>>>> to adjust the GICC base address for the guest.
>>>> Does this mean there is a corresponding patch for qemu?
>>> Not as far as I know. It's a bit awkward on the QEMU end because
>>> we really want to provide the guest a consistent memory map
>>> regardless of the host CPU. So at best we'd probably use it to
>>> say "sorry, can't run on this CPU/host kernel".
>> I think most arm64 servers are going to run with 64k pages.  It seems
>> like a major problem to have qemu not work on these systems.
> How many of them will be with the GICC *not* 64kB aligned?

If I'm reading the Server Base System Architecture v2.2 Appendix F 
correctly, all of them.  Here's the relevant quote: "In a 64KB 
translation granule system this means that GICC needs to have its base 
at 4KB below a 64KB boundary."
>
>>> (That said, if you think you can make QEMU usefully use the
>>> information and want to write a QEMU patch I'm not averse
>>> to the idea.)
>> I'll have to think about this approach some more, but I'm not opposed to
>> doing the work if I thought it was the right thing to do.
>>
>>> kvmtool is probably better placed to take advantage of it since
>>> it takes more of a "deal with what the host provides you"
>>> philosophy.
>> kvmtool is fun as a play toy, but in the real world nobody is building
>> clouds using kvmtool, they use kvm with qemu.
> A play toy? Hmmm. Do you realise that most of KVM on arm64 has been
> written using this play toy?

I meant no insult.  I really like kvmtool.  I'm just saying that the 
eventual end users of these systems will want to run qemu and not kvmtool.
Peter Maydell June 25, 2014, 5:34 p.m. UTC | #6
On 25 June 2014 15:56, Joel Schopp <joel.schopp@amd.com> wrote:
> On 06/24/2014 05:28 PM, Peter Maydell wrote:
>> On 24 June 2014 20:28, Joel Schopp <joel.schopp@amd.com> wrote:
>>> Does this mean there is a corresponding patch for qemu?
>>
>> Not as far as I know. It's a bit awkward on the QEMU end because
>> we really want to provide the guest a consistent memory map
>> regardless of the host CPU. So at best we'd probably use it to
>> say "sorry, can't run on this CPU/host kernel".
>
> I think most arm64 servers are going to run with 64k pages.  It seems like a
> major problem to have qemu not work on these systems.

QEMU should already work fine on servers with 64K pages;
you just need to have the host offset of the GICV within the 64K page
and the guest offset of the GICC within the 64K page be the same
(and at the moment both must also be zero, which I believe is true
for all of them except possibly the AEM model;
counterexamples welcome). Disclaimer: I haven't personally
tested this, but on the other hand I don't think anybody's
reported it as not working either.

Notice that we don't care at all about the host's GICC offset,
because it's the GICV we're going to use as the guest GICC.
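
A minimal sketch of that constraint, assuming 64K host pages (the
gicc_placement_ok() helper and both base-address names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define SZ_64K 0x10000ULL

    /*
     * The guest GICC can be backed by the host GICV only if both sit at
     * the same offset within their 64K page; with current kernels that
     * shared offset must additionally be zero.
     */
    static bool gicc_placement_ok(uint64_t host_gicv_base, uint64_t guest_gicc_base)
    {
        uint64_t host_off  = host_gicv_base  & (SZ_64K - 1);
        uint64_t guest_off = guest_gicc_base & (SZ_64K - 1);

        return host_off == guest_off && host_off == 0;
    }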

That said, yes, QEMU ought really to be able to provide
support for "use what the host provides", in the same way
that we support "-cpu host" to mean 'virtualize whatever CPU
the host has'. It's just a little awkward because you're working
against the grain of some of QEMU's design; but it ought
to be usable for things like the "virt" machine model.

For the cases where QEMU is being used to emulate
specific hardware to the guest (which we don't do right
now because we don't model any 64 bit boards other than
virt), we could use this ioctl to say "can't run this guest
on this host"; this is basically diagnosing a case in the
same class as "can't run a guest with a GICv2 if your
host's GICv3 doesn't implement v2 compatibility mode".

thanks
-- PMM
Joel Schopp June 25, 2014, 7:34 p.m. UTC | #7
On 06/25/2014 12:34 PM, Peter Maydell wrote:
> On 25 June 2014 15:56, Joel Schopp <joel.schopp@amd.com> wrote:
>> On 06/24/2014 05:28 PM, Peter Maydell wrote:
>>> On 24 June 2014 20:28, Joel Schopp <joel.schopp@amd.com> wrote:
>>>> Does this mean there is a corresponding patch for qemu?
>>> Not as far as I know. It's a bit awkward on the QEMU end because
>>> we really want to provide the guest a consistent memory map
>>> regardless of the host CPU. So at best we'd probably use it to
>>> say "sorry, can't run on this CPU/host kernel".
>> I think most arm64 servers are going to run with 64k pages.  It seems like a
>> major problem to have qemu not work on these systems.
> QEMU should already work fine on servers with 64K pages;
> you just need to have the host offset of the GICV within the 64K page
> and the guest offset of the GICC within the 64K page be the same
> (and at the moment both must also be zero, which I believe is true
> for all of them at the moment except possibly the AEM model;
> counterexamples welcome). Disclaimer: I haven't personally
> tested this, but on the other hand I don't think anybody's
> reported it as not working either.

It doesn't work for me.  Maybe I'm doing something wrong, but I can't 
see what.  I am unique in that I'm running a gic-400 (gicv2m) on aarch64 
hardware with 64k pages.  I'm also unique in that my hardware maps each 
4K gic entry to a 64K page (aliasing each 4k of the gic 16 times in a 
64K page, ie the gic virtual ic is at 0xe1140000 and 0xe1141000 and 
0xe1142000, etc).  This is in line with appendix F of the Server Base 
System Architecture, but it is inconvenient when the size is 0x2000 
(8K).  As a result, all the offsets in the device tree entries point to 
the last 4K in the page, so that an 8K read reads the last 4k from one 
page and the first 4k from the next and actually gets 8k of the gic.


         gic: interrupt-controller@e1101000 {
                 compatible = "arm,gic-400";
                 #interrupt-cells = <3>;
                 #address-cells = <0>;
                 interrupt-controller;
                 msi-controller;
                 reg = <0x0 0xe1110000 0 0x1000>, /* gic dist */
                       <0x0 0xe112f000 0 0x2000>, /* gic cpu */
                       <0x0 0xe114f000 0 0x2000>, /* gic virtual ic*/
                       <0x0 0xe116f000 0 0x2000>, /* gic virtual cpu*/
                       <0x0 0xe1180000 0 0x1000>; /* gic msi */

                 interrupts = <1 8 0xf04>;
         };


My concern here is that if userspace is going to look at 8k starting at 
the beginning of the page, guest offset 0 in your terminology (say 
0xe1140000), instead of starting at the last 4k of the page, offset 
0xf000 (say 0xe114f000), it is going to get the second 4k wrong by 
reading 0xe1141000 instead of 0xe1150000.
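
Spelling out the arithmetic behind that concern (values taken from the
device tree above; the macro names are illustrative):

    /*
     * The second 4K of the region lives at 0xe1150000, the start of the
     * next 64K page.  An 8K mapping that starts at the 64K page boundary
     * instead would put its second 4K at 0xe1141000, which on this
     * hardware is just another alias of the first 4K.
     */
    #define SZ_64K          0x10000UL
    #define GICH_DT_BASE    0xe114f000UL               /* "gic virtual ic" reg entry */

    #define GICH_SECOND_4K  (GICH_DT_BASE + 0x1000)    /* 0xe1150000: the real second 4K */
    #define WRONG_SECOND_4K ((GICH_DT_BASE & ~(SZ_64K - 1)) + 0x1000)
                                                       /* 0xe1141000: alias of the first 4K */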
Peter Maydell June 25, 2014, 8:45 p.m. UTC | #8
On 25 June 2014 20:34, Joel Schopp <joel.schopp@amd.com> wrote:
> It doesn't work for me.  Maybe I'm doing something wrong, but I can't see
> what.  I am unique in that I'm running a gic-400 (gicv2m) on aarch64
> hardware with 64k pages.  I'm also unique in that my hardware maps each 4K
> gic entry to a 64K page (aliasing each 4k of gic 16 times in a 64K page, ie
> the gic virtual ic is at 0xe1140000 and 0xe1141000 and 0xe1142000, etc).
>
> This is inline with appendix F of the server base system architecture.  This
> is inconvenient when the size is 0x2000 (8K).  As a result all the offsets
> in the device tree entries are to the last 4K in the page so that an 8K read
> will read the last 4k from one page and the first 4k from the next and
> actually get 8k of the gic.
>
>
>         gic: interrupt-controller@e1101000 {
>                 compatible = "arm,gic-400";
>                 #interrupt-cells = <3>;
>                 #address-cells = <0>;
>                 interrupt-controller;
>                 msi-controller;
>                 reg = <0x0 0xe1110000 0 0x1000>, /* gic dist */
>                       <0x0 0xe112f000 0 0x2000>, /* gic cpu */
>                       <0x0 0xe114f000 0 0x2000>, /* gic virtual ic*/
>                       <0x0 0xe116f000 0 0x2000>, /* gic virtual cpu*/
>                       <0x0 0xe1180000 0 0x1000>; /* gic msi */

Right, this is the oddball case we don't yet support for 64K pages
(though as you say it is a permitted configuration per the SBSA).

>                 interrupts = <1 8 0xf04>;
>         };
>
>
> My concern here is that if userspace is going to look at 8k starting at the
> beginning of the page, guest offset 0 in your terminology, (say 0xe1140000)
> instead of starting at the last 4k of the page, offset 0xf000 (say
> 0xe114f000) it is going to get the second 4k wrong by reading 0xe1141000
> instead of 0xe1150000.

Userspace doesn't actually look at anything in the GICC. It just asks
the kernel to put the guest GICC (ie the mapping of the host GICV)
at a particular base address which happens to be a multiple of 64K.
In this case if the host kernel is using 64K pages then the KVM
kernel code ought to say "sorry, can't do that" when we tell it the
base address. (That is, it's impossible to give the guest a VM
where the GICC it sees is at a 64K boundary on your hardware
and host kernel config, and hopefully we report that in a not totally
opaque fashion.)

If you hack QEMU's memory map for the virt board so instead of
    [VIRT_GIC_CPU] = { 0x8010000, 0x10000 },
we have
    [VIRT_GIC_CPU] = { 0x801f000, 0x2000 },

does it work? If QEMU supported this VGIC_GRP_ADDR_OFFSET
query then all it would do would be to change that offset and size.
It would be good to know if there are other problems beyond that...
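
Put differently, the queried offset would simply be folded into that
entry (sketch only, assuming the kernel reports 0xf000 for hardware
like Joel's):

    /* with the 0xf000 offset reported via KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET: */
    [VIRT_GIC_CPU] = { 0x8010000 + 0xf000, 0x2000 },   /* == { 0x801f000, 0x2000 } */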

(Conveniently, Linux guests won't currently try to look at the second
4K page of their GICC...)

thanks
-- PMM
Joel Schopp June 25, 2014, 9:18 p.m. UTC | #9
On 06/25/2014 03:45 PM, Peter Maydell wrote:
> On 25 June 2014 20:34, Joel Schopp <joel.schopp@amd.com> wrote:
>> It doesn't work for me.  Maybe I'm doing something wrong, but I can't see
>> what.  I am unique in that I'm running a gic-400 (gicv2m) on aarch64
>> hardware with 64k pages.  I'm also unique in that my hardware maps each 4K
>> gic entry to a 64K page (aliasing each 4k of gic 16 times in a 64K page, ie
>> the gic virtual ic is at 0xe1140000 and 0xe1141000 and 0xe1142000, etc).
>>
>> This is inline with appendix F of the server base system architecture.  This
>> is inconvenient when the size is 0x2000 (8K).  As a result all the offsets
>> in the device tree entries are to the last 4K in the page so that an 8K read
>> will read the last 4k from one page and the first 4k from the next and
>> actually get 8k of the gic.
>>
>>
>>          gic: interrupt-controller@e1101000 {
>>                  compatible = "arm,gic-400";
>>                  #interrupt-cells = <3>;
>>                  #address-cells = <0>;
>>                  interrupt-controller;
>>                  msi-controller;
>>                  reg = <0x0 0xe1110000 0 0x1000>, /* gic dist */
>>                        <0x0 0xe112f000 0 0x2000>, /* gic cpu */
>>                        <0x0 0xe114f000 0 0x2000>, /* gic virtual ic*/
>>                        <0x0 0xe116f000 0 0x2000>, /* gic virtual cpu*/
>>                        <0x0 0xe1180000 0 0x1000>; /* gic msi */
> Right, this is the oddball case we don't yet support for 64K pages
> (though as you say it is a permitted configuration per the SBSA).
At least I know I'm not going crazy.
>
>>                  interrupts = <1 8 0xf04>;
>>          };
>>
>>
>> My concern here is that if userspace is going to look at 8k starting at the
>> beginning of the page, guest offset 0 in your terminology, (say 0xe1140000)
>> instead of starting at the last 4k of the page, offset 0xf000 (say
>> 0xe114f000) it is going to get the second 4k wrong by reading 0xe1141000
>> instead of 0xe1150000.
> Userspace doesn't actually look at anything in the GICC. It just asks
> the kernel to put the guest GICC (ie the mapping of the host GICV)
> at a particular base address which happens to be a multiple of 64K.
> In this case if the host kernel is using 64K pages then the KVM
> kernel code ought to say "sorry, can't do that" when we tell it the
> base address. (That is, it's impossible to give the guest a VM
> where the GICC it sees is at a 64K boundary on your hardware
> and host kernel config, and hopefully we report that in a not totally
> opaque fashion.)
The errors I'm seeing look like:
from qemu:
error: kvm run failed Bad address
Aborted (core dumped)

from kvm:
[ 7931.722965] kvm [1208]: Unsupported fault status: EC=0x20 DFCS=0x14

from kvmtool (lkvm):
   Warning: /extra/rootfs/boot/Image is not a bzImage. Trying to load it 
as a flat binary...
   Info: Loaded kernel to 0x80080000 (10212384 bytes)
   Info: Placing fdt at 0x8fe00000 - 0x8fffffff
   Info: virtio-mmio.devices=0x200@0x10000:36

KVM_RUN failed: Bad address


>
> If you hack QEMU's memory map for the virt board so instead of
>      [VIRT_GIC_CPU] = { 0x8010000, 0x10000 },
> we have
>      [VIRT_GIC_CPU] = { 0x801f000, 0x2000 },
No change in result, though that's not to say this wouldn't work if 
some other unknown problem were fixed.
>
> does it work? If QEMU supported this VGIC_GRP_ADDR_OFFSET
> query then all it would do would be to change that offset and size.
> It would be good to know if there are other problems beyond that...
>
> (Conveniently, Linux guests won't currently try to look at the second
> 4K page of their GICC...)
That's handy.

Patch

diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 8b51c1a..056b782 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -174,6 +174,7 @@  struct kvm_arch_memory_slot {
 #define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
 #define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
 #define KVM_DEV_ARM_VGIC_GRP_NR_IRQS	3
+#define KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET 4
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index b5cd6ed..5513de4 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -160,6 +160,7 @@  struct kvm_arch_memory_slot {
 #define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
 #define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
 #define KVM_DEV_ARM_VGIC_GRP_NR_IRQS	3
+#define KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET 4
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index b0cd417..68ac9c6 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -2228,6 +2228,12 @@  static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
 		break;
 	}
+	case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET: {
+		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+		u32 val = vgic->vcpu_base & ~PAGE_MASK;
+		r = put_user(val, uaddr);
+		break;
+	}
 
 	}
 
@@ -2265,6 +2271,7 @@  static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
 		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
 	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+	case KVM_DEV_ARM_VGIC_GRP_ADDR_OFFSET:
 		return 0;
 	}
 	return -ENXIO;