diff mbox series

[Xen-devel,3/3] xen/arm: p2m: Perform local TLB invalidation on vCPU migration

Message ID 20170308180602.24430-4-julien.grall@arm.com
State New
Headers show
Series xen/arm: p2m: Perform local invalidation on vCPU migration | expand

Commit Message

Julien Grall March 8, 2017, 6:06 p.m. UTC
The ARM architecture allows an OS to have per-CPU page tables, as it
guarantees that TLBs never migrate from one CPU to another.

This works fine until this is done in a guest. Consider the following
scenario:
    - vcpu-0 maps P to V
    - vcpu-1 maps P' to V

If run on the same physical CPU, vcpu-1 can hit in TLBs generated by
vcpu-0 accesses, and access the wrong physical page.

The solution to this is to keep a per-p2m map of which vCPU ran last
on each given pCPU and invalidate local TLBs if two vCPUs from the same
VM run on the same pCPU.

Unfortunately it is not possible to allocate per-cpu variables on the
fly. So for now the size of the array is NR_CPUS; this is fine because
we still have space in the structure domain. We may want to add a
helper to allocate per-cpu variables in the future.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
    This patch is a candidate for backport to Xen 4.8, 4.7 and 4.6.
---
 xen/arch/arm/p2m.c        | 24 ++++++++++++++++++++++++
 xen/include/asm-arm/p2m.h |  3 +++
 2 files changed, 27 insertions(+)

Comments

Stefano Stabellini March 8, 2017, 6:58 p.m. UTC | #1
On Wed, 8 Mar 2017, Julien Grall wrote:
> The ARM architecture allows an OS to have per-CPU page tables, as it
> guarantees that TLBs never migrate from one CPU to another.
> 
> This works fine until this is done in a guest. Consider the following
> scenario:
>     - vcpu-0 maps P to V
>     - vpcu-1 maps P' to V
> 
> If run on the same physical CPU, vcpu-1 can hit in TLBs generated by
> vcpu-0 accesses, and access the wrong physical page.
> 
> The solution to this is to keep a per-p2m map of which vCPU ran the last
> on each given pCPU and invalidate local TLBs if two vPCUs from the same
> VM run on the same CPU.
> 
> Unfortunately it is not possible to allocate per-cpu variable on the
> fly. So for now the size of the array is NR_CPUS, this is fine because
> we still have space in the structure domain. We may want to add an
> helper to allocate per-cpu variable in the future.

I think I understand the problem, thanks for the description. I have a
question: when a vcpu of a different domain is scheduled on a pcpu,
there is no impact on tlb flushes in relation to this problem, is there?

For example:

1) vcpu0/domain1 -> pcpu0
2) vcpu0/domain2 -> pcpu0
3) vcpu1/domain1 -> pcpu0

In 3) we still need to do the flush_tlb_local(), no matter if 2)
happened or not, right?

Also, I have one suggestion below.


> Signed-off-by: Julien Grall <julien.grall@arm.com>
> 
> ---
>     This patch is a candidate for backport to Xen 4.8, 4.7 and 4.6.
> ---
>  xen/arch/arm/p2m.c        | 24 ++++++++++++++++++++++++
>  xen/include/asm-arm/p2m.h |  3 +++
>  2 files changed, 27 insertions(+)
> 
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 1fc6ca3bb2..626376090d 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -130,6 +130,7 @@ void p2m_restore_state(struct vcpu *n)
>  {
>      register_t hcr;
>      struct p2m_domain *p2m = &n->domain->arch.p2m;
> +    uint8_t *last_vcpu_ran;
>  
>      if ( is_idle_vcpu(n) )
>          return;
> @@ -149,6 +150,17 @@ void p2m_restore_state(struct vcpu *n)
>  
>      WRITE_SYSREG(hcr, HCR_EL2);
>      isb();
> +
> +    last_vcpu_ran = &p2m->last_vcpu_ran[smp_processor_id()];
> +
> +    /*
> +     * Flush local TLB for the domain to prevent wrong TLB translation
> +     * when running multiple vCPU of the same domain on a single pCPU.
> +     */
> +    if ( *last_vcpu_ran != INVALID_VCPU_ID && *last_vcpu_ran != n->vcpu_id )
> +        flush_tlb_local();
> +
> +    *last_vcpu_ran = n->vcpu_id;
>  }
>  
>  static void p2m_flush_tlb(struct p2m_domain *p2m)
> @@ -1247,6 +1259,7 @@ int p2m_init(struct domain *d)
>  {
>      struct p2m_domain *p2m = &d->arch.p2m;
>      int rc = 0;
> +    unsigned int cpu;
>  
>      rwlock_init(&p2m->lock);
>      INIT_PAGE_LIST_HEAD(&p2m->pages);
> @@ -1275,6 +1288,17 @@ int p2m_init(struct domain *d)
>  
>      rc = p2m_alloc_table(d);
>  
> +    /*
> +     * Make sure that the type chosen to is able to store the an vCPU ID
> +     * between 0 and the maximum of virtual CPUS supported as long as
> +     * the INVALID_VCPU_ID.
> +     */
> +    BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0]) * 8)) < MAX_VIRT_CPUS);
> +    BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0])* 8)) < INVALID_VCPU_ID);
> +
> +    for_each_possible_cpu(cpu)
> +       p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID;
> +
>      return rc;
>  }
>  
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 0899523084..18c57f936e 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -96,6 +96,9 @@ struct p2m_domain {
>  
>      /* back pointer to domain */
>      struct domain *domain;
> +
> +    /* Keeping track on which CPU this p2m was used and for which vCPU */
> +    uint8_t last_vcpu_ran[NR_CPUS];
>  };

NR_CPUS is usually much larger than cpu_possible_map. I think it would
be best to allocate last_vcpu_ran dynamically (after cpu_possible_map is
set). It should reduce the memory impact significantly.
Julien Grall March 8, 2017, 7:27 p.m. UTC | #2
Hi Stefano,

On 08/03/17 18:58, Stefano Stabellini wrote:
> On Wed, 8 Mar 2017, Julien Grall wrote:
>> The ARM architecture allows an OS to have per-CPU page tables, as it
>> guarantees that TLBs never migrate from one CPU to another.
>>
>> This works fine until this is done in a guest. Consider the following
>> scenario:
>>     - vcpu-0 maps P to V
>>     - vpcu-1 maps P' to V
>>
>> If run on the same physical CPU, vcpu-1 can hit in TLBs generated by
>> vcpu-0 accesses, and access the wrong physical page.
>>
>> The solution to this is to keep a per-p2m map of which vCPU ran the last
>> on each given pCPU and invalidate local TLBs if two vPCUs from the same
>> VM run on the same CPU.
>>
>> Unfortunately it is not possible to allocate per-cpu variable on the
>> fly. So for now the size of the array is NR_CPUS, this is fine because
>> we still have space in the structure domain. We may want to add an
>> helper to allocate per-cpu variable in the future.
>
> I think I understand the problem, thanks for the description. I have a
> question: when a vcpu of a different domain is scheduled on a pcpu,
> there is no impact on tlb flushes in relation to this problem, is there?
>
> For example:
>
> 1) vcpu0/domain1 -> pcpu0
> 2) vcpu0/domain2 -> pcpu0
> 3) vcpu1/domain1 -> pcpu0
>
> In 3) we still need to do the flush_tlb_local(), no matter if 2)
> happened or not, right?

That's correct. The last vCPU running is stored per p2m domain and 
belongs to the associated domain.

>
> Also, I have one suggestion below.

[...]

>> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
>> index 0899523084..18c57f936e 100644
>> --- a/xen/include/asm-arm/p2m.h
>> +++ b/xen/include/asm-arm/p2m.h
>> @@ -96,6 +96,9 @@ struct p2m_domain {
>>
>>      /* back pointer to domain */
>>      struct domain *domain;
>> +
>> +    /* Keeping track on which CPU this p2m was used and for which vCPU */
>> +    uint8_t last_vcpu_ran[NR_CPUS];
>>  };
>
> NR_CPUS is usually much larger than cpu_possible_map. I think it would
> be best to allocate last_vcpu_ran dynamically (after cpu_possible_map is
> set). It should reduce the memory impact significantly.

Well, I thought about it when I wrote the patch. p2m_domain is currently 
embedded in the structure domain and Xen will always allocate a page 
even if structure is smaller. So for now, static allocation is the best.

If in the future we decide to allocate multiple p2m_domain for a domain 
(e.g for feature like altp2m), then the benefit of dynamic allocation is 
still questionable. The altp2m code will already use more memory because 
of the page table, so I don't think it is a concern to use 128-bytes 
more per p2m here.

Also, I noticed that NR_CPUS is set to 128 for both 32-bit and 64-bits, 
but technically 32-bit will not use more than 8 CPUs due to the 
limitation on GICv2 (we don't support GICv3 on 32-bit).

Cheers,
Stefano Stabellini March 8, 2017, 7:48 p.m. UTC | #3
On Wed, 8 Mar 2017, Julien Grall wrote:
> Hi Stefano,
> 
> On 08/03/17 18:58, Stefano Stabellini wrote:
> > On Wed, 8 Mar 2017, Julien Grall wrote:
> > > The ARM architecture allows an OS to have per-CPU page tables, as it
> > > guarantees that TLBs never migrate from one CPU to another.
> > > 
> > > This works fine until this is done in a guest. Consider the following
> > > scenario:
> > >     - vcpu-0 maps P to V
> > >     - vpcu-1 maps P' to V
> > > 
> > > If run on the same physical CPU, vcpu-1 can hit in TLBs generated by
> > > vcpu-0 accesses, and access the wrong physical page.
> > > 
> > > The solution to this is to keep a per-p2m map of which vCPU ran the last
> > > on each given pCPU and invalidate local TLBs if two vPCUs from the same
> > > VM run on the same CPU.
> > > 
> > > Unfortunately it is not possible to allocate per-cpu variable on the
> > > fly. So for now the size of the array is NR_CPUS, this is fine because
> > > we still have space in the structure domain. We may want to add an
> > > helper to allocate per-cpu variable in the future.
> > 
> > I think I understand the problem, thanks for the description. I have a
> > question: when a vcpu of a different domain is scheduled on a pcpu,
> > there is no impact on tlb flushes in relation to this problem, is there?
> > 
> > For example:
> > 
> > 1) vcpu0/domain1 -> pcpu0
> > 2) vcpu0/domain2 -> pcpu0
> > 3) vcpu1/domain1 -> pcpu0
> > 
> > In 3) we still need to do the flush_tlb_local(), no matter if 2)
> > happened or not, right?
> 
> That's correct. The last vCPU running is stored per p2m domain and belongs to
> the associated domain.
> 
> > 
> > Also, I have one suggestion below.
> 
> [...]
> 
> > > diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> > > index 0899523084..18c57f936e 100644
> > > --- a/xen/include/asm-arm/p2m.h
> > > +++ b/xen/include/asm-arm/p2m.h
> > > @@ -96,6 +96,9 @@ struct p2m_domain {
> > > 
> > >      /* back pointer to domain */
> > >      struct domain *domain;
> > > +
> > > +    /* Keeping track on which CPU this p2m was used and for which vCPU */
> > > +    uint8_t last_vcpu_ran[NR_CPUS];
> > >  };
> > 
> > NR_CPUS is usually much larger than cpu_possible_map. I think it would
> > be best to allocate last_vcpu_ran dynamically (after cpu_possible_map is
> > set). It should reduce the memory impact significantly.
> 
> Well, I thought about it when I wrote the patch. p2m_domain is currently
> embedded in the structure domain and Xen will always allocate a page even if
> structure is smaller. So for now, static allocation is the best.

Maybe I misunderstood your answer or my suggestion wasn't clear. What
is the problem with something like the following (untested)?

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index bb327da..457038a 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -635,6 +635,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
     if ( is_hardware_domain(d) && (rc = domain_vuart_init(d)) )
         goto fail;
 
+    d->arch.p2m.last_vcpu_ran = xmalloc_array(uint8_t, num_possible_cpus());
     return 0;
 
 fail:
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 0899523..606a875 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -96,6 +96,8 @@ struct p2m_domain {
 
     /* back pointer to domain */
     struct domain *domain;
+
+    uint8_t *last_vcpu_ran;
 };
 
 /*

Julien Grall March 8, 2017, 9:37 p.m. UTC | #4
On 08/03/2017 19:48, Stefano Stabellini wrote:
> On Wed, 8 Mar 2017, Julien Grall wrote:
>> Hi Stefano,
>>
>> On 08/03/17 18:58, Stefano Stabellini wrote:
>>> On Wed, 8 Mar 2017, Julien Grall wrote:
>>>> The ARM architecture allows an OS to have per-CPU page tables, as it
>>>> guarantees that TLBs never migrate from one CPU to another.
>>>>
>>>> This works fine until this is done in a guest. Consider the following
>>>> scenario:
>>>>     - vcpu-0 maps P to V
>>>>     - vpcu-1 maps P' to V
>>>>
>>>> If run on the same physical CPU, vcpu-1 can hit in TLBs generated by
>>>> vcpu-0 accesses, and access the wrong physical page.
>>>>
>>>> The solution to this is to keep a per-p2m map of which vCPU ran the last
>>>> on each given pCPU and invalidate local TLBs if two vPCUs from the same
>>>> VM run on the same CPU.
>>>>
>>>> Unfortunately it is not possible to allocate per-cpu variable on the
>>>> fly. So for now the size of the array is NR_CPUS, this is fine because
>>>> we still have space in the structure domain. We may want to add an
>>>> helper to allocate per-cpu variable in the future.
>>>
>>> I think I understand the problem, thanks for the description. I have a
>>> question: when a vcpu of a different domain is scheduled on a pcpu,
>>> there is no impact on tlb flushes in relation to this problem, is there?
>>>
>>> For example:
>>>
>>> 1) vcpu0/domain1 -> pcpu0
>>> 2) vcpu0/domain2 -> pcpu0
>>> 3) vcpu1/domain1 -> pcpu0
>>>
>>> In 3) we still need to do the flush_tlb_local(), no matter if 2)
>>> happened or not, right?
>>
>> That's correct. The last vCPU running is stored per p2m domain and belongs to
>> the associated domain.
>>
>>>
>>> Also, I have one suggestion below.
>>
>> [...]
>>
>>>> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
>>>> index 0899523084..18c57f936e 100644
>>>> --- a/xen/include/asm-arm/p2m.h
>>>> +++ b/xen/include/asm-arm/p2m.h
>>>> @@ -96,6 +96,9 @@ struct p2m_domain {
>>>>
>>>>      /* back pointer to domain */
>>>>      struct domain *domain;
>>>> +
>>>> +    /* Keeping track on which CPU this p2m was used and for which vCPU */
>>>> +    uint8_t last_vcpu_ran[NR_CPUS];
>>>>  };
>>>
>>> NR_CPUS is usually much larger than cpu_possible_map. I think it would
>>> be best to allocate last_vcpu_ran dynamically (after cpu_possible_map is
>>> set). It should reduce the memory impact significantly.
>>
>> Well, I thought about it when I wrote the patch. p2m_domain is currently
>> embedded in the structure domain and Xen will always allocate a page even if
>> structure is smaller. So for now, static allocation is the best.
>
> Maybe I misunderstood your answer or my suggestion wasn't clear. What
> is the problem with something like the following (untested)?

The problem is that you allocate more memory than my current solution. As 
your code below clearly shows, p2m_domain is directly integrated in 
arch_domain.

The structure domain can never be bigger than PAGE_SIZE (see 
alloc_domain_struct) and we will always allocate a page no matter the 
size of the structure domain. This patch is just filling a hole in the page.

So why would we want to switch to something that increases the memory impact?

Cheers,
Julien Grall March 8, 2017, 9:37 p.m. UTC | #5
On 08/03/2017 19:48, Stefano Stabellini wrote:
> On Wed, 8 Mar 2017, Julien Grall wrote:
>> Hi Stefano,
>>
>> On 08/03/17 18:58, Stefano Stabellini wrote:
>>> On Wed, 8 Mar 2017, Julien Grall wrote:
>>>> The ARM architecture allows an OS to have per-CPU page tables, as it
>>>> guarantees that TLBs never migrate from one CPU to another.
>>>>
>>>> This works fine until this is done in a guest. Consider the following
>>>> scenario:
>>>>     - vcpu-0 maps P to V
>>>>     - vpcu-1 maps P' to V
>>>>
>>>> If run on the same physical CPU, vcpu-1 can hit in TLBs generated by
>>>> vcpu-0 accesses, and access the wrong physical page.
>>>>
>>>> The solution to this is to keep a per-p2m map of which vCPU ran the last
>>>> on each given pCPU and invalidate local TLBs if two vPCUs from the same
>>>> VM run on the same CPU.
>>>>
>>>> Unfortunately it is not possible to allocate per-cpu variable on the
>>>> fly. So for now the size of the array is NR_CPUS, this is fine because
>>>> we still have space in the structure domain. We may want to add an
>>>> helper to allocate per-cpu variable in the future.
>>>
>>> I think I understand the problem, thanks for the description. I have a
>>> question: when a vcpu of a different domain is scheduled on a pcpu,
>>> there is no impact on tlb flushes in relation to this problem, is there?
>>>
>>> For example:
>>>
>>> 1) vcpu0/domain1 -> pcpu0
>>> 2) vcpu0/domain2 -> pcpu0
>>> 3) vcpu1/domain1 -> pcpu0
>>>
>>> In 3) we still need to do the flush_tlb_local(), no matter if 2)
>>> happened or not, right?
>>
>> That's correct. The last vCPU running is stored per p2m domain and belongs to
>> the associated domain.
>>
>>>
>>> Also, I have one suggestion below.
>>
>> [...]
>>
>>>> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
>>>> index 0899523084..18c57f936e 100644
>>>> --- a/xen/include/asm-arm/p2m.h
>>>> +++ b/xen/include/asm-arm/p2m.h
>>>> @@ -96,6 +96,9 @@ struct p2m_domain {
>>>>
>>>>      /* back pointer to domain */
>>>>      struct domain *domain;
>>>> +
>>>> +    /* Keeping track on which CPU this p2m was used and for which vCPU */
>>>> +    uint8_t last_vcpu_ran[NR_CPUS];
>>>>  };
>>>
>>> NR_CPUS is usually much larger than cpu_possible_map. I think it would
>>> be best to allocate last_vcpu_ran dynamically (after cpu_possible_map is
>>> set). It should reduce the memory impact significantly.
>>
>> Well, I thought about it when I wrote the patch. p2m_domain is currently
>> embedded in the structure domain and Xen will always allocate a page even if
>> structure is smaller. So for now, static allocation is the best.
>
> Maybe I misunderstood your answer or my suggestion wasn't clear. What
> is the problem with something like the following (untested)?

The problem is that you allocate more memory than my current solution. As 
your code below clearly shows, p2m_domain is directly integrated in 
arch_domain.

The structure domain can never be bigger than PAGE_SIZE (see 
alloc_domain_struct) and we will always allocate a page no matter the 
size of the structure domain. This patch is just filling a hole in the page.

So why would we want to switch to something that increases the memory impact?

Cheers,
Stefano Stabellini March 14, 2017, 10:39 p.m. UTC | #6
On Wed, 8 Mar 2017, Julien Grall wrote:
> On 08/03/2017 19:48, Stefano Stabellini wrote:
> > On Wed, 8 Mar 2017, Julien Grall wrote:
> > > Hi Stefano,
> > > 
> > > On 08/03/17 18:58, Stefano Stabellini wrote:
> > > > On Wed, 8 Mar 2017, Julien Grall wrote:
> > > > > The ARM architecture allows an OS to have per-CPU page tables, as it
> > > > > guarantees that TLBs never migrate from one CPU to another.
> > > > > 
> > > > > This works fine until this is done in a guest. Consider the following
> > > > > scenario:
> > > > >     - vcpu-0 maps P to V
> > > > >     - vpcu-1 maps P' to V
> > > > > 
> > > > > If run on the same physical CPU, vcpu-1 can hit in TLBs generated by
> > > > > vcpu-0 accesses, and access the wrong physical page.
> > > > > 
> > > > > The solution to this is to keep a per-p2m map of which vCPU ran the
> > > > > last
> > > > > on each given pCPU and invalidate local TLBs if two vPCUs from the
> > > > > same
> > > > > VM run on the same CPU.
> > > > > 
> > > > > Unfortunately it is not possible to allocate per-cpu variable on the
> > > > > fly. So for now the size of the array is NR_CPUS, this is fine because
> > > > > we still have space in the structure domain. We may want to add an
> > > > > helper to allocate per-cpu variable in the future.
> > > > 
> > > > I think I understand the problem, thanks for the description. I have a
> > > > question: when a vcpu of a different domain is scheduled on a pcpu,
> > > > there is no impact on tlb flushes in relation to this problem, is there?
> > > > 
> > > > For example:
> > > > 
> > > > 1) vcpu0/domain1 -> pcpu0
> > > > 2) vcpu0/domain2 -> pcpu0
> > > > 3) vcpu1/domain1 -> pcpu0
> > > > 
> > > > In 3) we still need to do the flush_tlb_local(), no matter if 2)
> > > > happened or not, right?
> > > 
> > > That's correct. The last vCPU running is stored per p2m domain and belongs
> > > to
> > > the associated domain.
> > > 
> > > > 
> > > > Also, I have one suggestion below.
> > > 
> > > [...]
> > > 
> > > > > diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> > > > > index 0899523084..18c57f936e 100644
> > > > > --- a/xen/include/asm-arm/p2m.h
> > > > > +++ b/xen/include/asm-arm/p2m.h
> > > > > @@ -96,6 +96,9 @@ struct p2m_domain {
> > > > > 
> > > > >      /* back pointer to domain */
> > > > >      struct domain *domain;
> > > > > +
> > > > > +    /* Keeping track on which CPU this p2m was used and for which
> > > > > vCPU */
> > > > > +    uint8_t last_vcpu_ran[NR_CPUS];
> > > > >  };
> > > > 
> > > > NR_CPUS is usually much larger than cpu_possible_map. I think it would
> > > > be best to allocate last_vcpu_ran dynamically (after cpu_possible_map is
> > > > set). It should reduce the memory impact significantly.
> > > 
> > > Well, I thought about it when I wrote the patch. p2m_domain is currently
> > > embedded in the structure domain and Xen will always allocate a page even
> > > if
> > > structure is smaller. So for now, static allocation is the best.
> > 
> > Maybe I misunderstood your answer or my suggestion wasn't clear. What
> > is the problem with something like the following (untested)?
> 
> The problem is you allocate more memory than my current solution. As your code
> below clearly show it, p2m_domain is directly integrated in arch_domain.
> 
> The structure domain can never be bigger than PAGE_SIZE (see
> alloc_domanin_struct) and we will always allocate a page no matter the size of
> the structure domain. This patch is just filling hole in the page.
> 
> So why would we want to switch to something the increase the memory impact?

I see your point, it is a good idea to use the remaining space in struct
domain. The only issue is that a user could set CONFIG_NR_CPUS large
enough to push last_vcpu_ran beyond the page boundary. I think we need
to add a BUILD_BUG_ON(sizeof(struct domain) >= PAGE_SIZE).
Julien Grall March 14, 2017, 11:23 p.m. UTC | #7
Hi Stefano,

On 03/14/2017 10:39 PM, Stefano Stabellini wrote:
> On Wed, 8 Mar 2017, Julien Grall wrote:
>> On 08/03/2017 19:48, Stefano Stabellini wrote:
>>> On Wed, 8 Mar 2017, Julien Grall wrote:
> I see your point, it is a good idea to use the remaining space in struct
> domain. The only issue is that a user could set CONFIG_NR_CPUS large
> enough to push last_vcpu_ran beyond the page boundary. I think we need
> to add a BUILD_BUG_ON(sizeof(struct domain) >= PAGE_SIZE).

There is already a BUILD_BUG_ON(sizeof(struct domain) > PAGE_SIZE) in 
alloc_domain_struct. Would it be enough for you?

Cheers,
Stefano Stabellini March 14, 2017, 11:29 p.m. UTC | #8
On Tue, 14 Mar 2017, Julien Grall wrote:
> Hi Stefano,
> 
> On 03/14/2017 10:39 PM, Stefano Stabellini wrote:
> > On Wed, 8 Mar 2017, Julien Grall wrote:
> > > On 08/03/2017 19:48, Stefano Stabellini wrote:
> > > > On Wed, 8 Mar 2017, Julien Grall wrote:
> > I see your point, it is a good idea to use the remaining space in struct
> > domain. The only issue is that a user could set CONFIG_NR_CPUS large
> > enough to push last_vcpu_ran beyond the page boundary. I think we need
> > to add a BUILD_BUG_ON(sizeof(struct domain) >= PAGE_SIZE).
> 
> There is already a BUILD_BUG_ON(sizeof(struct domain) > PAGE_SIZE) in
> alloc_domain_struct. Would it be enough for you?

Yeah, we don't need two :)
diff mbox series

Patch

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 1fc6ca3bb2..626376090d 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -130,6 +130,7 @@  void p2m_restore_state(struct vcpu *n)
 {
     register_t hcr;
     struct p2m_domain *p2m = &n->domain->arch.p2m;
+    uint8_t *last_vcpu_ran;
 
     if ( is_idle_vcpu(n) )
         return;
@@ -149,6 +150,17 @@  void p2m_restore_state(struct vcpu *n)
 
     WRITE_SYSREG(hcr, HCR_EL2);
     isb();
+
+    last_vcpu_ran = &p2m->last_vcpu_ran[smp_processor_id()];
+
+    /*
+     * Flush the local TLB for the domain to prevent wrong TLB translations
+     * when running multiple vCPUs of the same domain on a single pCPU.
+     */
+    if ( *last_vcpu_ran != INVALID_VCPU_ID && *last_vcpu_ran != n->vcpu_id )
+        flush_tlb_local();
+
+    *last_vcpu_ran = n->vcpu_id;
 }
 
 static void p2m_flush_tlb(struct p2m_domain *p2m)
@@ -1247,6 +1259,7 @@  int p2m_init(struct domain *d)
 {
     struct p2m_domain *p2m = &d->arch.p2m;
     int rc = 0;
+    unsigned int cpu;
 
     rwlock_init(&p2m->lock);
     INIT_PAGE_LIST_HEAD(&p2m->pages);
@@ -1275,6 +1288,17 @@  int p2m_init(struct domain *d)
 
     rc = p2m_alloc_table(d);
 
+    /*
+     * Make sure that the type chosen is able to store any vCPU ID
+     * between 0 and the maximum number of virtual CPUs supported, as
+     * well as INVALID_VCPU_ID.
+     */
+    BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0]) * 8)) < MAX_VIRT_CPUS);
+    BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0])* 8)) < INVALID_VCPU_ID);
+
+    for_each_possible_cpu(cpu)
+       p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID;
+
     return rc;
 }
 
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 0899523084..18c57f936e 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -96,6 +96,9 @@  struct p2m_domain {
 
     /* back pointer to domain */
     struct domain *domain;
+
+    /* Keeps track of which CPU this p2m was used on and for which vCPU */
+    uint8_t last_vcpu_ran[NR_CPUS];
 };
 
 /*