@@ -299,7 +299,7 @@ static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
* Fill in protocol and format specifiers. This can be called very early
- * in the boot, so use rip-relative references as needed.
+ * in the boot.
*/
- ghcb->protocol_version = RIP_REL_REF(ghcb_version);
+ ghcb->protocol_version = ghcb_version;
ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
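For reference, RIP_REL_REF() forces a RIP-relative load so that code running from the early identity mapping never dereferences an absolute, unrelocated kernel address. Its definition (per arch/x86/include/asm/asm.h, shown here as a sketch) is roughly:

/* Sketch of the accessor being dropped here. */
static __always_inline __pure void *rip_rel_ptr(void *p)
{
	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
	return p;
}
#define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))

Once the objects containing this code are built position independent, plain C accesses compile to RIP-relative addressing anyway, which is what makes dropping the wrapper safe.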
@@ -656,9 +656,9 @@ snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
/* Skip post-processing for out-of-range zero leafs. */
- if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
- (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
- (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
+ if (!(leaf->fn <= cpuid_std_range_max ||
+ (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
+ (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
return 0;
}
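The three comparisons mirror the architectural split of the CPUID space: standard leaves start at 0x0, hypervisor leaves at 0x40000000, extended leaves at 0x80000000, and each range's maximum is cached from the CPUID table at setup time (see the next hunk). A hypothetical helper expressing the same predicate:

/* Hypothetical helper equivalent to the range check above. */
static bool snp_cpuid_fn_in_range(u32 fn)
{
	return fn <= cpuid_std_range_max ||			  /* standard range   */
	       (fn >= 0x40000000 && fn <= cpuid_hyp_range_max) || /* hypervisor range */
	       (fn >= 0x80000000 && fn <= cpuid_ext_range_max);	  /* extended range   */
}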
@@ -1179,11 +1179,11 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
if (fn->eax_in == 0x0)
- RIP_REL_REF(cpuid_std_range_max) = fn->eax;
+ cpuid_std_range_max = fn->eax;
else if (fn->eax_in == 0x40000000)
- RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
+ cpuid_hyp_range_max = fn->eax;
else if (fn->eax_in == 0x80000000)
- RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
+ cpuid_ext_range_max = fn->eax;
}
}
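Each iteration inspects one entry of the firmware-provided SNP CPUID table. Per the GHCB specification, an entry looks roughly like this (see struct snp_cpuid_fn in <asm/sev.h>):

/* Sketch of one SNP CPUID table entry. */
struct snp_cpuid_fn {
	u32 eax_in;	/* input leaf number */
	u32 ecx_in;	/* input subleaf number */
	u64 xcr0_in;
	u64 xss_in;
	u32 eax;	/* output register values for this leaf */
	u32 ebx;
	u32 ecx;
	u32 edx;
	u64 __reserved;
} __packed;

Leaves 0x0, 0x40000000 and 0x80000000 each report the highest supported leaf of their range in EAX, which is exactly what gets cached in the three *_range_max variables.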
@@ -1229,11 +1229,7 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
{
int ret;
- /*
- * This can be called very early during boot, so use rIP-relative
- * references as needed.
- */
- if (RIP_REL_REF(snp_vmpl)) {
+ if (snp_vmpl) {
svsm_pval_4k_page(paddr, validate);
} else {
ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
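pvalidate() wraps the PVALIDATE instruction, which toggles the Validated bit in the page's RMP entry. A simplified sketch of the wrapper (the kernel's version in <asm/sev.h> uses the CC_SET()/CC_OUT() flag-output macros instead of SETC):

/* Simplified sketch of the PVALIDATE wrapper. */
static int pvalidate_sketch(unsigned long vaddr, bool rmp_psize, bool validate)
{
	u8 no_rmpupdate;
	int rc;

	/* PVALIDATE lacks a mnemonic in older assemblers; emit raw bytes. */
	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t"
		     "setc %0"
		     : "=qm" (no_rmpupdate), "=a" (rc)
		     : "a" (vaddr), "c" (rmp_psize), "d" (validate)
		     : "memory", "cc");

	/* CF set means the RMP entry was already in the requested state. */
	return no_rmpupdate ? PVALIDATE_FAIL_NOUPDATE : rc;
}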
@@ -1377,7 +1373,7 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
if (!secrets_page->svsm_guest_vmpl)
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_VMPL0);
- RIP_REL_REF(snp_vmpl) = secrets_page->svsm_guest_vmpl;
+ snp_vmpl = secrets_page->svsm_guest_vmpl;
caa = secrets_page->svsm_caa;
@@ -1392,8 +1388,8 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
* The CA is identity mapped when this routine is called, both by the
* decompressor code and the early kernel code.
*/
- RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)caa;
- RIP_REL_REF(boot_svsm_caa_pa) = caa;
+ boot_svsm_caa = (struct svsm_ca *)caa;
+ boot_svsm_caa_pa = caa;
/* Advertise the SVSM presence via CPUID. */
cpuid_table = (struct snp_cpuid_table *)snp_cpuid_get_table();
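boot_svsm_caa now points at the SVSM Calling Area (CA), the shared page through which the guest and the SVSM exchange call state. Its layout, per the SVSM spec, is roughly (see struct svsm_ca):

/* Sketch of the SVSM Calling Area layout (one page). */
struct svsm_ca {
	u8 call_pending;		/* set by guest, cleared by the SVSM */
	u8 mem_available;
	u8 rsvd1[6];
	u8 svsm_buffer[PAGE_SIZE - 8];	/* request/response scratch space */
};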
@@ -462,15 +462,10 @@ int svsm_perform_call_protocol(struct svsm_call *call)
*/
flags = native_local_irq_save();
- /*
- * Use rip-relative references when called early in the boot. If
- * ghcbs_initialized is set, then it is late in the boot and no need
- * to worry about rip-relative references in called functions.
- */
- if (RIP_REL_REF(sev_cfg).ghcbs_initialized)
+ if (sev_cfg.ghcbs_initialized)
ghcb = __sev_get_ghcb(&state);
- else if (RIP_REL_REF(boot_ghcb))
- ghcb = RIP_REL_REF(boot_ghcb);
+ else if (boot_ghcb)
+ ghcb = boot_ghcb;
else
ghcb = NULL;
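The selection amounts to: per-CPU GHCB once GHCBs are fully set up, the shared boot GHCB during early boot, and NULL before any GHCB exists (which makes the code below fall back to the MSR-based GHCB protocol). A hypothetical refactor naming that choice:

/* Hypothetical helper making the three-way GHCB choice explicit. */
static struct ghcb *svsm_pick_ghcb(struct ghcb_state *state)
{
	if (sev_cfg.ghcbs_initialized)	/* late boot: per-CPU GHCB */
		return __sev_get_ghcb(state);
	if (boot_ghcb)			/* early boot: shared boot GHCB */
		return boot_ghcb;
	return NULL;			/* no GHCB yet: use the MSR protocol */
}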
@@ -479,7 +474,7 @@ int svsm_perform_call_protocol(struct svsm_call *call)
: svsm_perform_msr_protocol(call);
} while (ret == -EAGAIN);
- if (RIP_REL_REF(sev_cfg).ghcbs_initialized)
+ if (sev_cfg.ghcbs_initialized)
__sev_put_ghcb(&state);
native_local_irq_restore(flags);
@@ -542,7 +537,7 @@ void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
* This eliminates worries about jump tables or checking boot_cpu_data
* in the cc_platform_has() function.
*/
- if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
return;
/*
@@ -561,7 +556,7 @@ void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
* This eliminates worries about jump tables or checking boot_cpu_data
* in the cc_platform_has() function.
*/
- if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
return;
/* Ask hypervisor to mark the memory pages shared in the RMP table. */
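sev_status caches the SEV_STATUS MSR, read once during early boot; the SNP bit tested in both hunks above lives alongside the plain SEV and SEV-ES bits (definitions as in <asm/msr-index.h>):

/* The relevant SEV_STATUS MSR bits. */
#define MSR_AMD64_SEV			0xc0010131
#define MSR_AMD64_SEV_ENABLED		BIT_ULL(0)	/* SEV guest */
#define MSR_AMD64_SEV_ES_ENABLED	BIT_ULL(1)	/* SEV-ES guest */
#define MSR_AMD64_SEV_SNP_ENABLED	BIT_ULL(2)	/* SEV-SNP guest */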
@@ -1356,8 +1351,8 @@ static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
if (ret)
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);
- RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)pa;
- RIP_REL_REF(boot_svsm_caa_pa) = pa;
+ boot_svsm_caa = (struct svsm_ca *)pa;
+ boot_svsm_caa_pa = pa;
}
bool __head snp_init(struct boot_params *bp)
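The ret tested in the hunk above comes from the (elided) SVSM core-protocol call asking the SVSM to move the CA to a kernel-allocated page; presumably something along these lines:

/* Sketch of the SVSM_CORE_REMAP_CA request that produces 'ret' above. */
static int svsm_remap_ca_sketch(u64 pa)
{
	struct svsm_call call = {};

	call.caa = svsm_get_caa();
	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
	call.rcx = pa;			/* physical address of the new CA */

	return svsm_perform_call_protocol(&call);
}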
@@ -68,28 +68,18 @@ extern u64 boot_svsm_caa_pa;
static __always_inline struct svsm_ca *svsm_get_caa(void)
{
- /*
- * Use rIP-relative references when called early in the boot. If
- * ->use_cas is set, then it is late in the boot and no need
- * to worry about rIP-relative references.
- */
- if (RIP_REL_REF(sev_cfg).use_cas)
+ if (sev_cfg.use_cas)
return this_cpu_read(svsm_caa);
else
- return RIP_REL_REF(boot_svsm_caa);
+ return boot_svsm_caa;
}
static __always_inline u64 svsm_get_caa_pa(void)
{
- /*
- * Use rIP-relative references when called early in the boot. If
- * ->use_cas is set, then it is late in the boot and no need
- * to worry about rIP-relative references.
- */
- if (RIP_REL_REF(sev_cfg).use_cas)
+ if (sev_cfg.use_cas)
return this_cpu_read(svsm_caa_pa);
else
- return RIP_REL_REF(boot_svsm_caa_pa);
+ return boot_svsm_caa_pa;
}
int svsm_perform_call_protocol(struct svsm_call *call);
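Both accessors are __always_inline, so after this change early-boot callers pick up the boot CA through an ordinary global access (RIP-relative when built position independent), while late-boot callers read the per-CPU copies, presumably declared along these lines:

/* Sketch: per-CPU CA tracking consulted once sev_cfg.use_cas is set. */
DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
DECLARE_PER_CPU(u64, svsm_caa_pa);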