@@ -171,7 +171,8 @@ uint64_t encl_get_entry(struct encl *encl, const char *symbol)
return 0;
}
-bool encl_load(const char *path, struct encl *encl, unsigned long heap_size)
+bool encl_load(const char *path, struct encl *encl, unsigned long heap_size,
+ unsigned long edmm_size)
{
const char device_path[] = "/dev/sgx_enclave";
struct encl_segment *seg;
@@ -300,7 +301,7 @@ bool encl_load(const char *path, struct encl *encl, unsigned long heap_size)
encl->src_size = encl->segment_tbl[j].offset + encl->segment_tbl[j].size;
- for (encl->encl_size = 4096; encl->encl_size < encl->src_size; )
+ for (encl->encl_size = 4096; encl->encl_size < encl->src_size + edmm_size;)
encl->encl_size <<= 1;
return true;
@@ -25,6 +25,8 @@ static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
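+/* Size of the dynamically added (EDMM) region used by augment_via_eaccept_long. */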
+static const unsigned long edmm_size = 8UL * 1024 * 1024 * 1024; /* 8 GB */
+
/*
* Security Information (SECINFO) data structure needed by a few SGX
* instructions (eg. ENCLU[EACCEPT] and ENCLU[EMODPE]) holds meta-data
@@ -183,7 +185,7 @@ static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
unsigned int i;
void *addr;
- if (!encl_load("test_encl.elf", encl, heap_size)) {
+ if (!encl_load("test_encl.elf", encl, heap_size, edmm_size)) {
encl_delete(encl);
TH_LOG("Failed to load the test enclave.");
return false;
@@ -1210,6 +1212,122 @@ TEST_F(enclave, augment_via_eaccept)
munmap(addr, PAGE_SIZE);
}
+/*
+ * Test the addition of a large number of pages to an initialized enclave via
+ * a pre-emptive run of EACCEPT on each page to be added.
+ */
+#define TIMEOUT_LONG 900 /* seconds */
+TEST_F_TIMEOUT(enclave, augment_via_eaccept_long, TIMEOUT_LONG)
+{
+ struct encl_op_get_from_addr get_addr_op;
+ struct encl_op_put_to_addr put_addr_op;
+ struct encl_op_eaccept eaccept_op;
+ size_t total_size = 0;
+ void *addr;
+ unsigned long i;
+
+ if (!sgx2_supported())
+ SKIP(return, "SGX2 not supported");
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ for (i = 0; i < self->encl.nr_segments; i++) {
+ struct encl_segment *seg = &self->encl.segment_tbl[i];
+
+ total_size += seg->size;
+ TH_LOG("test enclave: total_size = %ld, seg->size = %ld", total_size, seg->size);
+ }
+
+ /*
+ * The actual enclave size is expected to be larger than the loaded
+ * test enclave plus the EDMM region since the enclave size must be a
+ * power of 2 in bytes while test_encl does not consume it all.
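+ * For example, with an 8 GB EDMM region the enclave size rounds up to 16 GB.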
+ */
+ EXPECT_LT(total_size + edmm_size, self->encl.encl_size);
+
+ /*
+ * mmap() the EDMM region at the end of the existing enclave to be used
+ * for dynamically added EPC pages.
+ *
+ * The kernel will allow a new mapping using any permissions if it
+ * falls within the enclave's address range but is not backed
+ * by existing enclave pages.
+ */
+ TH_LOG("mmaping pages at end of enclave...");
+ addr = mmap((void *)self->encl.encl_base + total_size, edmm_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
+ self->encl.fd, 0);
+ EXPECT_NE(addr, MAP_FAILED);
+
+ self->run.exception_vector = 0;
+ self->run.exception_error_code = 0;
+ self->run.exception_addr = 0;
+
+ /*
+ * Run EACCEPT on new page to trigger the #PF->EAUG->EACCEPT(again
+ * without a #PF). All should be transparent to userspace.
+ */
+ TH_LOG("Entering enclave to run EACCEPT for each page of %zd bytes may take a while ...",
+ edmm_size);
+ eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ for (i = 0; i < edmm_size; i += PAGE_SIZE) {
+ eaccept_op.epc_addr = (uint64_t)(addr + i);
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
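+ /*
+  * A #PF (vector 14) with error code 4 (user-mode access to a
+  * not-present page) means the kernel did not dynamically add
+  * (EAUG) the page, i.e. it does not support adding pages to an
+  * initialized enclave.
+  */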
+ if (self->run.exception_vector == 14 &&
+ self->run.exception_error_code == 4 &&
+ self->run.exception_addr == self->encl.encl_base) {
+ munmap(addr, edmm_size);
+ SKIP(return, "Kernel does not support adding pages to initialized enclave");
+ }
+
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ ASSERT_EQ(eaccept_op.ret, 0);
+ ASSERT_EQ(self->run.function, EEXIT);
+ }
+
+ /*
+ * The newly added pages should be accessible from within the enclave -
+ * attempt to write to the first one.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = (unsigned long)addr;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read from the newly added page that was just written to, confirming
+ * that the data previously written (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = (unsigned long)addr;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ munmap(addr, edmm_size);
+}
+
/*
* SGX2 page type modification test in two phases:
* Phase 1:
@@ -35,7 +35,8 @@ extern unsigned char sign_key[];
extern unsigned char sign_key_end[];
void encl_delete(struct encl *ctx);
-bool encl_load(const char *path, struct encl *encl, unsigned long heap_size);
+bool encl_load(const char *path, struct encl *encl, unsigned long heap_size,
+ unsigned long edmm_size);
bool encl_measure(struct encl *encl);
bool encl_build(struct encl *encl);
uint64_t encl_get_entry(struct encl *encl, const char *symbol);
@@ -343,7 +343,7 @@ bool encl_measure(struct encl *encl)
if (!ctx)
goto err;
- if (!mrenclave_ecreate(ctx, encl->src_size))
+ if (!mrenclave_ecreate(ctx, encl->encl_size))
goto err;
for (i = 0; i < encl->nr_segments; i++) {