diff --git a/common/inc/internal/trts_inst.h b/common/inc/internal/trts_inst.h
index 4a86ce222..9121efb29 100644
--- a/common/inc/internal/trts_inst.h
+++ b/common/inc/internal/trts_inst.h
@@ -89,8 +89,8 @@ uint32_t sgx_get_rsrvmm_default_perm(void);
size_t get_stack_guard(void);
-int sgx_apply_epc_pages(void *start_address, size_t page_number);
-int sgx_trim_epc_pages(void *start_address, size_t page_number);
+int sgx_commit_rts_pages(void *start_address, size_t page_number);
+int sgx_uncommit_rts_pages(void *start_address, size_t page_number);
#ifdef __cplusplus
}
diff --git a/samplecode/emm/Makefile b/samplecode/emm/Makefile
new file mode 100644
index 000000000..a1e4431e5
--- /dev/null
+++ b/samplecode/emm/Makefile
@@ -0,0 +1,203 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+######## SGX SDK Settings ########
+
+SGX_SDK ?= /opt/intel/sgxsdk
+SGX_MODE ?= HW
+SGX_ARCH ?= x64
+
+TOP_DIR := ../..
+include $(TOP_DIR)/buildenv.mk
+
+ifeq ($(shell getconf LONG_BIT), 32)
+ SGX_ARCH := x86
+else ifeq ($(findstring -m32, $(CXXFLAGS)), -m32)
+ SGX_ARCH := x86
+endif
+
+ifeq ($(SGX_ARCH), x86)
+ SGX_COMMON_CFLAGS := -m32
+ SGX_LIBRARY_PATH := $(SGX_SDK)/lib
+ SGX_BIN_PATH := $(SGX_SDK)/bin/x86
+else
+ SGX_COMMON_CFLAGS := -m64
+ SGX_LIBRARY_PATH := $(SGX_SDK)/lib64
+ SGX_BIN_PATH := $(SGX_SDK)/bin/x64
+endif
+
+ifeq ($(SGX_DEBUG), 1)
+ SGX_COMMON_CFLAGS += -O0 -g
+ Rust_Build_Flags :=
+ Rust_Build_Out := debug
+else
+ SGX_COMMON_CFLAGS += -O2
+ Rust_Build_Flags := --release
+ Rust_Build_Out := release
+endif
+
+SGX_EDGER8R := $(SGX_BIN_PATH)/sgx_edger8r
+ifneq ($(SGX_MODE), HYPER)
+ SGX_ENCLAVE_SIGNER := $(SGX_BIN_PATH)/sgx_sign
+else
+ SGX_ENCLAVE_SIGNER := $(SGX_BIN_PATH)/sgx_sign_hyper
+ SGX_EDGER8R_MODE := --sgx-mode $(SGX_MODE)
+endif
+
+######## CUSTOM Settings ########
+
+CUSTOM_LIBRARY_PATH := ./lib
+CUSTOM_BIN_PATH := ./bin
+CUSTOM_SYSROOT_PATH := ./sysroot
+CUSTOM_EDL_PATH := $(ROOT_DIR)/sgx_edl/edl
+CUSTOM_COMMON_PATH := $(ROOT_DIR)/common
+
+######## EDL Settings ########
+
+Enclave_EDL_Files := enclave/enclave_t.c enclave/enclave_t.h app/enclave_u.c app/enclave_u.h
+
+######## APP Settings ########
+
+App_Rust_Flags := --release
+App_Src_Files := $(shell find app/ -type f -name '*.rs') $(shell find app/ -type f -name 'Cargo.toml')
+App_Include_Paths := -I ./app -I$(SGX_SDK)/include -I$(CUSTOM_COMMON_PATH)/inc -I$(CUSTOM_EDL_PATH)
+App_C_Flags := $(CFLAGS) $(SGX_COMMON_CFLAGS) -fPIC -Wno-attributes $(App_Include_Paths)
+
+App_Rust_Path := ./app/target/release
+App_Enclave_u_Object := $(CUSTOM_LIBRARY_PATH)/libenclave_u.a
+App_Name := $(CUSTOM_BIN_PATH)/app
+
+######## Enclave Settings ########
+
+# BUILD_STD=no use no_std
+# BUILD_STD=cargo use cargo-std-aware
+# BUILD_STD=xargo use xargo
+BUILD_STD ?= no
+
+Rust_Build_Target := x86_64-unknown-linux-sgx
+Rust_Target_Path := $(ROOT_DIR)/rustlib
+
+ifeq ($(BUILD_STD), cargo)
+ Rust_Build_Std := $(Rust_Build_Flags) -Zbuild-std=core,alloc
+ Rust_Std_Features := --features thread
+ Rust_Target_Flags := --target $(Rust_Target_Path)/$(Rust_Build_Target).json
+ Rust_Sysroot_Path := $(CURDIR)/sysroot
+ Rust_Sysroot_Flags := RUSTFLAGS="--sysroot $(Rust_Sysroot_Path)"
+endif
+
+RustEnclave_Build_Flags := $(Rust_Build_Flags)
+RustEnclave_Src_Files := $(shell find enclave/ -type f -name '*.rs') $(shell find enclave/ -type f -name 'Cargo.toml')
+RustEnclave_Include_Paths := -I$(CUSTOM_COMMON_PATH)/inc -I$(CUSTOM_COMMON_PATH)/inc/tlibc -I$(CUSTOM_EDL_PATH)
+
+RustEnclave_Link_Libs := -L$(CUSTOM_LIBRARY_PATH) -lenclave
+RustEnclave_C_Flags := $(CFLAGS) $(ENCLAVE_CFLAGS) $(SGX_COMMON_CFLAGS) $(RustEnclave_Include_Paths)
+RustEnclave_Link_Flags := -Wl,--no-undefined -nostdlib -nodefaultlibs -nostartfiles \
+ -Wl,--start-group $(RustEnclave_Link_Libs) -Wl,--end-group \
+ -Wl,--version-script=enclave/enclave.lds \
+ $(ENCLAVE_LDFLAGS)
+
+ifeq ($(BUILD_STD), cargo)
+ RustEnclave_Out_Path := ./enclave/target/$(Rust_Build_Target)/$(Rust_Build_Out)
+else ifeq ($(BUILD_STD), xargo)
+ RustEnclave_Out_Path := ./enclave/target/$(Rust_Build_Target)/$(Rust_Build_Out)
+else
+ RustEnclave_Out_Path := ./enclave/target/$(Rust_Build_Out)
+endif
+
+RustEnclave_Lib_Name := $(RustEnclave_Out_Path)/libemmtest.a
+RustEnclave_Name := $(CUSTOM_BIN_PATH)/enclave.so
+RustEnclave_Signed_Name := $(CUSTOM_BIN_PATH)/enclave.signed.so
+
+.PHONY: all
+all: $(Enclave_EDL_Files) $(App_Name) $(RustEnclave_Signed_Name)
+
+######## EDL Objects ########
+
+$(Enclave_EDL_Files): $(SGX_EDGER8R) enclave/enclave.edl
+ $(SGX_EDGER8R) $(SGX_EDGER8R_MODE) --trusted enclave/enclave.edl --search-path $(CUSTOM_COMMON_PATH)/inc --search-path $(CUSTOM_EDL_PATH) --trusted-dir enclave
+ $(SGX_EDGER8R) $(SGX_EDGER8R_MODE) --untrusted enclave/enclave.edl --search-path $(CUSTOM_COMMON_PATH)/inc --search-path $(CUSTOM_EDL_PATH) --untrusted-dir app
+ @echo "GEN => $(Enclave_EDL_Files)"
+
+######## App Objects ########
+
+app/enclave_u.o: $(Enclave_EDL_Files)
+ @$(CC) $(App_C_Flags) -c app/enclave_u.c -o $@
+
+$(App_Enclave_u_Object): app/enclave_u.o
+ @mkdir -p $(CUSTOM_LIBRARY_PATH)
+ @$(AR) rcsD $@ $^
+
+$(App_Name): $(App_Enclave_u_Object) app
+ @mkdir -p $(CUSTOM_BIN_PATH)
+ @cp $(App_Rust_Path)/app $(CUSTOM_BIN_PATH)
+ @echo "LINK => $@"
+
+######## Enclave Objects ########
+
+enclave/enclave_t.o: $(Enclave_EDL_Files)
+ @$(CC) $(RustEnclave_C_Flags) -c enclave/enclave_t.c -o $@
+
+$(RustEnclave_Name): enclave/enclave_t.o enclave
+ @mkdir -p $(CUSTOM_LIBRARY_PATH)
+ @mkdir -p $(CUSTOM_BIN_PATH)
+ @cp $(RustEnclave_Lib_Name) $(CUSTOM_LIBRARY_PATH)/libenclave.a
+ @$(CXX) enclave/enclave_t.o -o $@ $(RustEnclave_Link_Flags)
+ @echo "LINK => $@"
+
+$(RustEnclave_Signed_Name): $(RustEnclave_Name) enclave/config.xml
+ @$(SGX_ENCLAVE_SIGNER) sign -key enclave/private.pem -enclave $(RustEnclave_Name) -out $@ -config enclave/config.xml
+ @echo "SIGN => $@"
+
+######## Build App ########
+
+.PHONY: app
+app:
+ @cd app && SGX_SDK=$(SGX_SDK) cargo build $(App_Rust_Flags)
+
+######## Build Enclave ########
+
+.PHONY: enclave
+enclave:
+ifeq ($(BUILD_STD), cargo)
+ @cd $(Rust_Target_Path)/std && cargo build $(Rust_Build_Std) $(Rust_Target_Flags) $(Rust_Std_Features)
+
+ @rm -rf $(Rust_Sysroot_Path)
+ @mkdir -p $(Rust_Sysroot_Path)/lib/rustlib/$(Rust_Build_Target)/lib
+ @cp -r $(Rust_Target_Path)/std/target/$(Rust_Build_Target)/$(Rust_Build_Out)/deps/* $(Rust_Sysroot_Path)/lib/rustlib/$(Rust_Build_Target)/lib
+
+ @cd enclave && $(Rust_Sysroot_Flags) cargo build $(Rust_Target_Flags) $(RustEnclave_Build_Flags)
+
+else ifeq ($(BUILD_STD), xargo)
+ @cd enclave && RUST_TARGET_PATH=$(Rust_Target_Path) xargo build --target $(Rust_Build_Target) $(RustEnclave_Build_Flags)
+else
+ @cd enclave && cargo build $(RustEnclave_Build_Flags)
+endif
+
+######## Run Enclave ########
+
+.PHONY: run
+run: $(App_Name) $(RustEnclave_Signed_Name)
+ @echo -e '\n===== Run Enclave =====\n'
+ @cd bin && ./app
+
+.PHONY: clean
+clean:
+ @rm -f $(App_Name) $(RustEnclave_Name) $(RustEnclave_Signed_Name) enclave/*_t.* app/*_u.*
+ @cd enclave && cargo clean
+ @cd app && cargo clean
+ @cd $(Rust_Target_Path)/std && cargo clean
+ @rm -rf $(CUSTOM_BIN_PATH) $(CUSTOM_LIBRARY_PATH) $(CUSTOM_SYSROOT_PATH)
diff --git a/samplecode/emm/app/Cargo.toml b/samplecode/emm/app/Cargo.toml
new file mode 100644
index 000000000..a6370f740
--- /dev/null
+++ b/samplecode/emm/app/Cargo.toml
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[package]
+name = "app"
+version = "1.0.0"
+authors = ["The Teaclave Authors"]
+edition = "2021"
+
+[dependencies]
+sgx_types = { path = "../../../sgx_types" }
+sgx_urts = { path = "../../../sgx_urts" }
+
+[profile.dev]
+opt-level = 0
+debug = true
\ No newline at end of file
diff --git a/samplecode/emm/app/build.rs b/samplecode/emm/app/build.rs
new file mode 100644
index 000000000..23b8f5e70
--- /dev/null
+++ b/samplecode/emm/app/build.rs
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use std::env;
+
+fn main() {
+ println!("cargo:rerun-if-env-changed=SGX_MODE");
+ println!("cargo:rerun-if-changed=build.rs");
+
+ let sdk_dir = env::var("SGX_SDK").unwrap_or_else(|_| "/opt/intel/sgxsdk".to_string());
+ let mode = env::var("SGX_MODE").unwrap_or_else(|_| "HW".to_string());
+
+ println!("cargo:rustc-link-search=native=../lib");
+ println!("cargo:rustc-link-lib=static=enclave_u");
+
+ println!("cargo:rustc-link-search=native={}/lib64", sdk_dir);
+ match mode.as_ref() {
+ "SIM" | "SW" => println!("cargo:rustc-link-lib=dylib=sgx_urts_sim"),
+ "HYPER" => println!("cargo:rustc-link-lib=dylib=sgx_urts_hyper"),
+ "HW" => println!("cargo:rustc-link-lib=dylib=sgx_urts"),
+ _ => println!("cargo:rustc-link-lib=dylib=sgx_urts"),
+ }
+}
diff --git a/samplecode/emm/app/src/main.rs b/samplecode/emm/app/src/main.rs
new file mode 100644
index 000000000..6946fcd41
--- /dev/null
+++ b/samplecode/emm/app/src/main.rs
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+extern crate sgx_types;
+extern crate sgx_urts;
+
+use sgx_types::error::SgxStatus;
+use sgx_types::types::*;
+use sgx_urts::enclave::SgxEnclave;
+
+static ENCLAVE_FILE: &str = "enclave.signed.so";
+
+extern "C" {
+ fn ecall_test_sgx_mm_unsafe(eid: EnclaveId, retval: *mut SgxStatus) -> SgxStatus;
+}
+
+fn main() {
+ let enclave = match SgxEnclave::create(ENCLAVE_FILE, true) {
+ Ok(enclave) => {
+ println!("[+] Init Enclave Successful {}!", enclave.eid());
+ enclave
+ }
+ Err(err) => {
+ println!("[-] Init Enclave Failed {}!", err.as_str());
+ return;
+ }
+ };
+
+ // let input_string = String::from("This is a normal world string passed into Enclave!\n");
+ let mut retval = SgxStatus::Success;
+
+ let result = unsafe { ecall_test_sgx_mm_unsafe(enclave.eid(), &mut retval) };
+ match result {
+ SgxStatus::Success => println!("[+] ECall Success..."),
+ _ => println!("[-] ECall Enclave Failed {}!", result.as_str()),
+ }
+}
diff --git a/samplecode/emm/enclave/Cargo.toml b/samplecode/emm/enclave/Cargo.toml
new file mode 100644
index 000000000..e6e7d1f15
--- /dev/null
+++ b/samplecode/emm/enclave/Cargo.toml
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[package]
+name = "emmtest"
+version = "1.0.0"
+authors = ["The Teaclave Authors"]
+edition = "2021"
+
+[lib]
+name = "emmtest"
+crate-type = ["staticlib"]
+
+[features]
+default = []
+
+[target.'cfg(not(target_vendor = "teaclave"))'.dependencies]
+sgx_types = { path = "../../../sgx_types"}
+sgx_tstd = { path = "../../../sgx_tstd", features = ["thread"]}
+sgx_trts = { path = "../../../sgx_trts" }
diff --git a/samplecode/emm/enclave/Xargo.toml b/samplecode/emm/enclave/Xargo.toml
new file mode 100644
index 000000000..d1d0e6d1b
--- /dev/null
+++ b/samplecode/emm/enclave/Xargo.toml
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[dependencies]
+alloc = {}
+
+[dependencies.std]
+path = "../../../rustlib/std"
+stage = 1
diff --git a/samplecode/emm/enclave/config.xml b/samplecode/emm/enclave/config.xml
new file mode 100644
index 000000000..36f822e6c
--- /dev/null
+++ b/samplecode/emm/enclave/config.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- NOTE(review): XML tags were lost in extraction; reconstructed around the surviving values — verify against the repository -->
+<EnclaveConfiguration>
+  <ProdID>0</ProdID>
+  <ISVSVN>0</ISVSVN>
+  <TCSMaxNum>250</TCSMaxNum>
+  <TCSNum>3</TCSNum>
+  <TCSPolicy>0</TCSPolicy>
+  <TCSMinPool>250</TCSMinPool>
+
+  <StackMaxSize>0x10000</StackMaxSize>
+  <StackMinSize>0x4000</StackMinSize>
+  <HeapMaxSize>0xF0000000</HeapMaxSize>
+  <HeapMinSize>0x9000</HeapMinSize>
+  <HeapInitSize>0x08000</HeapInitSize>
+  <ReservedMemMaxSize>0x90000000</ReservedMemMaxSize>
+  <DisableDebug>0</DisableDebug>
+  <MiscSelect>1</MiscSelect>
+  <MiscMask>0xFFFFFFFF</MiscMask>
+</EnclaveConfiguration>
diff --git a/samplecode/emm/enclave/enclave.edl b/samplecode/emm/enclave/enclave.edl
new file mode 100644
index 000000000..8e60d0ad7
--- /dev/null
+++ b/samplecode/emm/enclave/enclave.edl
@@ -0,0 +1,35 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+enclave {
+ from "sgx_stdio.edl" import *;
+ from "sgx_tstd.edl" import *;
+ from "sgx_thread.edl" import *;
+
+ // from "sgx_tstdc.edl" import sgx_thread_wait_untrusted_event_ocall, sgx_thread_set_untrusted_event_ocall;
+ trusted {
+ // public sgx_status_t ecall_test_sgx_mm(int seq_id);
+ public sgx_status_t ecall_test_sgx_mm_unsafe(void);
+ // public size_t ecall_alloc_context(void);
+ // public sgx_status_t ecall_check_context(size_t tcs);
+ // public sgx_status_t ecall_dealloc_context(size_t tcs);
+ };
+ // untrusted {
+ // void ocall_print_string([in, string] const char *str);
+ // };
+
+};
\ No newline at end of file
diff --git a/samplecode/emm/enclave/enclave.lds b/samplecode/emm/enclave/enclave.lds
new file mode 100644
index 000000000..bdb7a4b53
--- /dev/null
+++ b/samplecode/emm/enclave/enclave.lds
@@ -0,0 +1,12 @@
+enclave.so
+{
+ global:
+ g_global_data_hyper;
+ g_global_data_sim;
+ g_global_data;
+ enclave_entry;
+ g_peak_heap_used;
+ g_peak_rsrv_mem_committed;
+ local:
+ *;
+};
diff --git a/samplecode/emm/enclave/private.pem b/samplecode/emm/enclave/private.pem
new file mode 100644
index 000000000..529d07be3
--- /dev/null
+++ b/samplecode/emm/enclave/private.pem
@@ -0,0 +1,39 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIG4gIBAAKCAYEAroOogvsj/fZDZY8XFdkl6dJmky0lRvnWMmpeH41Bla6U1qLZ
+AmZuyIF+mQC/cgojIsrBMzBxb1kKqzATF4+XwPwgKz7fmiddmHyYz2WDJfAjIveJ
+ZjdMjM4+EytGlkkJ52T8V8ds0/L2qKexJ+NBLxkeQLfV8n1mIk7zX7jguwbCG1Pr
+nEMdJ3Sew20vnje+RsngAzdPChoJpVsWi/K7cettX/tbnre1DL02GXc5qJoQYk7b
+3zkmhz31TgFrd9VVtmUGyFXAysuSAb3EN+5VnHGr0xKkeg8utErea2FNtNIgua8H
+ONfm9Eiyaav1SVKzPHlyqLtcdxH3I8Wg7yqMsaprZ1n5A1v/levxnL8+It02KseD
+5HqV4rf/cImSlCt3lpRg8U5E1pyFQ2IVEC/XTDMiI3c+AR+w2jSRB3Bwn9zJtFlW
+KHG3m1xGI4ck+Lci1JvWWLXQagQSPtZTsubxTQNx1gsgZhgv1JHVZMdbVlAbbRMC
+1nSuJNl7KPAS/VfzAgEDAoIBgHRXxaynbVP5gkO0ug6Qw/E27wzIw4SmjsxG6Wpe
+K7kfDeRskKxESdsA/xCrKkwGwhcx1iIgS5+Qscd1Yg+1D9X9asd/P7waPmWoZd+Z
+AhlKwhdPsO7PiF3e1AzHhGQwsUTt/Y/aSI1MpHBvy2/s1h9mFCslOUxTmWw0oj/Q
+ldIEgWeNR72CE2+jFIJIyml6ftnb6qzPiga8Bm48ubKh0kvySOqnkmnPzgh+JBD6
+JnBmtZbfPT97bwTT+N6rnPqOOApvfHPf15kWI8yDbprG1l4OCUaIUH1AszxLd826
+5IPM+8gINLRDP1MA6azECPjTyHXhtnSIBZCyWSVkc05vYmNXYUNiXWMajcxW9M02
+wKzFELO8NCEAkaTPxwo4SCyIjUxiK1LbQ9h8PSy4c1+gGP4LAMR8xqP4QKg6zdu9
+osUGG/xRe/uufgTBFkcjqBHtK5L5VI0jeNIUAgW/6iNbYXjBMJ0GfauLs+g1VsOm
+WfdgXzsb9DYdMa0OXXHypmV4GwKBwQDUwQj8RKJ6c8cT4vcWCoJvJF00+RFL+P3i
+Gx2DLERxRrDa8AVGfqaCjsR+3vLgG8V/py+z+dxZYSqeB80Qeo6PDITcRKoeAYh9
+xlT3LJOS+k1cJcEmlbbO2IjLkTmzSwa80fWexKu8/Xv6vv15gpqYl1ngYoqJM3pd
+vzmTIOi7MKSZ0WmEQavrZj8zK4endE3v0eAEeQ55j1GImbypSf7Idh7wOXtjZ7WD
+Dg6yWDrri+AP/L3gClMj8wsAxMV4ZR8CgcEA0fzDHkFa6raVOxWnObmRoDhAtE0a
+cjUj976NM5yyfdf2MrKy4/RhdTiPZ6b08/lBC/+xRfV3xKVGzacm6QjqjZrUpgHC
+0LKiZaMtccCJjLtPwQd0jGQEnKfMFaPsnhOc5y8qVkCzVOSthY5qhz0XNotHHFmJ
+gffVgB0iqrMTvSL7IA2yqqpOqNRlhaYhNl8TiFP3gIeMtVa9rZy31JPgT2uJ+kfo
+gV7sdTPEjPWZd7OshGxWpT6QfVDj/T9T7L6tAoHBAI3WBf2DFvxNL2KXT2QHAZ9t
+k3imC4f7U+wSE6zILaDZyzygA4RUbwG0gv8/TJVn2P/Eynf76DuWHGlaiLWnCbSz
+Az2DHBQBBaku409zDQym3j1ugMRjzzSQWzJg0SIyBH3hTmnYcn3+Uqcp/lEBvGW6
+O+rsXFt3pukqJmIV8HzLGGaLm62BHUeZf3dyWm+i3p/hQAL7Xvu04QW70xuGqdr5
+afV7p5eaeQIJXyGQJ0eylV/90+qxjMKiB1XYg6WYvwKBwQCL/ddpgOdHJGN8uRom
+e7Zq0Csi3hGheMKlKbN3vcxT5U7MdyHtTZZOJbTvxKNNUNYH/8uD+PqDGNneb29G
+BfGzvI3EASyLIcGZF3OhKwZd0jUrWk2y7Vhob91jwp2+t73vdMbkKyI4mHOuXvGv
+fg95si9oO7EBT+Oqvhccd2J+F1IVXncccYnF4u5ZGWt5lLewN/pVr7MjjykeaHqN
+t+rfnQam2psA6fL4zS2zTmZPzR2tnY8Y1GBTi0Ko1OKd1HMCgcAb5cB/7/AQlhP9
+yQa04PLH9ygQkKKptZp7dy5WcWRx0K/hAHRoi2aw1wZqfm7VBNu2SLcs90kCCCxp
+6C5sfJi6b8NpNbIPC+sc9wsFr7pGo9SFzQ78UlcWYK2Gu2FxlMjonhka5hvo4zvg
+WxlpXKEkaFt3gLd92m/dMqBrHfafH7VwOJY2zT3WIpjwuk0ZzmRg5p0pG/svVQEH
+NZmwRwlopysbR69B/n1nefJ84UO50fLh5s5Zr3gBRwbWNZyzhXk=
+-----END RSA PRIVATE KEY-----
diff --git a/samplecode/emm/enclave/src/lib.rs b/samplecode/emm/enclave/src/lib.rs
new file mode 100644
index 000000000..0bc3aae01
--- /dev/null
+++ b/samplecode/emm/enclave/src/lib.rs
@@ -0,0 +1,330 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+#![cfg_attr(not(target_vendor = "teaclave"), no_std)]
+#![cfg_attr(target_vendor = "teaclave", feature(rustc_private))]
+#![feature(pointer_byte_offsets)]
+
+#[cfg(not(target_vendor = "teaclave"))]
+#[macro_use]
+extern crate sgx_tstd as std;
+extern crate sgx_trts;
+extern crate sgx_types;
+
+use core::ffi::c_void;
+use sgx_trts::emm::{self, AllocFlags, EmaOptions, PageType, PfInfo, ProtFlags};
+use sgx_trts::veh::HandleResult;
+use sgx_types::error::errno::{EACCES, EEXIST, EINVAL, EPERM};
+use sgx_types::error::SgxStatus;
+use std::io::{self, Write};
+use std::slice;
+use std::string::String;
+use std::string::ToString;
+use std::thread;
+use std::vec::Vec;
+
+const ALLOC_SIZE: usize = 0x2000;
+const SE_PAGE_SIZE: usize = 0x1000;
+
+#[no_mangle]
+fn ecall_test_sgx_mm_unsafe() -> SgxStatus {
+ let input_string = "Enclave memory management test: \n";
+ unsafe {
+ say_something(input_string.as_ptr(), input_string.len());
+ }
+ test_emm_alloc_dealloc();
+ test_stack_expand();
+ test_commit_and_uncommit();
+ test_modify_types();
+ test_dynamic_expand_tcs();
+ test_modify_perms()
+}
+
+#[derive(Clone, Copy, Default)]
+struct PfData {
+ pf: PfInfo,
+ access: i32,
+ addr_expected: usize,
+}
+
+pub extern "C" fn permission_pfhandler(info: &mut PfInfo, priv_data: *mut c_void) -> HandleResult {
+ let mut pd = unsafe { &mut *(priv_data as *mut PfData) };
+ pd.pf = *info;
+
+ let addr = pd.pf.maddr as usize;
+ let prot = ProtFlags::from_bits(pd.access as u8).unwrap();
+ let rw_bit = unsafe { pd.pf.pfec.bits.rw() };
+ if (rw_bit == 1) && (prot == ProtFlags::W) {
+ if emm::mm_modify_perms(addr, SE_PAGE_SIZE, ProtFlags::W | ProtFlags::R).is_err() {
+ panic!()
+ };
+ } else if (rw_bit == 0) && prot.contains(ProtFlags::R) {
+ if emm::mm_modify_perms(addr, SE_PAGE_SIZE, prot).is_err() {
+ panic!()
+ };
+ } else {
+ panic!()
+ }
+
+ HandleResult::Execution
+}
+
+#[no_mangle]
+fn test_modify_perms() -> SgxStatus {
+ let mut pd = PfData::default();
+ // example 1:
+ let mut options = EmaOptions::new(None, ALLOC_SIZE, AllocFlags::COMMIT_NOW);
+ options.handle(
+ Some(permission_pfhandler),
+ Some(&mut pd as *mut PfData as *mut c_void),
+ );
+ let base = emm::mm_alloc_user(&mut options).unwrap();
+
+ let data = unsafe { (base as *const u8).read() };
+ assert!(data == 0);
+
+ // read success without PF
+ assert!(unsafe { pd.pf.pfec.errcd } == 0);
+ unsafe { (base as *mut u8).write(0xFF) };
+
+ // write success without PF
+ assert!(unsafe { pd.pf.pfec.errcd } == 0);
+
+ let res = emm::mm_modify_perms(base, ALLOC_SIZE / 2, ProtFlags::R);
+ assert!(res.is_ok());
+
+ pd.access = ProtFlags::R.bits() as i32;
+ let data = unsafe { (base as *const u8).read() };
+ assert!(data == 0xFF);
+ // read success without PF
+ assert!(unsafe { pd.pf.pfec.errcd } == 0);
+
+ pd.access = ProtFlags::W.bits() as i32;
+ let count = (ALLOC_SIZE - 1) as isize;
+ unsafe {
+ let ptr = (base as *mut u8).byte_offset(count);
+ ptr.write(0xFF);
+ };
+ // write success without PF
+ assert!(unsafe { pd.pf.pfec.errcd } == 0);
+
+ pd.access = ProtFlags::W.bits() as i32;
+ unsafe { (base as *mut u8).write(0xFF) };
+ // write success with PF
+ assert!(unsafe { pd.pf.pfec.errcd } != 0);
+
+ // write indicated with PFEC
+ assert!(unsafe { pd.pf.pfec.bits.rw() } == 1);
+
+ println!("Successfully run modify permissions and customized page fault handler!");
+ SgxStatus::Success
+}
+
+#[no_mangle]
+fn test_dynamic_expand_tcs() -> SgxStatus {
+ thread::Builder::new()
+ .name("thread1".to_string())
+ .spawn(move || {
+ println!("Hello, this is a spawned thread!");
+ })
+ .expect("Failed to create thread!");
+
+ for _ in 0..40 {
+ let _t = thread::spawn(move || {
+ println!("Hello, this is a spawned thread!");
+ });
+ }
+
+ println!("Successfully dynamic expand tcs!");
+ SgxStatus::Success
+}
+
+#[no_mangle]
+fn test_modify_types() -> SgxStatus {
+ // example 1:
+ let mut options = EmaOptions::new(None, SE_PAGE_SIZE, AllocFlags::COMMIT_NOW);
+ let base = emm::mm_alloc_user(&mut options).unwrap();
+
+ let res = emm::mm_modify_type(base, SE_PAGE_SIZE, PageType::Tcs);
+ assert!(res.is_ok());
+
+ let res = emm::mm_uncommit(base, SE_PAGE_SIZE);
+ assert!(res.is_ok());
+
+ // example 2:
+ let mut options = EmaOptions::new(None, SE_PAGE_SIZE, AllocFlags::COMMIT_NOW);
+ let base = emm::mm_alloc_user(&mut options).unwrap();
+
+ let res = emm::mm_modify_perms(base, SE_PAGE_SIZE, ProtFlags::NONE);
+ assert!(res.is_ok());
+
+ let res = emm::mm_uncommit(base, SE_PAGE_SIZE);
+ assert!(res.is_ok());
+
+ // example 3:
+ let res = emm::mm_dealloc(0, ALLOC_SIZE);
+ assert!(res == Err(EINVAL));
+
+ let mut options = EmaOptions::new(None, ALLOC_SIZE, AllocFlags::COMMIT_NOW);
+ let base = emm::mm_alloc_user(&mut options).unwrap();
+
+ let res = emm::mm_modify_type(base + SE_PAGE_SIZE, SE_PAGE_SIZE, PageType::Frist);
+ assert!(res == Err(EPERM));
+
+ let res = emm::mm_modify_perms(
+ base + SE_PAGE_SIZE,
+ SE_PAGE_SIZE,
+ ProtFlags::R | ProtFlags::X,
+ );
+ assert!(res.is_ok());
+
+ let res = emm::mm_modify_type(base + SE_PAGE_SIZE, SE_PAGE_SIZE, PageType::Tcs);
+ assert!(res == Err(EACCES));
+
+ let res = emm::mm_modify_type(base, SE_PAGE_SIZE, PageType::Tcs);
+ assert!(res.is_ok());
+
+ let res = emm::mm_uncommit(base, ALLOC_SIZE);
+ assert!(res.is_ok());
+
+ let res = emm::mm_modify_type(base, SE_PAGE_SIZE, PageType::Tcs);
+ assert!(res == Err(EACCES));
+
+ let res = emm::mm_dealloc(base, ALLOC_SIZE);
+ assert!(res.is_ok());
+
+ println!("Successfully run modify types!");
+ SgxStatus::Success
+}
+
+#[no_mangle]
+fn test_commit_and_uncommit() -> SgxStatus {
+ let res = emm::mm_dealloc(0, ALLOC_SIZE);
+ assert!(res == Err(EINVAL));
+
+ let mut options = EmaOptions::new(None, ALLOC_SIZE, AllocFlags::COMMIT_NOW);
+ let base = emm::mm_alloc_user(&mut options).unwrap();
+
+ let res = emm::mm_commit(base, ALLOC_SIZE);
+ assert!(res.is_ok());
+
+ let mut options = EmaOptions::new(
+ Some(base),
+ ALLOC_SIZE,
+ AllocFlags::COMMIT_NOW | AllocFlags::FIXED,
+ );
+ let res = emm::mm_alloc_user(&mut options);
+
+ assert!(res == Err(EEXIST));
+
+ let res = emm::mm_uncommit(base, ALLOC_SIZE);
+ assert!(res.is_ok());
+
+ let res = emm::mm_uncommit(base, ALLOC_SIZE);
+ assert!(res.is_ok());
+
+ let res = emm::mm_commit(base, ALLOC_SIZE);
+ assert!(res.is_ok());
+
+ let res = emm::mm_dealloc(base, ALLOC_SIZE);
+ assert!(res.is_ok());
+
+ let res = emm::mm_dealloc(base, ALLOC_SIZE);
+ assert!(res == Err(EINVAL));
+
+ let res = emm::mm_uncommit(base, ALLOC_SIZE);
+ assert!(res == Err(EINVAL));
+
+ let mut options = EmaOptions::new(
+ None,
+ ALLOC_SIZE,
+ AllocFlags::COMMIT_ON_DEMAND | AllocFlags::FIXED,
+ );
+ let base2 = emm::mm_alloc_user(&mut options).unwrap();
+
+ assert!(base == base2);
+
+ let ptr = base2 as *mut u8;
+ unsafe {
+ ptr.write(0xFF);
+ ptr.add(ALLOC_SIZE - 1).write(0xFF);
+ };
+
+ let res = emm::mm_dealloc(base2, ALLOC_SIZE);
+ assert!(res.is_ok());
+
+ println!("Successfully run commit and uncommit!");
+ SgxStatus::Success
+}
+
+#[no_mangle]
+fn test_stack_expand() -> SgxStatus {
+ const STATIC_REGION: usize = 0x8000;
+ let mut buf = [0_u8; STATIC_REGION];
+ for (idx, item) in buf.iter_mut().enumerate() {
+ *item = (idx % 256) as u8;
+ }
+ for (idx, item) in buf.iter().enumerate() {
+ assert!(*item == (idx % 256) as u8);
+ }
+ println!("Successfully expand stack!");
+ SgxStatus::Success
+}
+
+#[no_mangle]
+fn test_emm_alloc_dealloc() -> SgxStatus {
+ let res = emm::mm_dealloc(0, ALLOC_SIZE);
+ assert!(res == Err(EINVAL));
+
+ let mut options = EmaOptions::new(None, ALLOC_SIZE, AllocFlags::COMMIT_NOW);
+ let base = emm::mm_alloc_user(&mut options).unwrap();
+
+ let res = emm::mm_dealloc(base, ALLOC_SIZE);
+ assert!(res.is_ok());
+ println!("Successfully run alloc and dealloc!");
+ SgxStatus::Success
+}
+
+/// # Safety
+#[no_mangle]
+unsafe fn say_something(some_string: *const u8, some_len: usize) -> SgxStatus {
+ let str_slice = slice::from_raw_parts(some_string, some_len);
+ let _ = io::stdout().write(str_slice);
+
+ // A sample &'static string
+ let rust_raw_string = "This is a in-Enclave ";
+ // An array
+ let word: [u8; 4] = [82, 117, 115, 116];
+    // A vector
+    let word_vec: Vec<u8> = vec![32, 115, 116, 114, 105, 110, 103, 33];
+
+ // Construct a string from &'static string
+ let mut hello_string = String::from(rust_raw_string);
+
+ // Iterate on word array
+ for c in word.iter() {
+ hello_string.push(*c as char);
+ }
+
+    // Rust style conversion
+ hello_string += String::from_utf8(word_vec).expect("Invalid UTF-8").as_str();
+
+ // Ocall to normal world for output
+ println!("{}", &hello_string);
+
+ SgxStatus::Success
+}
diff --git a/sgx_libc/sgx_tlibc_sys/tlibc/gen/sbrk.c b/sgx_libc/sgx_tlibc_sys/tlibc/gen/sbrk.c
index 203714f09..91ee5312d 100644
--- a/sgx_libc/sgx_tlibc_sys/tlibc/gen/sbrk.c
+++ b/sgx_libc/sgx_tlibc_sys/tlibc/gen/sbrk.c
@@ -122,7 +122,7 @@ void* sbrk(intptr_t n)
size = prev_heap_used - heap_min_size;
}
assert((size & (SE_PAGE_SIZE - 1)) == 0);
- int ret = sgx_trim_epc_pages(start_addr, size >> SE_PAGE_SHIFT);
+ int ret = sgx_uncommit_rts_pages(start_addr, size >> SE_PAGE_SHIFT);
if (ret != 0)
{
heap_used = prev_heap_used;
@@ -166,7 +166,7 @@ void* sbrk(intptr_t n)
size = heap_used - heap_min_size;
}
assert((size & (SE_PAGE_SIZE - 1)) == 0);
- int ret = sgx_apply_epc_pages(start_addr, size >> SE_PAGE_SHIFT);
+ int ret = sgx_commit_rts_pages(start_addr, size >> SE_PAGE_SHIFT);
if (ret != 0)
{
heap_used = prev_heap_used;
diff --git a/sgx_rsrvmm/src/rsrvmm/area.rs b/sgx_rsrvmm/src/rsrvmm/area.rs
index 177653031..95e4d566f 100644
--- a/sgx_rsrvmm/src/rsrvmm/area.rs
+++ b/sgx_rsrvmm/src/rsrvmm/area.rs
@@ -22,10 +22,9 @@ use alloc_crate::vec::Vec;
use core::any::TypeId;
use core::cmp::{self, Ordering};
use core::convert::From;
-use core::fmt;
use core::ops::{Deref, DerefMut};
-use sgx_trts::edmm::{modpr_ocall, mprotect_ocall};
-use sgx_trts::edmm::{PageFlags, PageInfo, PageRange, PageType};
+use core::{fmt, panic};
+use sgx_trts::emm::{mm_modify_perms, ProtFlags};
use sgx_trts::trts;
use sgx_types::error::errno::*;
use sgx_types::error::OsResult;
@@ -63,6 +62,18 @@ impl Default for MmPerm {
}
}
+impl From<MmPerm> for ProtFlags {
+ fn from(p: MmPerm) -> ProtFlags {
+ match p {
+ MmPerm::None => ProtFlags::NONE,
+ MmPerm::R => ProtFlags::R,
+ MmPerm::RW => ProtFlags::RW,
+ MmPerm::RX => ProtFlags::RX,
+ MmPerm::RWX => ProtFlags::RWX,
+ }
+ }
+}
+
 impl From<ProtectPerm> for MmPerm {
fn from(perm: ProtectPerm) -> MmPerm {
match perm {
@@ -350,38 +361,17 @@ impl MmArea {
}
let count = self.size() >> SE_PAGE_SHIFT;
- let perm: ProtectPerm = new_perm.into();
+ let prot: ProtFlags = new_perm.into();
if trts::is_supported_edmm() {
let (pe_needed, pr_needed) = self.is_needed_modify_perm(new_perm)?;
if pe_needed || pr_needed {
- modpr_ocall(self.start(), count, perm).unwrap();
- }
-
- let pages = PageRange::new(
- self.start(),
- count,
- PageInfo {
- typ: PageType::Reg,
- flags: PageFlags::from_bits_truncate(perm.into()) | PageFlags::PR,
- },
- )
- .map_err(|_| EINVAL)?;
-
- if pe_needed {
- let _ = pages.modpe();
- }
-
- if pr_needed && new_perm != MmPerm::RWX {
- let _ = pages.accept_forward();
- }
-
- if pr_needed && new_perm == MmPerm::None {
- mprotect_ocall(self.start(), count, perm).unwrap();
+ let res = mm_modify_perms(self.start(), count << SE_PAGE_SHIFT, prot);
+ if res.is_err() {
+ panic!()
+ }
}
- } else {
- mprotect_ocall(self.start(), count, perm).unwrap();
}
self.perm = new_perm;
diff --git a/sgx_rsrvmm/src/rsrvmm/mod.rs b/sgx_rsrvmm/src/rsrvmm/mod.rs
index dbae758ac..821085ab2 100644
--- a/sgx_rsrvmm/src/rsrvmm/mod.rs
+++ b/sgx_rsrvmm/src/rsrvmm/mod.rs
@@ -20,7 +20,7 @@ use self::manager::{MmAllocAddr, MmManager};
use self::range::MmRange;
use crate::map::{Map, MapObject};
use sgx_sync::{Once, StaticMutex};
-use sgx_trts::edmm;
+use sgx_trts::emm;
use sgx_trts::trts::{self, MmLayout};
use sgx_types::error::errno::*;
use sgx_types::error::OsResult;
@@ -161,7 +161,7 @@ impl RsrvMem {
)
};
- let ret = edmm::apply_epc_pages(start_addr, size >> SE_PAGE_SHIFT);
+ let ret = emm::mm_commit(start_addr, size >> SE_PAGE_SHIFT);
if ret.is_err() {
self.committed_size = pre_committed;
bail!(ENOMEM);
diff --git a/sgx_trts/Cargo.toml b/sgx_trts/Cargo.toml
index 82282e683..cb8ce89a1 100644
--- a/sgx_trts/Cargo.toml
+++ b/sgx_trts/Cargo.toml
@@ -34,8 +34,14 @@ default = []
thread = []
sim = ["sgx_types/sim"]
hyper = ["sgx_types/hyper"]
+emm_test = []
[dependencies]
sgx_types = { path = "../sgx_types" }
sgx_crypto_sys = { path = "../sgx_crypto/sgx_crypto_sys" }
sgx_tlibc_sys = { path = "../sgx_libc/sgx_tlibc_sys" }
+
+intrusive-collections = { git = "https://github.com/ClawSeven/intrusive-rs.git", rev = "152317d" }
+buddy_system_allocator = "0.9.0"
+spin = "0.9.4"
+bitflags = "1.3"
diff --git a/sgx_trts/src/arch.rs b/sgx_trts/src/arch.rs
index d36fb09a9..a6ea03de8 100644
--- a/sgx_trts/src/arch.rs
+++ b/sgx_trts/src/arch.rs
@@ -17,7 +17,7 @@
#![allow(clippy::enum_variant_names)]
-use crate::edmm::{self, PageType};
+use crate::emm::{self, PageType};
use crate::tcs::tc;
use crate::version::*;
use crate::xsave;
@@ -40,12 +40,25 @@ macro_rules! is_page_aligned {
};
}
+// rounds to up
+macro_rules! round_to {
+ ($num:expr, $align:expr) => {
+ ($num + $align - 1) & (!($align - 1))
+ };
+}
+
macro_rules! round_to_page {
($num:expr) => {
($num + crate::arch::SE_PAGE_SIZE - 1) & (!(crate::arch::SE_PAGE_SIZE - 1))
};
}
+macro_rules! trim_to {
+ ($num:expr, $align:expr) => {
+ $num & (!($align - 1))
+ };
+}
+
macro_rules! trim_to_page {
($num:expr) => {
$num & (!(crate::arch::SE_PAGE_SIZE - 1))
@@ -553,6 +566,14 @@ pub const SI_FLAGS_SECS: u64 = SI_FLAG_SECS;
pub const SI_MASK_TCS: u64 = SI_FLAG_PT_MASK;
pub const SI_MASK_MEM_ATTRIBUTE: u64 = 0x7;
+pub const SGX_EMA_PROT_NONE: u64 = 0x0;
+pub const SGX_EMA_PROT_READ: u64 = 0x1;
+pub const SGX_EMA_PROT_WRITE: u64 = 0x2;
+pub const SGX_EMA_PROT_EXEC: u64 = 0x4;
+pub const SGX_EMA_PROT_READ_WRITE: u64 = SGX_EMA_PROT_READ | SGX_EMA_PROT_WRITE;
+pub const SGX_EMA_PROT_READ_EXEC: u64 = SGX_EMA_PROT_READ | SGX_EMA_PROT_EXEC;
+pub const SGX_EMA_PROT_READ_WRITE_EXEC: u64 = SGX_EMA_PROT_READ_WRITE | SGX_EMA_PROT_EXEC;
+
#[repr(C, packed)]
#[derive(Clone, Copy)]
pub struct OCallContext {
@@ -723,11 +744,11 @@ impl From for SecInfoFlags {
}
}
-impl From<edmm::PageInfo> for SecInfoFlags {
- fn from(data: edmm::PageInfo) -> SecInfoFlags {
+impl From<emm::PageInfo> for SecInfoFlags {
+ fn from(data: emm::PageInfo) -> SecInfoFlags {
let typ = data.typ as u64;
- let flags = data.flags.bits() as u64;
- SecInfoFlags::from_bits_truncate((typ << 8) | flags)
+ let prot = data.prot.bits() as u64;
+ SecInfoFlags::from_bits_truncate((typ << 8) | prot)
}
}
@@ -786,33 +807,33 @@ impl From for SecInfo {
}
}
-impl From<edmm::PageInfo> for SecInfo {
- fn from(data: edmm::PageInfo) -> SecInfo {
+impl From<emm::PageInfo> for SecInfo {
+ fn from(data: emm::PageInfo) -> SecInfo {
SecInfo::from(SecInfoFlags::from(data))
}
}
#[repr(C, align(32))]
#[derive(Clone, Copy, Debug)]
-pub struct PageInfo {
+pub struct CPageInfo {
pub linaddr: u64,
pub srcpage: u64,
pub secinfo: u64,
pub secs: u64,
}
-impl PageInfo {
- pub const ALIGN_SIZE: usize = mem::size_of::<PageInfo>();
+impl CPageInfo {
+ pub const ALIGN_SIZE: usize = mem::size_of::<CPageInfo>();
}
-impl AsRef<[u8; PageInfo::ALIGN_SIZE]> for PageInfo {
- fn as_ref(&self) -> &[u8; PageInfo::ALIGN_SIZE] {
+impl AsRef<[u8; CPageInfo::ALIGN_SIZE]> for CPageInfo {
+ fn as_ref(&self) -> &[u8; CPageInfo::ALIGN_SIZE] {
unsafe { &*(self as *const _ as *const _) }
}
}
-impl AsRef<Align32<[u8; PageInfo::ALIGN_SIZE]>> for PageInfo {
- fn as_ref(&self) -> &Align32<[u8; PageInfo::ALIGN_SIZE]> {
+impl AsRef<Align32<[u8; CPageInfo::ALIGN_SIZE]>> for CPageInfo {
+ fn as_ref(&self) -> &Align32<[u8; CPageInfo::ALIGN_SIZE]> {
unsafe { &*(self as *const _ as *const _) }
}
}
diff --git a/sgx_trts/src/asm/pic.S b/sgx_trts/src/asm/pic.S
index 47cea6cda..cffd92129 100644
--- a/sgx_trts/src/asm/pic.S
+++ b/sgx_trts/src/asm/pic.S
@@ -24,7 +24,7 @@ __ImageBase:
.equ SE_GUARD_PAGE_SHIFT, 16
.equ SE_GUARD_PAGE_SIZE, (1 << SE_GUARD_PAGE_SHIFT)
.equ RED_ZONE_SIZE, 128
-.equ STATIC_STACK_SIZE, 2656
+.equ STATIC_STACK_SIZE, 4096
.equ OCMD_ERET, -1
diff --git a/sgx_trts/src/call/ecall.rs b/sgx_trts/src/call/ecall.rs
index c9c2f5025..02d90d68c 100644
--- a/sgx_trts/src/call/ecall.rs
+++ b/sgx_trts/src/call/ecall.rs
@@ -272,16 +272,17 @@ pub fn ecall<T>(idx: ECallIndex, tcs: &mut Tcs, ms: *mut T, tidx: usize) -> SgxR
ensure!(is_root_ecall, SgxStatus::ECallNotAllowed);
FIRST_ECALL.call_once(|| {
+ debug_call_once();
// EDMM:
#[cfg(not(any(feature = "sim", feature = "hyper")))]
{
if crate::feature::SysFeatures::get().is_edmm() {
// save all the static tcs into the tcs table. These TCS would be trimmed in the uninit flow.
- crate::edmm::tcs::add_static_tcs()?;
+ crate::emm::tcs::add_static_tcs()?;
// change back the page permission
- crate::edmm::mem::change_perm().map_err(|e| {
- let _ = crate::edmm::tcs::clear_static_tcs();
+ crate::emm::init::change_perm().map_err(|e| {
+ let _ = crate::emm::tcs::clear_static_tcs();
e
})?;
}
@@ -353,3 +354,6 @@ pub fn thread_is_exit() -> bool {
}
}
}
+
+#[no_mangle]
+pub extern "C" fn debug_call_once() {}
diff --git a/sgx_trts/src/call/ocall.rs b/sgx_trts/src/call/ocall.rs
index ea60ea485..207329e60 100644
--- a/sgx_trts/src/call/ocall.rs
+++ b/sgx_trts/src/call/ocall.rs
@@ -34,11 +34,13 @@ pub enum OCallIndex {
TrimCommit,
Modpr,
Mprotect,
+ Alloc,
+ Modify,
}
impl OCallIndex {
pub fn is_builtin_index(index: i32) -> bool {
- (-5..=-2).contains(&index)
+ (-7..=-2).contains(&index)
}
pub fn is_builtin(&self) -> bool {
@@ -62,6 +64,8 @@ impl TryFrom<i32> for OCallIndex {
-3 => Ok(OCallIndex::TrimCommit),
-4 => Ok(OCallIndex::Modpr),
-5 => Ok(OCallIndex::Mprotect),
+ -6 => Ok(OCallIndex::Alloc),
+ -7 => Ok(OCallIndex::Modify),
_ => Err(u8::try_from(256_u16).unwrap_err()),
}
}
@@ -76,6 +80,8 @@ impl From<OCallIndex> for i32 {
OCallIndex::TrimCommit => -3,
OCallIndex::Modpr => -4,
OCallIndex::Mprotect => -5,
+ OCallIndex::Alloc => -6,
+ OCallIndex::Modify => -7,
}
}
}
diff --git a/sgx_trts/src/capi.rs b/sgx_trts/src/capi.rs
index 9cf01743f..22d5fe314 100644
--- a/sgx_trts/src/capi.rs
+++ b/sgx_trts/src/capi.rs
@@ -15,9 +15,17 @@
// specific language governing permissions and limitations
// under the License..
+use crate::arch::{SE_PAGE_SHIFT, SE_PAGE_SIZE};
use crate::call::{ocall, OCallIndex, OcBuffer};
-use crate::edmm::mem::{apply_epc_pages, trim_epc_pages};
-use crate::enclave::{self, MmLayout};
+use crate::emm::ema::EmaOptions;
+use crate::emm::page::AllocFlags;
+use crate::emm::pfhandler::PfHandler;
+use crate::emm::vmmgr::{
+ RangeType, ALLIGNMENT_MASK, ALLIGNMENT_SHIFT, ALLOC_FLAGS_MASK, ALLOC_FLAGS_SHIFT,
+ PAGE_TYPE_MASK, PAGE_TYPE_SHIFT,
+};
+use crate::emm::{self, mm_alloc_user, mm_commit, mm_uncommit, PageInfo, PageType, ProtFlags};
+use crate::enclave::{self, is_within_enclave, MmLayout};
use crate::error;
use crate::rand::rand;
use crate::tcs::{current, stack_size, tcs_max_num, tcs_policy};
@@ -26,8 +34,8 @@ use crate::veh::{register_exception, unregister, ExceptionHandler, Handle};
use core::convert::TryFrom;
use core::ffi::c_void;
use core::num::NonZeroUsize;
-use core::ptr;
use core::slice;
+use core::{mem, ptr};
use sgx_types::error::SgxStatus;
#[inline]
@@ -170,6 +178,136 @@ pub unsafe extern "C" fn sgx_is_outside_enclave(p: *const u8, len: usize) -> i32
i32::from(enclave::is_within_host(p, len))
}
+#[inline]
+#[no_mangle]
+pub unsafe extern "C" fn sgx_commit_rts_pages(addr: usize, count: usize) -> i32 {
+ let len = count << SE_PAGE_SHIFT;
+ match emm::check_addr(addr, len) {
+ Ok(typ) => {
+ if typ != RangeType::Rts {
+ return -1;
+ }
+ }
+ Err(_) => {
+ return -1;
+ }
+ }
+
+ if mm_commit(addr, len).is_ok() {
+ 0
+ } else {
+ -1
+ }
+}
+
+#[inline]
+#[no_mangle]
+pub unsafe extern "C" fn sgx_uncommit_rts_pages(addr: usize, count: usize) -> i32 {
+ let len = count << SE_PAGE_SHIFT;
+ match emm::check_addr(addr, len) {
+ Ok(typ) => {
+ if typ != RangeType::Rts {
+ return -1;
+ }
+ }
+ Err(_) => {
+ return -1;
+ }
+ }
+ if mm_uncommit(addr, len).is_ok() {
+ 0
+ } else {
+ -1
+ }
+}
+
+// TODO: replace inarguments with "C" style arguments
+#[inline]
+#[no_mangle]
+pub unsafe extern "C" fn sgx_mm_alloc(
+ addr: usize,
+ size: usize,
+ flags: usize,
+ handler: *mut c_void,
+ priv_data: *mut c_void,
+ out_addr: *mut *mut u8,
+) -> u32 {
+ let handler = if handler.is_null() {
+ None
+ } else {
+ Some(mem::transmute::<*mut c_void, PfHandler>(handler))
+ };
+
+ let alloc_flags =
+ match AllocFlags::from_bits(((flags & ALLOC_FLAGS_MASK) >> ALLOC_FLAGS_SHIFT) as u32) {
+ Some(flags) => flags,
+ None => {
+ return SgxStatus::InvalidParameter.into();
+ }
+ };
+
+ let mut page_type =
+ match PageType::try_from(((flags & PAGE_TYPE_MASK) >> PAGE_TYPE_SHIFT) as u8) {
+ Ok(typ) => typ,
+ Err(_) => return SgxStatus::InvalidParameter.into(),
+ };
+
+ if page_type == PageType::None {
+ page_type = PageType::Reg;
+ }
+
+ if (size % SE_PAGE_SIZE) > 0 {
+ return SgxStatus::InvalidParameter.into();
+ }
+
+ let mut align_flag: u8 = ((flags & ALLIGNMENT_MASK) >> ALLIGNMENT_SHIFT) as u8;
+ if align_flag == 0 {
+ align_flag = 12;
+ }
+ if align_flag < 12 {
+ return SgxStatus::InvalidParameter.into();
+ }
+ let align_mask: usize = (1 << align_flag) - 1;
+
+ if (addr & align_mask) > 0 {
+ return SgxStatus::InvalidParameter.into();
+ }
+
+ if (addr > 0) && !is_within_enclave(addr as *const u8, size) {
+ return SgxStatus::InvalidParameter.into();
+ }
+
+ let info = if alloc_flags.contains(AllocFlags::RESERVED) {
+ PageInfo {
+ prot: ProtFlags::NONE,
+ typ: PageType::None,
+ }
+ } else {
+ PageInfo {
+ prot: ProtFlags::RW,
+ typ: page_type,
+ }
+ };
+
+ let priv_data = if priv_data.is_null() {
+ None
+ } else {
+ Some(priv_data)
+ };
+
+ let addr = if addr > 0 { Some(addr) } else { None };
+ let mut options = EmaOptions::new(addr, size, alloc_flags);
+ options.info(info).handle(handler, priv_data);
+
+ match mm_alloc_user(&options) {
+ Ok(base) => {
+ *out_addr = base as *mut u8;
+ 0
+ }
+ Err(err) => err as u32,
+ }
+}
+
#[inline]
#[no_mangle]
pub unsafe extern "C" fn sgx_ocall(idx: i32, ms: *mut c_void) -> u32 {
@@ -226,26 +364,6 @@ pub unsafe extern "C" fn sgx_ocremain_size() -> usize {
OcBuffer::remain_size()
}
-#[inline]
-#[no_mangle]
-pub unsafe extern "C" fn sgx_apply_epc_pages(addr: usize, count: usize) -> i32 {
- if apply_epc_pages(addr, count).is_ok() {
- 0
- } else {
- -1
- }
-}
-
-#[inline]
-#[no_mangle]
-pub unsafe extern "C" fn sgx_trim_epc_pages(addr: usize, count: usize) -> i32 {
- if trim_epc_pages(addr, count).is_ok() {
- 0
- } else {
- -1
- }
-}
-
#[allow(clippy::redundant_closure)]
#[inline]
#[no_mangle]
diff --git a/sgx_trts/src/edmm/mem.rs b/sgx_trts/src/edmm/mem.rs
deleted file mode 100644
index 0d6ac634d..000000000
--- a/sgx_trts/src/edmm/mem.rs
+++ /dev/null
@@ -1,236 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License..
-
-cfg_if! {
- if #[cfg(not(any(feature = "sim", feature = "hyper")))] {
- pub use hw::*;
- } else {
- pub use sw::*;
- }
-}
-
-#[cfg(not(any(feature = "sim", feature = "hyper")))]
-mod hw {
- use crate::arch::{self, Layout};
- use crate::edmm::epc::{PageFlags, PageInfo, PageRange, PageType};
- use crate::edmm::layout::LayoutTable;
- use crate::edmm::perm;
- use crate::edmm::trim;
- use crate::elf::program::Type;
- use crate::enclave::parse;
- use crate::enclave::MmLayout;
- use crate::feature::{SysFeatures, Version};
- use core::convert::TryFrom;
- use sgx_types::error::{SgxResult, SgxStatus};
- use sgx_types::types::ProtectPerm;
-
- pub fn apply_epc_pages(addr: usize, count: usize) -> SgxResult {
- ensure!(addr != 0 && count != 0, SgxStatus::InvalidParameter);
-
- if let Some(attr) = LayoutTable::new().check_dyn_range(addr, count, None) {
- let pages = PageRange::new(
- addr,
- count,
- PageInfo {
- typ: PageType::Reg,
- flags: PageFlags::R | PageFlags::W | PageFlags::PENDING,
- },
- )?;
- if (attr.attr & arch::PAGE_DIR_GROW_DOWN) == 0 {
- pages.accept_forward()
- } else {
- pages.accept_backward()
- }
- } else {
- Err(SgxStatus::InvalidParameter)
- }
- }
-
- pub fn trim_epc_pages(addr: usize, count: usize) -> SgxResult {
- ensure!(addr != 0 && count != 0, SgxStatus::InvalidParameter);
-
- LayoutTable::new()
- .check_dyn_range(addr, count, None)
- .ok_or(SgxStatus::InvalidParameter)?;
-
- trim::trim_range(addr, count)?;
-
- let pages = PageRange::new(
- addr,
- count,
- PageInfo {
- typ: PageType::Trim,
- flags: PageFlags::MODIFIED,
- },
- )?;
- pages.accept_forward()?;
-
- trim::trim_range_commit(addr, count)?;
-
- Ok(())
- }
-
- pub fn expand_stack_epc_pages(addr: usize, count: usize) -> SgxResult {
- ensure!(addr != 0 && count != 0, SgxStatus::InvalidParameter);
-
- LayoutTable::new()
- .check_dyn_range(addr, count, None)
- .ok_or(SgxStatus::InvalidParameter)?;
-
- let pages = PageRange::new(
- addr,
- count,
- PageInfo {
- typ: PageType::Reg,
- flags: PageFlags::R | PageFlags::W | PageFlags::PENDING,
- },
- )?;
- pages.accept_forward()?;
-
- Ok(())
- }
-
- #[inline]
- pub fn accept_post_remove() -> SgxResult {
- reentrant_accept_post_remove(arch::Global::get().layout_table(), 0)
- }
-
- fn reentrant_accept_post_remove(table: &[Layout], offset: usize) -> SgxResult {
- let base = MmLayout::image_base();
- unsafe {
- for (i, layout) in table.iter().enumerate() {
- if is_group_id!(layout.group.id) {
- let mut step = 0_usize;
- for _ in 0..layout.group.load_times {
- step += layout.group.load_step as usize;
- reentrant_accept_post_remove(
- &table[i - layout.group.entry_count as usize..i],
- step,
- )?;
- }
- } else if (layout.entry.attributes & arch::PAGE_ATTR_POST_REMOVE) != 0 {
- let addr = base + layout.entry.rva as usize + offset;
- let count = layout.entry.page_count as usize;
-
- let pages = PageRange::new(
- addr,
- count,
- PageInfo {
- typ: PageType::Trim,
- flags: PageFlags::MODIFIED,
- },
- )?;
- pages.accept_forward()?;
- }
- }
- Ok(())
- }
- }
-
- pub fn change_perm() -> SgxResult {
- let elf = parse::new_elf()?;
- let text_relo = parse::has_text_relo()?;
-
- let base = MmLayout::image_base();
- for phdr in elf.program_iter() {
- let typ = phdr.get_type().unwrap_or(Type::Null);
- if typ == Type::Load && text_relo && !phdr.flags().is_write() {
- let mut perm = 0_u64;
- let start = base + trim_to_page!(phdr.virtual_addr() as usize);
- let end =
- base + round_to_page!(phdr.virtual_addr() as usize + phdr.mem_size() as usize);
- let count = (end - start) / arch::SE_PAGE_SIZE;
-
- if phdr.flags().is_read() {
- perm |= arch::SI_FLAG_R;
- }
- if phdr.flags().is_execute() {
- perm |= arch::SI_FLAG_X;
- }
-
- modify_perm(start, count, perm as u8)?;
- }
- if typ == Type::GnuRelro {
- let start = base + trim_to_page!(phdr.virtual_addr() as usize);
- let end =
- base + round_to_page!(phdr.virtual_addr() as usize + phdr.mem_size() as usize);
- let count = (end - start) / arch::SE_PAGE_SIZE;
-
- if count > 0 {
- modify_perm(start, count, arch::SI_FLAG_R as u8)?;
- }
- }
- }
-
- let layout_table = arch::Global::get().layout_table();
- if let Some(layout) = layout_table.iter().find(|layout| unsafe {
- (layout.entry.id == arch::LAYOUT_ID_RSRV_MIN)
- && (layout.entry.si_flags == arch::SI_FLAGS_RWX)
- && (layout.entry.page_count > 0)
- }) {
- let start = base + unsafe { layout.entry.rva as usize };
- let count = unsafe { layout.entry.page_count as usize };
-
- modify_perm(start, count, (arch::SI_FLAG_R | arch::SI_FLAG_W) as u8)?;
- }
- Ok(())
- }
-
- fn modify_perm(addr: usize, count: usize, perm: u8) -> SgxResult {
- let pages = PageRange::new(
- addr,
- count,
- PageInfo {
- typ: PageType::Reg,
- flags: PageFlags::PR | PageFlags::from_bits_truncate(perm),
- },
- )?;
-
- if SysFeatures::get().version() == Version::Sdk2_0 {
- perm::modpr_ocall(
- addr,
- count,
- ProtectPerm::try_from(perm).map_err(|_| SgxStatus::InvalidParameter)?,
- )?;
- }
-
- pages.modify()
- }
-}
-
-#[cfg(any(feature = "sim", feature = "hyper"))]
-mod sw {
- use sgx_types::error::SgxResult;
-
- #[allow(clippy::unnecessary_wraps)]
- #[inline]
- pub fn apply_epc_pages(_addr: usize, _count: usize) -> SgxResult {
- Ok(())
- }
-
- #[allow(clippy::unnecessary_wraps)]
- #[inline]
- pub fn trim_epc_pages(_addr: usize, _count: usize) -> SgxResult {
- Ok(())
- }
-
- #[allow(clippy::unnecessary_wraps)]
- #[inline]
- pub fn expand_stack_epc_pages(_addr: usize, _count: usize) -> SgxResult {
- Ok(())
- }
-}
diff --git a/sgx_trts/src/edmm/perm.rs b/sgx_trts/src/edmm/perm.rs
deleted file mode 100644
index 4e17e67e5..000000000
--- a/sgx_trts/src/edmm/perm.rs
+++ /dev/null
@@ -1,88 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License..
-
-cfg_if! {
- if #[cfg(not(any(feature = "sim", feature = "hyper")))] {
- pub use hw::*;
- } else {
- pub use sw::*;
- }
-}
-
-#[cfg(not(any(feature = "sim", feature = "hyper")))]
-mod hw {
- use crate::arch::SE_PAGE_SHIFT;
- use crate::call::{ocall, OCallIndex, OcAlloc};
- use alloc::boxed::Box;
- use core::convert::Into;
- use sgx_types::error::{SgxResult, SgxStatus};
- use sgx_types::types::ProtectPerm;
-
- #[repr(C)]
- #[derive(Clone, Copy, Debug, Default)]
- struct ChangePermOcall {
- addr: usize,
- size: usize,
- perm: u64,
- }
-
- pub fn modpr_ocall(addr: usize, count: usize, perm: ProtectPerm) -> SgxResult {
- let mut change = Box::try_new_in(
- ChangePermOcall {
- addr,
- size: count << SE_PAGE_SHIFT,
- perm: Into::::into(perm) as u64,
- },
- OcAlloc,
- )
- .map_err(|_| SgxStatus::OutOfMemory)?;
-
- ocall(OCallIndex::Modpr, Some(change.as_mut()))
- }
-
- pub fn mprotect_ocall(addr: usize, count: usize, perm: ProtectPerm) -> SgxResult {
- let mut change = Box::try_new_in(
- ChangePermOcall {
- addr,
- size: count << SE_PAGE_SHIFT,
- perm: Into::::into(perm) as u64,
- },
- OcAlloc,
- )
- .map_err(|_| SgxStatus::OutOfMemory)?;
-
- ocall(OCallIndex::Mprotect, Some(change.as_mut()))
- }
-}
-
-#[cfg(any(feature = "sim", feature = "hyper"))]
-mod sw {
- use sgx_types::error::SgxResult;
- use sgx_types::types::ProtectPerm;
-
- #[allow(clippy::unnecessary_wraps)]
- #[inline]
- pub fn modpr_ocall(_addr: usize, _count: usize, _perm: ProtectPerm) -> SgxResult {
- Ok(())
- }
-
- #[allow(clippy::unnecessary_wraps)]
- #[inline]
- pub fn mprotect_ocall(_addr: usize, _count: usize, _perm: ProtectPerm) -> SgxResult {
- Ok(())
- }
-}
diff --git a/sgx_trts/src/edmm/trim.rs b/sgx_trts/src/edmm/trim.rs
deleted file mode 100644
index ce278cecc..000000000
--- a/sgx_trts/src/edmm/trim.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License..
-
-use crate::arch::SE_PAGE_SHIFT;
-use crate::call::{ocall, OCallIndex, OcAlloc};
-use alloc::boxed::Box;
-use sgx_types::error::{SgxResult, SgxStatus};
-
-#[repr(C)]
-#[derive(Clone, Copy, Debug, Default)]
-struct TrimRangeOcall {
- from: usize,
- to: usize,
-}
-
-#[repr(C)]
-#[derive(Clone, Copy, Debug, Default)]
-struct TrimRangeCommitOcall {
- addr: usize,
-}
-
-pub fn trim_range(addr: usize, count: usize) -> SgxResult {
- let mut trim = Box::try_new_in(
- TrimRangeOcall {
- from: addr,
- to: addr + (count << SE_PAGE_SHIFT),
- },
- OcAlloc,
- )
- .map_err(|_| SgxStatus::OutOfMemory)?;
-
- ocall(OCallIndex::Trim, Some(trim.as_mut()))
-}
-
-pub fn trim_range_commit(addr: usize, count: usize) -> SgxResult {
- for i in 0..count {
- let mut trim = Box::try_new_in(
- TrimRangeCommitOcall {
- addr: addr + i * SE_PAGE_SHIFT,
- },
- OcAlloc,
- )
- .map_err(|_| SgxStatus::OutOfMemory)?;
-
- ocall(OCallIndex::TrimCommit, Some(trim.as_mut()))?;
- }
- Ok(())
-}
diff --git a/sgx_trts/src/emm/alloc.rs b/sgx_trts/src/emm/alloc.rs
new file mode 100644
index 000000000..8388c494e
--- /dev/null
+++ b/sgx_trts/src/emm/alloc.rs
@@ -0,0 +1,585 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use buddy_system_allocator::LockedHeap;
+use intrusive_collections::intrusive_adapter;
+use intrusive_collections::singly_linked_list::CursorMut;
+use intrusive_collections::singly_linked_list::{Link, SinglyLinkedList};
+use intrusive_collections::UnsafeRef;
+use sgx_tlibc_sys::ENOMEM;
+
+use crate::sync::Once;
+use crate::sync::SpinMutex as Mutex;
+use core::alloc::{AllocError, Allocator, Layout};
+use core::any::Any;
+use core::mem::size_of;
+use core::mem::MaybeUninit;
+use core::ptr::NonNull;
+
+use super::ema::EmaOptions;
+use super::page::AllocFlags;
+use super::vmmgr::{RangeType, VMMGR};
+use super::{PageInfo, PageType, ProtFlags};
+use sgx_types::error::OsResult;
+
+// The size of fixed static memory for Static Allocator
+const STATIC_MEM_SIZE: usize = 65536;
+
+// The size of initial reserve memory for Reserve Allocator
+const INIT_MEM_SIZE: usize = 65536;
+
+// The size of guard pages
+const GUARD_SIZE: usize = 0x8000;
+
+// The max allocated size of Reserve Allocator
+const MAX_EMALLOC_SIZE: usize = 0x10000000;
+
+const ALLOC_MASK: usize = 1;
+const SIZE_MASK: usize = !(EXACT_MATCH_INCREMENT - 1);
+
+/// Static memory for allocation
+static mut STATIC_MEM: [u8; STATIC_MEM_SIZE] = [0; STATIC_MEM_SIZE];
+
+/// Lowest level: Allocator for static memory
+///
+/// TODO: reimplement static allocator with monotone increasing policies
+static STATIC: Once<LockedHeap<32>> = Once::new();
+
+/// Second level: Allocator for reserve memory
+static RSRV_ALLOCATOR: Once<Mutex<Reserve>> = Once::new();
+
+/// Init lowest level static memory allocator
+pub fn init_static_alloc() {
+ let _ = STATIC.call_once(|| {
+ let static_alloc = LockedHeap::empty();
+ unsafe {
+ static_alloc
+ .lock()
+ .init(STATIC_MEM.as_ptr() as usize, STATIC_MEM_SIZE)
+ };
+ Ok(static_alloc)
+ });
+}
+
+/// Init reserve memory allocator
+/// init_reserve_alloc() need to be called after init_static_alloc()
+pub fn init_reserve_alloc() {
+ let _ = RSRV_ALLOCATOR.call_once(|| Ok(Mutex::new(Reserve::new(INIT_MEM_SIZE))));
+}
+
+pub trait EmmAllocator: Allocator + Any {
+ fn as_any(&self) -> &dyn Any;
+}
+
+/// AllocType layout memory from reserve memory region
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct RsrvAlloc;
+
+unsafe impl Allocator for RsrvAlloc {
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ let size = layout.size();
+ RSRV_ALLOCATOR
+ .get()
+ .unwrap()
+ .lock()
+ .emalloc(size)
+ .map(|addr| NonNull::slice_from_raw_parts(NonNull::new(addr as *mut u8).unwrap(), size))
+ .map_err(|_| AllocError)
+ }
+
+ #[inline]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) {
+ RSRV_ALLOCATOR.get().unwrap().lock().efree(ptr.addr().get())
+ }
+}
+
+impl EmmAllocator for RsrvAlloc {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+}
+
+/// AllocType layout memory from static memory region
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct StaticAlloc;
+
+unsafe impl Allocator for StaticAlloc {
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ STATIC
+ .get()
+ .unwrap()
+ .lock()
+ .alloc(layout)
+ .map(|addr| NonNull::slice_from_raw_parts(addr, layout.size()))
+ .map_err(|_| AllocError)
+ }
+
+ #[inline]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ STATIC.get().unwrap().lock().dealloc(ptr, layout);
+ }
+}
+
+impl EmmAllocator for StaticAlloc {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+}
+
+// Enum for allocator types
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+#[repr(u8)]
+pub enum AllocType {
+ Static,
+ Reserve,
+}
+
+impl AllocType {
+ pub fn alloctor(&self) -> &'static dyn EmmAllocator {
+ match self {
+ AllocType::Static => &StaticAlloc,
+ AllocType::Reserve => &RsrvAlloc,
+ }
+ }
+}
+
+// Chunk manages memory range.
+// The Chunk structure is filled into the layout before the base pointer.
+#[derive(Debug)]
+struct Chunk {
+ base: usize,
+ size: usize,
+ used: usize,
+ link: Link, // singly intrusive linkedlist
+}
+
+impl Chunk {
+ fn new(base: usize, size: usize) -> Self {
+ Self {
+ base,
+ size,
+ used: 0,
+ link: Link::new(),
+ }
+ }
+}
+
+intrusive_adapter!(ChunkAda = UnsafeRef<Chunk>: Chunk { link: Link });
+
+const NUM_EXACT_LIST: usize = 0x100;
+const HEADER_SIZE: usize = size_of::<usize>();
+const EXACT_MATCH_INCREMENT: usize = 0x8;
+const MIN_BLOCK_SIZE: usize = 0x10;
+const MAX_EXACT_SIZE: usize = MIN_BLOCK_SIZE + EXACT_MATCH_INCREMENT * (NUM_EXACT_LIST - 1);
+
+// Free block for allocating memory with exact size
+#[repr(C)]
+#[derive(Debug)]
+struct BlockFree {
+ size: usize,
+ link: Link, // singly intrusive linkedlist
+}
+
+// Used block for tracking allocated size and base pointer
+#[repr(C)]
+#[derive(Debug)]
+struct BlockUsed {
+ size: usize,
+ payload: usize, // Act as placeholder
+}
+
+impl BlockFree {
+ fn new(size: usize) -> Self {
+ Self {
+ size,
+ link: Link::new(),
+ }
+ }
+
+ fn set_size(&mut self, size: usize) {
+ self.size = size;
+ }
+
+ fn block_size(&self) -> usize {
+ self.size & SIZE_MASK
+ }
+
+ fn clear_alloced(&mut self) {
+ self.size &= SIZE_MASK;
+ }
+}
+
+impl BlockUsed {
+ fn new(size: usize) -> Self {
+ Self { size, payload: 0 }
+ }
+
+ fn set_size(&mut self, size: usize) {
+ self.size = size;
+ }
+
+ fn block_size(&self) -> usize {
+ self.size & SIZE_MASK
+ }
+
+ fn is_alloced(&self) -> bool {
+ self.size & ALLOC_MASK == 0
+ }
+
+ fn set_alloced(&mut self) {
+ self.size |= ALLOC_MASK;
+ }
+
+ fn clear_alloced(&mut self) {
+ self.size &= SIZE_MASK;
+ }
+
+ // Return the ptr of payload
+ fn payload_ptr(&self) -> usize {
+ &self.payload as *const _ as usize
+ }
+
+ unsafe fn with_payload<'a>(payload_ptr: usize) -> &'a mut BlockUsed {
+ let payload_ptr = payload_ptr as *const u8;
+ let block = &mut *(payload_ptr.byte_offset(-(HEADER_SIZE as isize)) as *mut BlockUsed);
+ block
+ }
+}
+
+impl<'a> From<&'a mut BlockFree> for &'a mut BlockUsed {
+ fn from(block_free: &'a mut BlockFree) -> Self {
+ let block_used = unsafe { &mut *(block_free as *mut _ as *mut BlockUsed) };
+
+ block_used.size = block_free.block_size();
+ // Clear residual link information
+ block_used.payload = 0;
+ block_used.set_alloced();
+
+ block_used
+ }
+}
+
+impl<'a> From<&'a mut BlockUsed> for &'a mut BlockFree {
+ fn from(block_used: &'a mut BlockUsed) -> Self {
+ let block_free = unsafe { &mut *(block_used as *mut _ as *mut BlockFree) };
+
+ block_free.size = block_used.block_size();
+ block_free.link = Link::new();
+ // Useless method to mark free tag
+ block_free.clear_alloced();
+
+ block_free
+ }
+}
+
+intrusive_adapter!(BlockFreeAda = UnsafeRef<BlockFree>: BlockFree { link: Link });
+
+/// Interior allocator for reserve memory management
+///
+/// TODO: implement slab allocator mechanism
+pub struct Reserve {
+ exact_blocks: [SinglyLinkedList<BlockFreeAda>; 256],
+ large_blocks: SinglyLinkedList<BlockFreeAda>,
+ chunks: SinglyLinkedList<ChunkAda>,
+ // The size of memory increment
+ incr_size: usize,
+ // statistics
+ allocated: usize,
+ total: usize,
+}
+
+impl Reserve {
+ fn new(size: usize) -> Self {
+ let exact_blocks: [SinglyLinkedList<BlockFreeAda>; 256] = {
+ let mut exact_blocks: [MaybeUninit<SinglyLinkedList<BlockFreeAda>>; 256] =
+ MaybeUninit::uninit_array();
+ for block in &mut exact_blocks {
+ block.write(SinglyLinkedList::new(BlockFreeAda::new()));
+ }
+ unsafe { MaybeUninit::array_assume_init(exact_blocks) }
+ };
+
+ let mut reserve = Self {
+ exact_blocks,
+ large_blocks: SinglyLinkedList::new(BlockFreeAda::new()),
+ chunks: SinglyLinkedList::new(ChunkAda::new()),
+ incr_size: 65536,
+ allocated: 0,
+ total: 0,
+ };
+
+ // We shouldn't handle the allocation error of reserve memory when initializing,
+ // If it returns error, the sdk should panic and crash.
+ unsafe {
+ reserve.add_chunks(size).unwrap();
+ }
+ reserve
+ }
+
+ // Find the available free block for memory allocation,
+ // and bsize must be round to eight
+ fn get_free_block(&mut self, bsize: usize) -> Option<UnsafeRef<BlockFree>> {
+ if bsize <= MAX_EXACT_SIZE {
+ // TODO: for exact size block, maybe we can reuse larger block
+ // rather than allocating block from chunk
+ return self.get_exact_block(bsize);
+ }
+
+ // Loop and find the most available large block
+ let list = &mut self.large_blocks;
+ let mut cursor = list.front_mut();
+ let mut suit_block: Option<*const BlockFree> = None;
+ let mut suit_block_size = 0;
+ while !cursor.is_null() {
+ let curr_block = cursor.get().unwrap();
+ if curr_block.size >= bsize
+ && (suit_block.is_none() || (suit_block_size > curr_block.size))
+ {
+ suit_block = Some(curr_block as *const BlockFree);
+ suit_block_size = curr_block.block_size();
+ }
+ cursor.move_next();
+ }
+
+ suit_block?;
+
+ cursor = list.front_mut();
+
+ let mut curr_block_ptr = cursor.get().unwrap() as *const BlockFree;
+ if curr_block_ptr == suit_block.unwrap() {
+ return list.pop_front();
+ }
+
+ let mut cursor_next = cursor.peek_next();
+ while !cursor_next.is_null() {
+ curr_block_ptr = cursor_next.get().unwrap() as *const BlockFree;
+ if curr_block_ptr == suit_block.unwrap() {
+ return cursor.remove_next();
+ }
+ cursor.move_next();
+ cursor_next = cursor.peek_next();
+ }
+
+ None
+ }
+
+ fn get_exact_block(&mut self, bsize: usize) -> Option> {
+ let idx = self.get_list_idx(bsize);
+ let list = &mut self.exact_blocks[idx];
+ list.pop_front()
+ }
+
+ fn put_free_block(&mut self, block: UnsafeRef) {
+ let block_size = block.block_size();
+ if block_size <= MAX_EXACT_SIZE {
+ // put block into exact block list
+ let idx = self.get_list_idx(block_size);
+ let list = &mut self.exact_blocks[idx];
+ list.push_front(block);
+ } else {
+ // put block into large block list
+ let list = &mut self.large_blocks;
+ list.push_front(block);
+ }
+ }
+
+ // Obtain the list index with exact block size
+ fn get_list_idx(&self, size: usize) -> usize {
+ assert!(size % EXACT_MATCH_INCREMENT == 0);
+ if size < MIN_BLOCK_SIZE {
+ return 0;
+ }
+ let idx = (size - MIN_BLOCK_SIZE) / EXACT_MATCH_INCREMENT;
+ assert!(idx < NUM_EXACT_LIST);
+ idx
+ }
+
+ // Reconstruct BlockUsed with BlockFree block_size() and set alloc, return payload ptr.
+ // BlockFree -> BlockUsed -> Payload ptr (Used)
+ fn block_to_payload(&self, mut block_free: UnsafeRef) -> usize {
+ // Inexplicily change inner data of pointer
+ let block_used: &mut BlockUsed = block_free.as_mut().into();
+ block_used.payload_ptr()
+ }
+
+ // Reconstruct a new BlockFree with BlockUsed block_size(), return payload ptr.
+ // Payload ptr (Used) -> BlockUsed -> BlockFree
+ fn payload_to_block(&self, payload_ptr: usize) -> UnsafeRef {
+ let block_used = unsafe { BlockUsed::with_payload(payload_ptr) };
+ // Inexplicily change inner data of pointer
+ let block_free: &mut BlockFree = block_used.into();
+ unsafe { UnsafeRef::from_raw(block_free as *const BlockFree) }
+ }
+
+ /// Malloc memory
+ pub fn emalloc(&mut self, size: usize) -> OsResult {
+ let mut bsize = round_to!(size + HEADER_SIZE, EXACT_MATCH_INCREMENT);
+ bsize = bsize.max(MIN_BLOCK_SIZE);
+
+ // Find free block in lists
+ let mut block = self.get_free_block(bsize);
+
+ if let Some(block) = block {
+ // No need to set size as free block contains size
+ return Ok(self.block_to_payload(block));
+ };
+
+ // Alloc new block from chunks
+ block = self.alloc_from_chunks(bsize);
+ if block.is_none() {
+ let chunk_size = size_of::();
+ let new_reserve_size = round_to!(bsize + chunk_size, INIT_MEM_SIZE);
+ unsafe { self.add_chunks(new_reserve_size)? };
+ block = self.alloc_from_chunks(bsize);
+ // Should never happen
+ if block.is_none() {
+ return Err(ENOMEM);
+ }
+ }
+
+ Ok(self.block_to_payload(block.unwrap()))
+ }
+
+ fn alloc_from_chunks(&mut self, bsize: usize) -> Option> {
+ let mut addr: usize = 0;
+ let mut cursor = self.chunks.front_mut();
+ while !cursor.is_null() {
+ let chunk = unsafe { cursor.get_mut().unwrap() };
+ if (chunk.size - chunk.used) >= bsize {
+ addr = chunk.base + chunk.used;
+ chunk.used += bsize;
+ break;
+ }
+ cursor.move_next();
+ }
+
+ if addr == 0 {
+ None
+ } else {
+ let block = BlockFree::new(bsize);
+ let ptr = addr as *mut BlockFree;
+ let block = unsafe {
+ ptr.write(block);
+ UnsafeRef::from_raw(ptr)
+ };
+ Some(block)
+ }
+ }
+
+ /// Free memory
+ pub fn efree(&mut self, payload_addr: usize) {
+ let block = self.payload_to_block(payload_addr);
+ let block_addr = block.as_ref() as *const BlockFree as usize;
+ let block_size = block.block_size();
+ let block_end = block_addr + block_size;
+ let res = self.find_chunk_with_block(block_addr, block_size);
+ if res.is_none() {
+ panic!();
+ }
+
+ // TODO: reconfigure the free block,
+ // merging its dextral block into a large block
+ let mut cursor = res.unwrap();
+ let chunk = unsafe { cursor.get_mut().unwrap() };
+
+ if block_end - chunk.base == chunk.used {
+ chunk.used -= block.block_size();
+ // TODO: Trigger merging the right-most block into this chunk,
+ // if and only if the right-most block is in free large block list
+ return;
+ }
+
+ self.put_free_block(block);
+ }
+
+ /// Adding the size of interior memory
+ /// rsize: memory increment
+ pub unsafe fn add_chunks(&mut self, rsize: usize) -> OsResult {
+ // Here we alloc at least INIT_MEM_SIZE size,
+ // but commit rsize memory, the remaining memory is COMMIT_ON_DEMAND
+ let increment = self.incr_size.max(rsize);
+ let mut vmmgr = VMMGR.get().unwrap().lock();
+
+ let mut options = EmaOptions::new(None, increment + 2 * GUARD_SIZE, AllocFlags::RESERVED);
+
+ options
+ .info(PageInfo {
+ typ: PageType::None,
+ prot: ProtFlags::NONE,
+ })
+ .alloc(AllocType::Static);
+ let base = vmmgr.alloc(&options, RangeType::User)?;
+
+ let mut options = EmaOptions::new(
+ Some(base + GUARD_SIZE),
+ increment,
+ AllocFlags::COMMIT_ON_DEMAND | AllocFlags::FIXED,
+ );
+
+ options.alloc(AllocType::Static);
+ let base = vmmgr.alloc(&options, RangeType::User)?;
+
+ vmmgr.commit(base, rsize)?;
+ drop(vmmgr);
+
+ unsafe {
+ self.write_chunk(base, increment);
+ }
+
+ self.incr_size = (self.incr_size * 2).min(MAX_EMALLOC_SIZE);
+
+ Ok(())
+ }
+
+ // Parsing the range of unmanaged memory. The function writes a chunk struct in the header of
+ // unmanaged memory, the written chunk will be responsible for managing the remaining memory.
+ unsafe fn write_chunk(&mut self, base: usize, size: usize) {
+ let header_size = size_of::();
+ let mem_base = base + header_size;
+ let mem_size = size - header_size;
+
+ let chunk: Chunk = Chunk::new(mem_base, mem_size);
+ unsafe {
+ core::ptr::write(base as *mut Chunk, chunk);
+ let chunk_ref = UnsafeRef::from_raw(base as *const Chunk);
+ self.chunks.push_front(chunk_ref);
+ }
+ }
+
+ // Find the chunk including the specified block
+ fn find_chunk_with_block(
+ &mut self,
+ block_addr: usize,
+ block_size: usize,
+ ) -> Option> {
+ if block_size == 0 {
+ return None;
+ }
+ let mut cursor = self.chunks.front_mut();
+ while !cursor.is_null() {
+ let chunk = cursor.get().unwrap();
+ if (block_addr >= chunk.base)
+ && ((block_addr + block_size) <= (chunk.base + chunk.used))
+ {
+ return Some(cursor);
+ }
+ cursor.move_next();
+ }
+
+ None
+ }
+}
diff --git a/sgx_trts/src/emm/bitmap.rs b/sgx_trts/src/emm/bitmap.rs
new file mode 100644
index 000000000..ed302f5df
--- /dev/null
+++ b/sgx_trts/src/emm/bitmap.rs
@@ -0,0 +1,147 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use alloc::boxed::Box;
+use alloc::vec;
+use sgx_tlibc_sys::EACCES;
+use sgx_types::error::OsResult;
+
+use crate::emm::alloc::EmmAllocator;
+use crate::emm::alloc::RsrvAlloc;
+use crate::emm::alloc::StaticAlloc;
+
+use super::alloc::AllocType;
+
+// Number of bits stored per backing byte.
+const BYTE_SIZE: usize = 8;
+// Ceiling division: number of bytes needed to hold `$num` bits.
+macro_rules! bytes_num {
+    ($num:expr) => {
+        ($num + BYTE_SIZE - 1) / BYTE_SIZE
+    };
+}
+
+/// A heap-backed bit array used to track per-page EACCEPT status.
+#[derive(Debug)]
+pub struct BitArray {
+    // Number of addressable bits.
+    bits: usize,
+    // Length of `data` in bytes (bytes_num!(bits)).
+    bytes: usize,
+    // Backing storage, owned by one of the EMM allocators.
+    data: Box<[u8], &'static dyn EmmAllocator>,
+}
+
+impl BitArray {
+    /// Init BitArray with all zero bits
+    pub fn new(bits: usize, alloc: AllocType) -> OsResult<Self> {
+        let bytes = bytes_num!(bits);
+
+        // FIXME: return error if OOM
+        let data = vec::from_elem_in(0_u8, bytes, alloc.alloctor()).into_boxed_slice();
+
+        Ok(Self { bits, bytes, data })
+    }
+
+    /// Get the value of the bit at a given index
+    ///
+    /// Returns `EACCES` if `index` is out of range.
+    pub fn get(&self, index: usize) -> OsResult<bool> {
+        if index >= self.bits {
+            return Err(EACCES);
+        }
+
+        let byte_index = index / BYTE_SIZE;
+        let bit_index = index % BYTE_SIZE;
+        let bit_mask = 1 << bit_index;
+        Ok((self.data.get(byte_index).unwrap() & bit_mask) != 0)
+    }
+
+    /// Check whether all bits are set true
+    pub fn all_true(&self) -> bool {
+        // Checked bit-by-bit (not byte-wise) so the unused padding bits in
+        // the last byte never influence the result.
+        (0..self.bits).all(|pos| self.get(pos).unwrap())
+    }
+
+    /// Set the value of the bit at the specified index
+    ///
+    /// Returns `EACCES` if `index` is out of range.
+    pub fn set(&mut self, index: usize, value: bool) -> OsResult<()> {
+        if index >= self.bits {
+            return Err(EACCES);
+        }
+        let byte_index = index / BYTE_SIZE;
+        let bit_index = index % BYTE_SIZE;
+        let bit_mask = 1 << bit_index;
+
+        if value {
+            self.data[byte_index] |= bit_mask;
+        } else {
+            self.data[byte_index] &= !bit_mask;
+        }
+        Ok(())
+    }
+
+    /// Set all the bits to true
+    pub fn set_full(&mut self) {
+        self.data.fill(0xFF);
+    }
+
+    /// Clear all the bits
+    pub fn clear(&mut self) {
+        self.data.fill(0);
+    }
+
+    // Recover which AllocType produced this array's allocator, so a split
+    // sibling can be allocated from the same region.
+    fn alloc_type(&self) -> AllocType {
+        let allocator = *Box::allocator(&self.data);
+        if allocator.as_any().downcast_ref::<RsrvAlloc>().is_some() {
+            AllocType::Reserve
+        } else if allocator.as_any().downcast_ref::<StaticAlloc>().is_some() {
+            AllocType::Static
+        } else {
+            panic!()
+        }
+    }
+
+    /// Split current bit array at specified position, return a new allocated bit array
+    /// corresponding to the bits at the range of [pos, end).
+    /// And the current bit array manages the bits at the range of [0, pos).
+    pub fn split(&mut self, pos: usize) -> OsResult<BitArray> {
+        assert!(pos > 0 && pos < self.bits);
+
+        let byte_index = pos / BYTE_SIZE;
+        let bit_index = pos % BYTE_SIZE;
+
+        let lbits = pos;
+        let lbytes = bytes_num!(lbits);
+
+        let rbits = self.bits - lbits;
+        let rbytes = bytes_num!(rbits);
+
+        let mut rarray = Self::new(rbits, self.alloc_type())?;
+
+        let rdata = &mut rarray.data;
+        let ldata = &mut self.data;
+
+        if bit_index == 0 {
+            // Byte-aligned split: a plain byte copy suffices. The shifting
+            // path below would compute `<< (8 - 0)`, i.e. a shift by 8 on a
+            // u8 — a panic in debug builds and a wrong value in release.
+            rdata[..rbytes].copy_from_slice(&ldata[byte_index..byte_index + rbytes]);
+        } else {
+            // Each right-hand byte stitches together the high bits of one
+            // left byte and the low bits of the next.
+            for (idx, item) in rdata[..(rbytes - 1)].iter_mut().enumerate() {
+                // current byte index in previous bit_array
+                let curr_idx = idx + byte_index;
+                let low_bits = ldata[curr_idx] >> bit_index;
+                let high_bits = ldata[curr_idx + 1] << (8 - bit_index);
+                *item = high_bits | low_bits;
+            }
+            rdata[rbytes - 1] = ldata[self.bytes - 1] >> bit_index;
+        }
+
+        self.bits = lbits;
+        self.bytes = lbytes;
+
+        Ok(rarray)
+    }
+}
diff --git a/sgx_trts/src/emm/ema.rs b/sgx_trts/src/emm/ema.rs
new file mode 100644
index 000000000..f26e3f9bf
--- /dev/null
+++ b/sgx_trts/src/emm/ema.rs
@@ -0,0 +1,662 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use crate::arch::{SE_PAGE_SHIFT, SE_PAGE_SIZE};
+use crate::emm::alloc::EmmAllocator;
+use crate::emm::{PageInfo, PageRange, PageType, ProtFlags};
+use crate::enclave::is_within_enclave;
+use alloc::boxed::Box;
+use intrusive_collections::{intrusive_adapter, LinkedListLink, UnsafeRef};
+use sgx_tlibc_sys::{c_void, EACCES, EFAULT, EINVAL};
+use sgx_types::error::OsResult;
+
+use super::alloc::AllocType;
+use super::bitmap::BitArray;
+use super::ocall;
+use super::page::AllocFlags;
+use super::pfhandler::PfHandler;
+
+/// Enclave Management Area
+///
+/// Question: should we replace BitArray with pointer
+/// to split struct into two pieces of 80 bytes and 32 bytes or an entity of 104 bytes?
+pub(crate) struct Ema {
+    // page aligned start address
+    start: usize,
+    // bytes, round to page bytes
+    length: usize,
+    alloc_flags: AllocFlags,
+    info: PageInfo,
+    // bitmap for EACCEPT status
+    // FIXME: replace BitArray with pointer
+    eaccept_map: Option<BitArray>,
+    // custom PF handler
+    handler: Option<PfHandler>,
+    // private data for PF handler
+    priv_data: Option<*mut c_void>,
+    alloc: AllocType,
+    // intrusive linkedlist
+    link: LinkedListLink,
+}
+
+// Implement ema adapter for the operations of intrusive linkedlist;
+// the stripped pointer type parameter (`UnsafeRef<Ema>`) is restored here.
+intrusive_adapter!(pub(crate) EmaAda = UnsafeRef<Ema>: Ema { link: LinkedListLink });
+
+#[derive(Clone, Copy)]
+/// Options for allocating Emas.
+pub struct EmaOptions {
+    pub addr: Option<usize>,
+    pub length: usize,
+    pub alloc_flags: AllocFlags,
+    pub alloc: AllocType,
+    info: PageInfo,
+    handler: Option<PfHandler>,
+    priv_data: Option<*mut c_void>,
+}
+
+// TODO: remove send and sync
+// NOTE(review): Ema holds a raw pointer (`priv_data`) and is shared via the
+// intrusive list; these impls assert thread-safety the type does not prove
+// by itself — confirm all access is serialized by the EMM locks.
+unsafe impl Send for Ema {}
+unsafe impl Sync for Ema {}
+
+impl Ema {
+    /// Initialize Emanode with null eaccept map,
+    /// and start address must be page aligned
+    pub fn new(options: &EmaOptions) -> OsResult<Self> {
+        ensure!(options.addr.is_some(), EINVAL);
+
+        Ok(Self {
+            start: options.addr.unwrap(),
+            length: options.length,
+            alloc_flags: options.alloc_flags,
+            info: options.info,
+            eaccept_map: None,
+            handler: options.handler,
+            priv_data: options.priv_data,
+            link: LinkedListLink::new(),
+            alloc: options.alloc,
+        })
+    }
+
+    /// Split current ema at specified address, return a new allocated ema
+    /// corresponding to the memory at the range of [addr, end).
+    /// And the current ema manages the memory at the range of [start, addr).
+    pub fn split(&mut self, addr: usize) -> OsResult<UnsafeRef<Ema>> {
+        let laddr = self.start;
+        let lsize = addr - laddr;
+
+        let raddr = addr;
+        let rsize = (self.start + self.length) - addr;
+
+        // Split the EACCEPT bitmap at the same page boundary, if present.
+        let rarray = match &mut self.eaccept_map {
+            Some(bitarray) => {
+                let pos = (addr - self.start) >> crate::arch::SE_PAGE_SHIFT;
+                Some(bitarray.split(pos)?)
+            }
+            None => None,
+        };
+
+        let mut rema = self.clone_ema();
+        rema.start = raddr;
+        rema.length = rsize;
+        rema.eaccept_map = rarray;
+
+        self.start = laddr;
+        self.length = lsize;
+
+        Ok(rema)
+    }
+
+    /// Employ same allocator to Clone Ema without eaccept map
+    pub(crate) fn clone_ema(&self) -> UnsafeRef<Ema> {
+        let mut ema_options = EmaOptions::new(Some(self.start), self.length, self.alloc_flags);
+        ema_options
+            .info(self.info)
+            .handle(self.handler, self.priv_data)
+            .alloc(self.alloc);
+
+        let allocator = self.alloc.alloctor();
+        let ema = Box::new_in(Ema::new(&ema_options).unwrap(), allocator);
+        unsafe { UnsafeRef::from_raw(Box::into_raw(ema)) }
+    }
+
+    /// Allocate the reserve / committed / virtual memory at corresponding memory
+    pub fn alloc(&mut self) -> OsResult<()> {
+        // RESERVED region only occupy memory range with no real allocation
+        if self.alloc_flags.contains(AllocFlags::RESERVED) {
+            return Ok(());
+        }
+
+        // Allocate new eaccept_map for COMMIT_ON_DEMAND and COMMIT_NOW
+        if self.eaccept_map.is_none() {
+            self.init_eaccept_map()?;
+        };
+
+        // Ocall to mmap memory in urts
+        ocall::alloc_ocall(self.start, self.length, self.info.typ, self.alloc_flags)?;
+
+        // Set the corresponding bits of eaccept map
+        if self.alloc_flags.contains(AllocFlags::COMMIT_NOW) {
+            let grow_up: bool = !self.alloc_flags.contains(AllocFlags::GROWSDOWN);
+            self.eaccept(self.start, self.length, grow_up)?;
+            self.eaccept_map.as_mut().unwrap().set_full();
+        } else {
+            self.eaccept_map.as_mut().unwrap().clear();
+        }
+        Ok(())
+    }
+
+    /// Eaccept target EPC pages with cpu eaccept instruction
+    fn eaccept(&self, start: usize, length: usize, grow_up: bool) -> OsResult<()> {
+        let info = PageInfo {
+            typ: self.info.typ,
+            prot: self.info.prot | ProtFlags::PENDING,
+        };
+
+        let pages = PageRange::new(start, length / crate::arch::SE_PAGE_SIZE, info)?;
+
+        if grow_up {
+            pages.accept_backward()
+        } else {
+            pages.accept_forward()
+        }
+    }
+
+    /// Check the prerequisites of ema commitment
+    pub fn commit_check(&self) -> OsResult<()> {
+        ensure!(
+            self.info.prot.intersects(ProtFlags::RW)
+                && self.info.typ == PageType::Reg
+                && !self.alloc_flags.contains(AllocFlags::RESERVED),
+            EACCES
+        );
+
+        Ok(())
+    }
+
+    /// Commit the corresponding memory of this ema
+    pub fn commit_all(&mut self) -> OsResult<()> {
+        self.commit(self.start, self.length)
+    }
+
+    /// Commit the partial memory of this ema
+    pub fn commit(&mut self, start: usize, length: usize) -> OsResult<()> {
+        ensure!(
+            length != 0
+                && (length % crate::arch::SE_PAGE_SIZE) == 0
+                && start >= self.start
+                && start + length <= self.start + self.length,
+            EINVAL
+        );
+
+        let info = PageInfo {
+            typ: PageType::Reg,
+            prot: ProtFlags::RW | ProtFlags::PENDING,
+        };
+
+        let pages = PageRange::new(start, length / crate::arch::SE_PAGE_SIZE, info)?;
+
+        // Page index of the start address
+        let init_idx = (start - self.start) >> crate::arch::SE_PAGE_SHIFT;
+        let map = self.eaccept_map.as_mut().unwrap();
+
+        // Accept only the pages not yet recorded as committed.
+        for (idx, page) in pages.iter().enumerate() {
+            let page_idx = idx + init_idx;
+            if map.get(page_idx).unwrap() {
+                continue;
+            } else {
+                page.accept()?;
+                map.set(page_idx, true)?;
+            }
+        }
+        Ok(())
+    }
+
+    /// Check the prerequisites of ema uncommitment
+    pub fn uncommit_check(&self) -> OsResult<()> {
+        ensure!(!self.alloc_flags.contains(AllocFlags::RESERVED), EACCES);
+        Ok(())
+    }
+
+    /// Uncommit the corresponding memory of this ema
+    pub fn uncommit_all(&mut self) -> OsResult<()> {
+        self.uncommit(self.start, self.length)
+    }
+
+    /// Uncommit the partial memory of this ema
+    pub fn uncommit(&mut self, start: usize, length: usize) -> OsResult<()> {
+        // Question: there exists a problem:
+        // If developers trim partial pages of the ema with none protection flag,
+        // the protection flag of left committed pages would be modified to Read implicitly.
+        let prot = self.info.prot;
+        if prot == ProtFlags::NONE && (self.info.typ != PageType::Tcs) {
+            self.modify_perm(ProtFlags::R)?
+        }
+
+        self.uncommit_inner(start, length, prot)
+    }
+
+    #[inline]
+    fn uncommit_inner(&mut self, start: usize, length: usize, prot: ProtFlags) -> OsResult<()> {
+        assert!(self.eaccept_map.is_some());
+
+        if self.alloc_flags.contains(AllocFlags::RESERVED) {
+            return Ok(());
+        }
+
+        let trim_info = PageInfo {
+            typ: PageType::Trim,
+            prot: ProtFlags::MODIFIED,
+        };
+
+        let map = self.eaccept_map.as_mut().unwrap();
+        let mut start = start;
+        let end: usize = start + length;
+
+        // Trim one maximal run of committed pages per outer iteration.
+        while start < end {
+            // Skip ahead to the first committed page.
+            let mut block_start = start;
+            while block_start < end {
+                let pos = (block_start - self.start) >> crate::arch::SE_PAGE_SHIFT;
+                if map.get(pos).unwrap() {
+                    break;
+                } else {
+                    block_start += crate::arch::SE_PAGE_SIZE;
+                }
+            }
+
+            if block_start == end {
+                break;
+            }
+
+            // Extend the run while pages remain committed.
+            let mut block_end = block_start + crate::arch::SE_PAGE_SIZE;
+            while block_end < end {
+                let pos = (block_end - self.start) >> crate::arch::SE_PAGE_SHIFT;
+                if map.get(pos).unwrap() {
+                    block_end += crate::arch::SE_PAGE_SIZE;
+                } else {
+                    break;
+                }
+            }
+
+            let block_length = block_end - block_start;
+            ocall::modify_ocall(
+                block_start,
+                block_length,
+                PageInfo {
+                    typ: self.info.typ,
+                    prot,
+                },
+                PageInfo {
+                    typ: PageType::Trim,
+                    prot,
+                },
+            )?;
+
+            let pages = PageRange::new(
+                block_start,
+                block_length / crate::arch::SE_PAGE_SIZE,
+                trim_info,
+            )?;
+
+            let init_idx = (block_start - self.start) >> crate::arch::SE_PAGE_SHIFT;
+            for (idx, page) in pages.iter().enumerate() {
+                page.accept()?;
+                let pos = idx + init_idx;
+                map.set(pos, false)?;
+            }
+
+            // Notify trimming
+            ocall::modify_ocall(
+                block_start,
+                block_length,
+                PageInfo {
+                    typ: PageType::Trim,
+                    prot,
+                },
+                PageInfo {
+                    typ: PageType::Trim,
+                    prot,
+                },
+            )?;
+            start = block_end;
+        }
+        Ok(())
+    }
+
+    /// Check the prerequisites of modifying permissions
+    pub fn modify_perm_check(&self) -> OsResult<()> {
+        ensure!(
+            (self.info.typ == PageType::Reg && !self.alloc_flags.contains(AllocFlags::RESERVED)),
+            EACCES
+        );
+
+        // Permissions can only be changed once every page is committed.
+        match &self.eaccept_map {
+            Some(bitmap) => {
+                if !bitmap.all_true() {
+                    return Err(EINVAL);
+                }
+            }
+            None => {
+                return Err(EINVAL);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Modifying the permissions of corresponding memory of this ema
+    pub fn modify_perm(&mut self, new_prot: ProtFlags) -> OsResult<()> {
+        if self.info.prot == new_prot {
+            return Ok(());
+        }
+
+        // Notify modifying permissions
+        ocall::modify_ocall(
+            self.start,
+            self.length,
+            self.info,
+            PageInfo {
+                typ: self.info.typ,
+                prot: new_prot,
+            },
+        )?;
+
+        let info = PageInfo {
+            typ: PageType::Reg,
+            prot: new_prot | ProtFlags::PR,
+        };
+
+        let pages = PageRange::new(self.start, self.length / crate::arch::SE_PAGE_SIZE, info)?;
+
+        // Modpe the EPC with cpu instruction
+        for page in pages.iter() {
+            // EMODPE only when the new permissions add bits.
+            if (new_prot | self.info.prot) != self.info.prot {
+                page.modpe()?;
+            }
+
+            // If the new permission is RWX, no EMODPR needed in untrusted part (modify ocall)
+            if (new_prot & (ProtFlags::W | ProtFlags::X)) != (ProtFlags::W | ProtFlags::X) {
+                page.accept()?;
+            }
+        }
+
+        self.info = PageInfo {
+            typ: self.info.typ,
+            prot: new_prot,
+        };
+
+        if new_prot == ProtFlags::NONE {
+            ocall::modify_ocall(
+                self.start,
+                self.length,
+                PageInfo {
+                    typ: self.info.typ,
+                    prot: ProtFlags::NONE,
+                },
+                PageInfo {
+                    typ: self.info.typ,
+                    prot: ProtFlags::NONE,
+                },
+            )?;
+        }
+
+        Ok(())
+    }
+
+    /// Changing the page type from Reg to Tcs
+    pub fn change_to_tcs(&mut self) -> OsResult<()> {
+        // The ema must have and only have one page
+        ensure!(self.length == SE_PAGE_SIZE, EINVAL);
+        ensure!(self.is_page_committed(self.start), EACCES);
+
+        let info = self.info;
+        if info.typ == PageType::Tcs {
+            return Ok(());
+        }
+
+        ensure!(
+            (info.prot == ProtFlags::RW) && (info.typ == PageType::Reg),
+            EACCES
+        );
+
+        ocall::modify_ocall(
+            self.start,
+            self.length,
+            info,
+            PageInfo {
+                typ: PageType::Tcs,
+                prot: info.prot,
+            },
+        )?;
+
+        let eaccept_info = PageInfo {
+            typ: PageType::Tcs,
+            prot: ProtFlags::MODIFIED,
+        };
+
+        let pages = PageRange::new(
+            self.start,
+            self.length / crate::arch::SE_PAGE_SIZE,
+            eaccept_info,
+        )?;
+
+        for page in pages.iter() {
+            page.accept()?;
+        }
+
+        self.info = PageInfo {
+            typ: PageType::Tcs,
+            prot: ProtFlags::NONE,
+        };
+        Ok(())
+    }
+
+    /// Whether the page containing `addr` has been EACCEPTed.
+    pub fn is_page_committed(&self, addr: usize) -> bool {
+        assert!(addr % SE_PAGE_SIZE == 0);
+        if self.eaccept_map.is_none() {
+            return false;
+        }
+        let pos = (addr - self.start) >> SE_PAGE_SHIFT;
+        self.eaccept_map.as_ref().unwrap().get(pos).unwrap()
+    }
+
+    /// Deallocate the corresponding memory of this ema
+    pub fn dealloc(&mut self) -> OsResult<()> {
+        if self.alloc_flags.contains(AllocFlags::RESERVED) {
+            return Ok(());
+        }
+
+        if self.info.prot == ProtFlags::NONE && (self.info.typ != PageType::Tcs) {
+            self.modify_perm(ProtFlags::R)?;
+        }
+        self.uncommit_inner(self.start, self.length, ProtFlags::NONE)?;
+        Ok(())
+    }
+
+    /// Obtain the aligned end address
+    pub fn aligned_end(&self, align: usize) -> usize {
+        let curr_end = self.start + self.length;
+        round_to!(curr_end, align)
+    }
+
+    /// Obtain the end address of ema
+    pub fn end(&self) -> usize {
+        self.start + self.length
+    }
+
+    /// Obtain the start address of ema
+    pub fn start(&self) -> usize {
+        self.start
+    }
+
+    /// Obtain the length of ema (bytes)
+    pub fn len(&self) -> usize {
+        self.length
+    }
+
+    /// Check if the ema range is lower than the address
+    pub fn lower_than_addr(&self, addr: usize) -> bool {
+        self.end() <= addr
+    }
+
+    /// Check if the ema range is higher than the address
+    pub fn higher_than_addr(&self, addr: usize) -> bool {
+        self.start >= addr
+    }
+
+    /// Check if the ema range contains the specified address
+    pub fn overlap_addr(&self, addr: usize) -> bool {
+        (addr >= self.start) && (addr < self.start + self.length)
+    }
+
+    /// Mark every page of this ema as EACCEPTed, creating the map if needed.
+    pub fn set_eaccept_map_full(&mut self) -> OsResult<()> {
+        // Both branches of the original performed the same `set_full`;
+        // folded into a single path.
+        if self.eaccept_map.is_none() {
+            self.init_eaccept_map()?;
+        }
+        self.eaccept_map.as_mut().unwrap().set_full();
+        Ok(())
+    }
+
+    // Allocate a zeroed EACCEPT bitmap sized to this ema's page count.
+    fn init_eaccept_map(&mut self) -> OsResult<()> {
+        let page_num = self.length >> SE_PAGE_SHIFT;
+        let eaccept_map = BitArray::new(page_num, self.alloc)?;
+        self.eaccept_map = Some(eaccept_map);
+        Ok(())
+    }
+
+    fn set_flags(&mut self, flags: AllocFlags) {
+        self.alloc_flags = flags;
+    }
+
+    fn set_prot(&mut self, info: PageInfo) {
+        self.info = info;
+    }
+
+    pub fn alloc_type(&self) -> AllocType {
+        self.alloc
+    }
+
+    /// Obtain the allocator of ema
+    pub fn allocator(&self) -> &'static dyn EmmAllocator {
+        self.alloc.alloctor()
+    }
+
+    pub fn flags(&self) -> AllocFlags {
+        self.alloc_flags
+    }
+
+    pub fn info(&self) -> PageInfo {
+        self.info
+    }
+
+    /// The custom page-fault handler and its private data, if any.
+    pub fn fault_handler(&self) -> (Option<PfHandler>, Option<*mut c_void>) {
+        (self.handler, self.priv_data)
+    }
+}
+
+impl EmaOptions {
+    /// Creates new options for allocating the Emas
+    pub fn new(addr: Option<usize>, length: usize, alloc_flags: AllocFlags) -> Self {
+        Self {
+            addr,
+            length,
+            alloc_flags,
+            info: PageInfo {
+                typ: PageType::Reg,
+                prot: ProtFlags::RW,
+            },
+            handler: None,
+            priv_data: None,
+            alloc: AllocType::Reserve,
+        }
+    }
+
+    /// Resets the base address of allocated Emas.
+    pub fn addr(&mut self, addr: usize) -> &mut Self {
+        self.addr = Some(addr);
+        self
+    }
+
+    /// Sets the page info of allocated Emas.
+    ///
+    /// The default value is `PageInfo { typ: PageType::Reg, prot: ProtFlags::RW }`.
+    pub fn info(&mut self, info: PageInfo) -> &mut Self {
+        self.info = info;
+        self
+    }
+
+    /// Sets the customized page fault handler and private data of allocated Emas.
+    ///
+    /// The default value is `handler: None, priv_data: None`.
+    pub fn handle(
+        &mut self,
+        handler: Option<PfHandler>,
+        priv_data: Option<*mut c_void>,
+    ) -> &mut Self {
+        self.handler = handler;
+        self.priv_data = priv_data;
+        self
+    }
+
+    /// The method can not be exposed to User.
+    /// Sets the inner allocate method of allocated Emas.
+    ///
+    /// If `alloc` is set as `AllocType::Reserve`, the Ema will be allocated
+    /// at reserve memory region (commited pages in user region).
+    /// If `alloc` is set as `AllocType::Static`, the Ema will be allocated
+    /// at static memory region (a small static memory).
+    ///
+    /// The default value is `AllocType::Reserve`.
+    pub(crate) fn alloc(&mut self, alloc: AllocType) -> &mut Self {
+        self.alloc = alloc;
+        self
+    }
+}
+
+impl EmaOptions {
+    /// Validates a set of options: a non-zero address must be page aligned
+    /// and lie within the enclave, and the length must be a non-zero
+    /// multiple of the page size.
+    pub(crate) fn check(options: &EmaOptions) -> OsResult<()> {
+        let addr = options.addr.unwrap_or(0);
+        let size = options.length;
+
+        if addr > 0 {
+            ensure!(
+                is_page_aligned!(addr) && is_within_enclave(addr as *const u8, size),
+                EINVAL
+            );
+        }
+        ensure!(size != 0 && ((size % SE_PAGE_SIZE) == 0), EINVAL);
+
+        Ok(())
+    }
+}
+
+impl Ema {
+    /// Boxes a new Ema with its configured allocator and, when `apply_now`
+    /// is set, immediately performs the backing allocation.
+    pub fn allocate(options: &EmaOptions, apply_now: bool) -> OsResult<UnsafeRef<Ema>> {
+        ensure!(options.addr.is_some(), EFAULT);
+        let mut new_ema = {
+            let allocator = options.alloc.alloctor();
+            let new_ema = Box::new_in(Ema::new(options)?, allocator);
+            unsafe { UnsafeRef::from_raw(Box::into_raw(new_ema)) }
+        };
+        if apply_now {
+            new_ema.alloc()?;
+        }
+        Ok(new_ema)
+    }
+}
diff --git a/sgx_trts/src/emm/init.rs b/sgx_trts/src/emm/init.rs
new file mode 100644
index 000000000..04d861368
--- /dev/null
+++ b/sgx_trts/src/emm/init.rs
@@ -0,0 +1,245 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use super::alloc::{init_reserve_alloc, init_static_alloc};
+use super::vmmgr::init_vmmgr;
+
+/// One-time EMM bootstrap: bring up the virtual memory manager first, then
+/// the static and reserve allocators.
+// NOTE(review): the ordering looks intentional (the allocators presumably
+// depend on an initialized vmmgr) — confirm before reordering.
+pub fn init_emm() {
+    init_vmmgr();
+    init_static_alloc();
+    init_reserve_alloc();
+}
+
+pub use hw::*;
+
+#[cfg(not(any(feature = "sim", feature = "hyper")))]
+mod hw {
+    //! Hardware-mode EMM bootstrap: registers EMAs for the ELF segments and
+    //! the RTS layout entries, and adjusts segment permissions after
+    //! relocation.
+
+    use crate::arch::{self, Layout, LayoutEntry};
+    use crate::elf::program::Type;
+    use crate::emm::ema::EmaOptions;
+    use crate::emm::layout::LayoutTable;
+    use crate::emm::page::AllocFlags;
+    use crate::emm::vmmgr::{mm_init_static_region, EMA_PROT_MASK};
+    use crate::emm::{
+        mm_alloc_rts, mm_commit, mm_dealloc, mm_modify_perms, PageInfo, PageType, ProtFlags,
+    };
+    use crate::enclave::parse;
+    use crate::enclave::MmLayout;
+    use sgx_types::error::{SgxResult, SgxStatus};
+
+    /// Registers EMAs for the ELF segments, then for every RTS layout entry.
+    pub fn init_rts_emas() -> SgxResult {
+        init_segment_emas()?;
+
+        let layout = arch::Global::get().layout_table();
+        init_rts_contexts_emas(layout, 0)?;
+        Ok(())
+    }
+
+    // Walks the layout table; group entries replay the preceding
+    // `entry_count` entries `load_times` times, each shifted by `load_step`.
+    fn init_rts_contexts_emas(table: &[Layout], offset: usize) -> SgxResult {
+        unsafe {
+            for (i, layout) in table.iter().enumerate() {
+                if is_group_id!(layout.group.id) {
+                    let mut step = 0_usize;
+                    for _ in 0..layout.group.load_times {
+                        step += layout.group.load_step as usize;
+                        init_rts_contexts_emas(
+                            &table[i - layout.group.entry_count as usize..i],
+                            step,
+                        )?;
+                    }
+                } else if layout.entry.id != arch::LAYOUT_ID_USER_REGION {
+                    build_rts_context_emas(&layout.entry, offset)?;
+                }
+            }
+            Ok(())
+        }
+    }
+
+    // Builds the EMA that mirrors one layout entry, choosing flags from the
+    // entry's attributes (EADD / POST_ADD / POST_REMOVE / EREMOVE).
+    fn build_rts_context_emas(entry: &LayoutEntry, offset: usize) -> SgxResult {
+        let rva = offset + (entry.rva as usize);
+        assert!(is_page_aligned!(rva));
+
+        // TODO: not sure get_enclave_base() equal to elrange_base or image_base
+        let addr = MmLayout::image_base() + rva;
+        let size = (entry.page_count << arch::SE_PAGE_SHIFT) as usize;
+
+        // entry is guard page or has EREMOVE, build a reserved ema
+        if (entry.si_flags == 0) || (entry.attributes & arch::PAGE_ATTR_EREMOVE != 0) {
+            let mut options =
+                EmaOptions::new(Some(addr), size, AllocFlags::RESERVED | AllocFlags::SYSTEM);
+            options.info(PageInfo {
+                typ: PageType::None,
+                prot: ProtFlags::NONE,
+            });
+            mm_init_static_region(&options).map_err(|_| SgxStatus::Unexpected)?;
+            return Ok(());
+        }
+
+        let post_remove = (entry.attributes & arch::PAGE_ATTR_POST_REMOVE) != 0;
+        let post_add = (entry.attributes & arch::PAGE_ATTR_POST_ADD) != 0;
+        let static_min = ((entry.attributes & arch::PAGE_ATTR_EADD) != 0) && !post_remove;
+
+        if post_remove {
+            // TODO: maybe AllocFlags need more flags or PageType is not None
+            let mut options = EmaOptions::new(Some(addr), size, AllocFlags::SYSTEM);
+            options.info(PageInfo {
+                typ: PageType::None,
+                prot: ProtFlags::RW,
+            });
+            mm_init_static_region(&options).map_err(|_| SgxStatus::Unexpected)?;
+
+            mm_dealloc(addr, size).map_err(|_| SgxStatus::Unexpected)?;
+        }
+
+        if post_add {
+            // Stack regions are committed downwards; everything else upwards.
+            let commit_direction = if entry.id == arch::LAYOUT_ID_STACK_MAX
+                || entry.id == arch::LAYOUT_ID_STACK_DYN_MAX
+                || entry.id == arch::LAYOUT_ID_STACK_DYN_MIN
+            {
+                AllocFlags::GROWSDOWN
+            } else {
+                AllocFlags::GROWSUP
+            };
+
+            let options = EmaOptions::new(
+                Some(addr),
+                size,
+                AllocFlags::COMMIT_ON_DEMAND
+                    | commit_direction
+                    | AllocFlags::SYSTEM
+                    | AllocFlags::FIXED,
+            );
+
+            mm_alloc_rts(&options).map_err(|_| SgxStatus::Unexpected)?;
+        } else if static_min {
+            let info = if entry.id == arch::LAYOUT_ID_TCS {
+                PageInfo {
+                    typ: PageType::Tcs,
+                    prot: ProtFlags::NONE,
+                }
+            } else {
+                PageInfo {
+                    typ: PageType::Reg,
+                    prot: ProtFlags::from_bits_truncate(
+                        (entry.si_flags as usize & EMA_PROT_MASK) as u8,
+                    ),
+                }
+            };
+            let mut options = EmaOptions::new(Some(addr), size, AllocFlags::SYSTEM);
+
+            options.info(info);
+            mm_init_static_region(&options).map_err(|_| SgxStatus::Unexpected)?;
+        }
+
+        Ok(())
+    }
+
+    /// Commits `count` dynamic stack pages starting at `addr`.
+    pub fn expand_stack_epc_pages(addr: usize, count: usize) -> SgxResult {
+        ensure!(addr != 0 && count != 0, SgxStatus::InvalidParameter);
+
+        LayoutTable::new()
+            .check_dyn_range(addr, count, None)
+            .ok_or(SgxStatus::InvalidParameter)?;
+
+        mm_commit(addr, count << arch::SE_PAGE_SHIFT).map_err(|_| SgxStatus::Unexpected)?;
+
+        Ok(())
+    }
+
+    /// Drops the write permission that relocation required: read-only /
+    /// executable segments, GNU_RELRO ranges and the RWX reserve layout
+    /// entry are re-protected after relocations are applied.
+    pub fn change_perm() -> SgxResult {
+        let elf = parse::new_elf()?;
+        let text_relo = parse::has_text_relo()?;
+
+        let base = MmLayout::image_base();
+        for phdr in elf.program_iter() {
+            let typ = phdr.get_type().unwrap_or(Type::Null);
+            if typ == Type::Load && text_relo && !phdr.flags().is_write() {
+                let mut perm = 0_u64;
+                let start = base + trim_to_page!(phdr.virtual_addr() as usize);
+                let end =
+                    base + round_to_page!(phdr.virtual_addr() as usize + phdr.mem_size() as usize);
+                let size = end - start;
+
+                if phdr.flags().is_read() {
+                    perm |= arch::SGX_EMA_PROT_READ;
+                }
+                if phdr.flags().is_execute() {
+                    perm |= arch::SGX_EMA_PROT_EXEC;
+                }
+
+                let prot = ProtFlags::from_bits_truncate(perm as u8);
+                mm_modify_perms(start, size, prot).map_err(|_| SgxStatus::Unexpected)?;
+            }
+            if typ == Type::GnuRelro {
+                let start = base + trim_to_page!(phdr.virtual_addr() as usize);
+                let end =
+                    base + round_to_page!(phdr.virtual_addr() as usize + phdr.mem_size() as usize);
+                let size = end - start;
+
+                if size > 0 {
+                    mm_modify_perms(start, size, ProtFlags::R)
+                        .map_err(|_| SgxStatus::Unexpected)?;
+                }
+            }
+        }
+
+        let layout_table = arch::Global::get().layout_table();
+        if let Some(layout) = layout_table.iter().find(|layout| unsafe {
+            (layout.entry.id == arch::LAYOUT_ID_RSRV_MIN)
+                && (layout.entry.si_flags == arch::SI_FLAGS_RWX)
+                && (layout.entry.page_count > 0)
+        }) {
+            let start = base + unsafe { layout.entry.rva as usize };
+            let size = unsafe { layout.entry.page_count as usize } << arch::SE_PAGE_SHIFT;
+
+            mm_modify_perms(start, size, ProtFlags::R).map_err(|_| SgxStatus::Unexpected)?;
+        }
+        Ok(())
+    }
+
+    /// Registers one static-region EMA per ELF LOAD segment; segments stay
+    /// writable when text relocation is required.
+    pub fn init_segment_emas() -> SgxResult {
+        let elf = parse::new_elf()?;
+        let text_relo = parse::has_text_relo()?;
+
+        let base = MmLayout::image_base();
+        for phdr in elf
+            .program_iter()
+            .filter(|phdr| phdr.get_type().unwrap_or(Type::Null) == Type::Load)
+        {
+            let mut perm = ProtFlags::R;
+            let start = base + trim_to_page!(phdr.virtual_addr() as usize);
+            let end =
+                base + round_to_page!(phdr.virtual_addr() as usize + phdr.mem_size() as usize);
+
+            if phdr.flags().is_write() || text_relo {
+                perm |= ProtFlags::W;
+            }
+            if phdr.flags().is_execute() {
+                perm |= ProtFlags::X;
+            }
+
+            let mut options = EmaOptions::new(Some(start), end - start, AllocFlags::SYSTEM);
+            options.info(PageInfo {
+                typ: PageType::Reg,
+                prot: perm,
+            });
+            mm_init_static_region(&options).map_err(|_| SgxStatus::Unexpected)?;
+        }
+
+        Ok(())
+    }
+}
diff --git a/sgx_trts/src/edmm/layout.rs b/sgx_trts/src/emm/layout.rs
similarity index 100%
rename from sgx_trts/src/edmm/layout.rs
rename to sgx_trts/src/emm/layout.rs
diff --git a/sgx_trts/src/edmm/mod.rs b/sgx_trts/src/emm/mod.rs
similarity index 67%
rename from sgx_trts/src/edmm/mod.rs
rename to sgx_trts/src/emm/mod.rs
index 420dbccb4..838761fbc 100644
--- a/sgx_trts/src/edmm/mod.rs
+++ b/sgx_trts/src/emm/mod.rs
@@ -15,15 +15,24 @@
// specific language governing permissions and limitations
// under the License..
-pub(crate) mod epc;
+#[cfg(not(any(feature = "sim", feature = "hyper")))]
+pub(crate) mod alloc;
+pub(crate) mod bitmap;
+pub(crate) mod ema;
+pub(crate) mod init;
#[cfg(not(any(feature = "sim", feature = "hyper")))]
pub(crate) mod layout;
-pub(crate) mod mem;
-pub(crate) mod perm;
+pub(crate) mod ocall;
+pub(crate) mod page;
+pub(crate) mod pfhandler;
pub(crate) mod tcs;
-#[cfg(not(any(feature = "sim", feature = "hyper")))]
-pub(crate) mod trim;
+pub(crate) mod vmmgr;
+
+pub use ema::EmaOptions;
+pub use page::{AllocFlags, PageInfo, PageRange, PageType, ProtFlags};
+pub use pfhandler::{PfHandler, PfInfo, Pfec, PfecBits};
-pub use epc::{PageFlags, PageInfo, PageRange, PageType};
-pub use mem::{apply_epc_pages, trim_epc_pages};
-pub use perm::{modpr_ocall, mprotect_ocall};
+pub use vmmgr::{
+ check_addr, mm_alloc_rts, mm_alloc_user, mm_commit, mm_dealloc, mm_modify_perms,
+ mm_modify_type, mm_uncommit,
+};
diff --git a/sgx_trts/src/emm/ocall.rs b/sgx_trts/src/emm/ocall.rs
new file mode 100644
index 000000000..16cd3ea36
--- /dev/null
+++ b/sgx_trts/src/emm/ocall.rs
@@ -0,0 +1,134 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+cfg_if! {
+ if #[cfg(not(any(feature = "sim", feature = "hyper")))] {
+ pub use hw::*;
+ } else {
+ pub use sw::*;
+ }
+}
+
+#[cfg(not(any(feature = "sim", feature = "hyper")))]
+mod hw {
+ use crate::call::{ocall, OCallIndex, OcAlloc};
+ use crate::emm::page::AllocFlags;
+ use crate::emm::{PageInfo, PageType};
+ use alloc::boxed::Box;
+ use core::convert::Into;
+ use sgx_tlibc_sys::EFAULT;
+ use sgx_types::error::OsResult;
+ #[repr(C)]
+ #[derive(Clone, Copy, Debug, Default)]
+ struct EmmAllocOcall {
+ retval: i32,
+ addr: usize,
+ size: usize,
+ page_properties: u32,
+ alloc_flags: u32,
+ }
+
+ pub fn alloc_ocall(
+ addr: usize,
+ length: usize,
+ page_type: PageType,
+ alloc_flags: AllocFlags,
+ ) -> OsResult {
+ let mut change = Box::try_new_in(
+ EmmAllocOcall {
+ retval: 0,
+ addr,
+ size: length,
+ page_properties: Into::<u8>::into(page_type) as u32,
+ alloc_flags: alloc_flags.bits(),
+ },
+ OcAlloc,
+ )
+ .map_err(|_| EFAULT)?;
+
+ let ocall_ret = ocall(OCallIndex::Alloc, Some(change.as_mut()));
+ if ocall_ret == Ok(()) && change.retval == 0 {
+ Ok(())
+ } else {
+ Err(EFAULT)
+ }
+ }
+
+ #[repr(C)]
+ #[derive(Clone, Copy, Debug, Default)]
+ struct EmmModifyOcall {
+ retval: i32,
+ addr: usize,
+ size: usize,
+ flags_from: u32,
+ flags_to: u32,
+ }
+
+ pub fn modify_ocall(
+ addr: usize,
+ length: usize,
+ info_from: PageInfo,
+ info_to: PageInfo,
+ ) -> OsResult {
+ let mut change = Box::try_new_in(
+ EmmModifyOcall {
+ retval: 0,
+ addr,
+ size: length,
+ flags_from: Into::<u32>::into(info_from),
+ flags_to: Into::<u32>::into(info_to),
+ },
+ OcAlloc,
+ )
+ .map_err(|_| EFAULT)?;
+
+ let ocall_ret = ocall(OCallIndex::Modify, Some(change.as_mut()));
+ if ocall_ret == Ok(()) && change.retval == 0 {
+ Ok(())
+ } else {
+ Err(EFAULT)
+ }
+ }
+}
+
+#[cfg(any(feature = "sim", feature = "hyper"))]
+mod sw {
+ use sgx_types::error::OsResult;
+ use crate::emm::page::{AllocFlags, PageType};
+
+ #[allow(clippy::unnecessary_wraps)]
+ #[inline]
+ pub fn alloc_ocall(
+ _addr: usize,
+ _length: usize,
+ _page_type: PageType,
+ _alloc_flags: AllocFlags,
+ ) -> OsResult {
+ Ok(())
+ }
+
+ #[allow(clippy::unnecessary_wraps)]
+ #[inline]
+ pub fn modify_ocall(
+ _addr: usize,
+ _length: usize,
+ _info_from: PageInfo,
+ _info_to: PageInfo,
+ ) -> OsResult {
+ Ok(())
+ }
+}
diff --git a/sgx_trts/src/edmm/epc.rs b/sgx_trts/src/emm/page.rs
similarity index 75%
rename from sgx_trts/src/edmm/epc.rs
rename to sgx_trts/src/emm/page.rs
index 05c966ad1..af08e7a91 100644
--- a/sgx_trts/src/edmm/epc.rs
+++ b/sgx_trts/src/emm/page.rs
@@ -19,25 +19,40 @@ use crate::arch::{SecInfo, SE_PAGE_SHIFT, SE_PAGE_SIZE};
use crate::enclave::is_within_enclave;
use crate::inst::EncluInst;
use core::num::NonZeroUsize;
-use sgx_types::error::{SgxResult, SgxStatus};
+use sgx_tlibc_sys::{EFAULT, EINVAL};
+use sgx_types::error::OsResult;
use sgx_types::marker::ContiguousMemory;
+impl_bitflags! {
+ #[derive(Copy, Clone)]
+ pub struct AllocFlags: u32 {
+ const RESERVED = 0b0001;
+ const COMMIT_NOW = 0b0010;
+ const COMMIT_ON_DEMAND = 0b0100;
+ const GROWSDOWN = 0b00010000;
+ const GROWSUP = 0b00100000;
+ const FIXED = 0b01000000;
+ const SYSTEM = 0b10000000;
+ }
+}
+
impl_enum! {
#[repr(u8)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum PageType {
- Secs = 0,
+ None = 0,
Tcs = 1,
Reg = 2,
- Va = 3,
Trim = 4,
+ Frist = 5,
+ Rest = 6,
}
}
impl_bitflags! {
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
- pub struct PageFlags: u8 {
+ pub struct ProtFlags: u8 {
const NONE = 0x00;
const R = 0x01;
const W = 0x02;
@@ -45,13 +60,22 @@ impl_bitflags! {
const PENDING = 0x08;
const MODIFIED = 0x10;
const PR = 0x20;
+ const RW = Self::R.bits() | Self::W.bits();
+ const RX = Self::R.bits() | Self::X.bits();
+ const RWX = Self::R.bits() | Self::W.bits() | Self::X.bits();
}
}
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct PageInfo {
pub typ: PageType,
- pub flags: PageFlags,
+ pub prot: ProtFlags,
+}
+
+impl Into<u32> for PageInfo {
+ fn into(self) -> u32 {
+ (Into::::into(self.typ) as u32) << 8 | (self.prot.bits() as u32)
+ }
}
unsafe impl ContiguousMemory for PageInfo {}
@@ -66,7 +90,7 @@ pub struct PageRange {
unsafe impl ContiguousMemory for PageRange {}
impl PageRange {
- pub fn new(addr: usize, count: usize, info: PageInfo) -> SgxResult<PageRange> {
+ pub fn new(addr: usize, count: usize, info: PageInfo) -> OsResult<PageRange> {
if addr != 0
&& count != 0
&& is_within_enclave(addr as *const u8, count * SE_PAGE_SIZE)
@@ -78,35 +102,35 @@ impl PageRange {
info,
})
} else {
- Err(SgxStatus::InvalidParameter)
+ Err(EINVAL)
}
}
- pub fn accept_forward(&self) -> SgxResult {
+ pub fn accept_forward(&self) -> OsResult {
for page in self.iter() {
page.accept()?;
}
Ok(())
}
- pub fn accept_backward(&self) -> SgxResult {
+ pub fn accept_backward(&self) -> OsResult {
for page in self.iter().rev() {
page.accept()?;
}
Ok(())
}
- pub fn modpe(&self) -> SgxResult {
+ pub fn modpe(&self) -> OsResult {
for page in self.iter() {
page.modpe()?;
}
Ok(())
}
- pub(crate) fn modify(&self) -> SgxResult {
+ pub(crate) fn modify(&self) -> OsResult {
for page in self.iter() {
let _ = page.modpe();
- if !page.info.flags.contains(PageFlags::W | PageFlags::X) {
+ if !page.info.prot.contains(ProtFlags::W | ProtFlags::X) {
page.accept()?;
}
}
@@ -189,12 +213,12 @@ pub struct Page {
unsafe impl ContiguousMemory for Page {}
impl Page {
- pub fn new(addr: usize, info: PageInfo) -> SgxResult<Page> {
+ pub fn new(addr: usize, info: PageInfo) -> OsResult<Page> {
ensure!(
addr != 0
&& is_within_enclave(addr as *const u8, SE_PAGE_SIZE)
&& is_page_aligned!(addr),
- SgxStatus::InvalidParameter
+ EINVAL
);
Ok(Page { addr, info })
}
@@ -203,13 +227,13 @@ impl Page {
Page { addr, info }
}
- pub fn accept(&self) -> SgxResult {
+ pub fn accept(&self) -> OsResult {
let secinfo: SecInfo = self.info.into();
- EncluInst::eaccept(&secinfo, self.addr).map_err(|_| SgxStatus::Unexpected)
+ EncluInst::eaccept(&secinfo, self.addr).map_err(|_| EFAULT)
}
- pub fn modpe(&self) -> SgxResult {
+ pub fn modpe(&self) -> OsResult {
let secinfo: SecInfo = self.info.into();
- EncluInst::emodpe(&secinfo, self.addr).map_err(|_| SgxStatus::Unexpected)
+ EncluInst::emodpe(&secinfo, self.addr).map_err(|_| EFAULT)
}
}
diff --git a/sgx_trts/src/emm/pfhandler.rs b/sgx_trts/src/emm/pfhandler.rs
new file mode 100644
index 000000000..62bfd2d72
--- /dev/null
+++ b/sgx_trts/src/emm/pfhandler.rs
@@ -0,0 +1,148 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use sgx_tlibc_sys::c_void;
+
+use crate::{
+ emm::ProtFlags,
+ emm::{
+ page::AllocFlags,
+ vmmgr::{RangeType, VMMGR},
+ },
+ veh::HandleResult,
+};
+
+#[repr(C)]
+#[derive(Clone, Copy, Default)]
+pub struct PfInfo {
+ pub maddr: u64, // address for #PF.
+ pub pfec: Pfec,
+ pub reserved: u32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub union Pfec {
+ pub errcd: u32,
+ pub bits: PfecBits,
+}
+
+impl Default for Pfec {
+ fn default() -> Self {
+ Pfec { errcd: 0 }
+ }
+}
+
+#[repr(C, packed)]
+#[derive(Clone, Copy, Default)]
+pub struct PfecBits(u32);
+
+impl PfecBits {
+ const P_OFFSET: u32 = 0;
+ const P_MASK: u32 = 0x00000001;
+ const RW_OFFSET: u32 = 1;
+ const RW_MASK: u32 = 0x00000002;
+ const SGX_OFFSET: u32 = 15;
+ const SGX_MASK: u32 = 0x00008000;
+
+ #[inline]
+ pub fn p(&self) -> u32 {
+ (self.0 & Self::P_MASK) >> Self::P_OFFSET
+ }
+
+ #[inline]
+ pub fn rw(&self) -> u32 {
+ (self.0 & Self::RW_MASK) >> Self::RW_OFFSET
+ }
+
+ #[inline]
+ pub fn sgx(&self) -> u32 {
+ (self.0 & Self::SGX_MASK) >> Self::SGX_OFFSET
+ }
+
+ #[inline]
+ pub fn set_p(&mut self, p: u32) {
+ let p = (p << Self::P_OFFSET) & Self::P_MASK;
+ self.0 = (self.0 & (!Self::P_MASK)) | p;
+ }
+
+ #[inline]
+ pub fn set_rw(&mut self, rw: u32) {
+ let rw = (rw << Self::RW_OFFSET) & Self::RW_MASK;
+ self.0 = (self.0 & (!Self::RW_MASK)) | rw;
+ }
+
+ #[inline]
+ pub fn set_sgx(&mut self, sgx: u32) {
+ let sgx = (sgx << Self::SGX_OFFSET) & Self::SGX_MASK;
+ self.0 = (self.0 & (!Self::SGX_MASK)) | sgx;
+ }
+}
+
+pub type PfHandler = extern "C" fn(info: &mut PfInfo, priv_data: *mut c_void) -> HandleResult;
+
+pub extern "C" fn mm_enclave_pfhandler(info: &mut PfInfo) -> HandleResult {
+ let addr = trim_to_page!(info.maddr as usize);
+ let mut vmmgr = VMMGR.get().unwrap().lock();
+ let mut ema_cursor = match vmmgr.search_ema(addr, RangeType::User) {
+ None => {
+ let ema_cursor = vmmgr.search_ema(addr, RangeType::Rts);
+ if ema_cursor.is_none() {
+ return HandleResult::Search;
+ }
+ ema_cursor.unwrap()
+ }
+ Some(ema_cursor) => ema_cursor,
+ };
+
+ let ema = unsafe { ema_cursor.get_mut().unwrap() };
+ let (handler, priv_data) = ema.fault_handler();
+ if let Some(handler) = handler {
+ drop(vmmgr);
+ return handler(info, priv_data.unwrap());
+ }
+
+ // No customized page fault handler
+ if ema.is_page_committed(addr) {
+ // check spurious #pf
+ let rw_bit = unsafe { info.pfec.bits.rw() };
+ if (rw_bit == 0 && !ema.info().prot.contains(ProtFlags::R))
+ || (rw_bit == 1 && !ema.info().prot.contains(ProtFlags::W))
+ {
+ return HandleResult::Search;
+ } else {
+ return HandleResult::Execution;
+ }
+ }
+
+ if ema.flags().contains(AllocFlags::COMMIT_ON_DEMAND) {
+ let rw_bit = unsafe { info.pfec.bits.rw() };
+ if (rw_bit == 0 && !ema.info().prot.contains(ProtFlags::R))
+ || (rw_bit == 1 && !ema.info().prot.contains(ProtFlags::W))
+ {
+ return HandleResult::Search;
+ };
+ ema.commit_check()
+ .expect("The EPC page fails to meet the commit condition.");
+ ema.commit(addr, crate::arch::SE_PAGE_SIZE)
+ .expect("The EPC page fails to be committed.");
+ HandleResult::Execution
+ } else {
+ // Some things are wrong
+ panic!()
+ }
+}
diff --git a/sgx_trts/src/edmm/tcs.rs b/sgx_trts/src/emm/tcs.rs
similarity index 85%
rename from sgx_trts/src/edmm/tcs.rs
rename to sgx_trts/src/emm/tcs.rs
index 271b94e24..b1dd19fa5 100644
--- a/sgx_trts/src/edmm/tcs.rs
+++ b/sgx_trts/src/emm/tcs.rs
@@ -63,7 +63,10 @@ pub fn mktcs(mk_tcs: NonNull<MkTcs>) -> SgxResult {
#[cfg(not(any(feature = "sim", feature = "hyper")))]
mod hw {
use crate::arch::{self, Layout, Tcs};
- use crate::edmm::epc::{Page, PageFlags, PageInfo, PageType};
+ use crate::emm::mm_dealloc;
+ use crate::emm::page::PageType;
+ use crate::emm::{mm_commit, mm_modify_type};
+ use crate::enclave::state::{self, State};
use crate::enclave::MmLayout;
use crate::tcs::list;
use core::ptr;
@@ -71,8 +74,7 @@ mod hw {
use sgx_types::error::{SgxResult, SgxStatus};
 pub fn add_tcs(mut tcs: NonNull<Tcs>) -> SgxResult {
- use crate::call::{ocall, OCallIndex};
- use crate::edmm::{self, layout::LayoutTable};
+ use crate::emm::layout::LayoutTable;
let base = MmLayout::image_base();
let table = LayoutTable::new();
@@ -93,8 +95,8 @@ mod hw {
if let Some(layout) = table.layout_by_id(id) {
if unsafe { layout.entry.attributes & arch::PAGE_ATTR_DYN_THREAD } != 0 {
let addr = base + unsafe { layout.entry.rva as usize } + offset;
- let count = unsafe { layout.entry.page_count };
- edmm::mem::apply_epc_pages(addr, count as usize)?;
+ let size = unsafe { layout.entry.page_count } << arch::SE_PAGE_SHIFT;
+ mm_commit(addr, size as usize).map_err(|_| SgxStatus::Unexpected)?;
}
}
}
@@ -115,18 +117,8 @@ mod hw {
tc.ofsbase = tcs_ptr + tc.ofsbase - base as u64;
tc.ogsbase = tcs_ptr + tc.ogsbase - base as u64;
- // ocall for MKTCS
- ocall(OCallIndex::OCall(0), Some(tc))?;
-
- // EACCEPT for MKTCS
- let page = Page::new(
- tcs.as_ptr() as usize,
- PageInfo {
- typ: PageType::Tcs,
- flags: PageFlags::MODIFIED,
- },
- )?;
- page.accept()?;
+ mm_modify_type(tcs.as_ptr() as usize, arch::SE_PAGE_SIZE, PageType::Tcs)
+ .map_err(|_| SgxStatus::Unexpected)?;
Ok(())
}
@@ -142,6 +134,21 @@ mod hw {
Ok(())
}
+ #[inline]
+ pub fn trim_tcs(tcs: &Tcs) -> SgxResult {
+ let mut list_guard = list::TCS_LIST.lock();
+ for tcs in list_guard.iter_mut().filter(|&t| !ptr::eq(t.as_ptr(), tcs)) {
+ let result = mm_dealloc(tcs.as_ptr() as usize, arch::SE_PAGE_SIZE);
+ if result.is_err() {
+ state::set_state(State::Crashed);
+ bail!(SgxStatus::Unexpected);
+ }
+ }
+
+ list_guard.clear();
+ Ok(())
+ }
+
fn reentrant_add_static_tcs(table: &[Layout], offset: usize) -> SgxResult {
let base = MmLayout::image_base();
unsafe {
@@ -167,23 +174,6 @@ mod hw {
Ok(())
}
}
-
- pub fn accept_trim_tcs(tcs: &Tcs) -> SgxResult {
- let mut list_guard = list::TCS_LIST.lock();
- for tcs in list_guard.iter_mut().filter(|&t| !ptr::eq(t.as_ptr(), tcs)) {
- let page = Page::new(
- tcs.as_ptr() as usize,
- PageInfo {
- typ: PageType::Trim,
- flags: PageFlags::MODIFIED,
- },
- )?;
- page.accept()?;
- }
-
- list_guard.clear();
- Ok(())
- }
}
#[cfg(any(feature = "sim", feature = "hyper"))]
diff --git a/sgx_trts/src/emm/vmmgr.rs b/sgx_trts/src/emm/vmmgr.rs
new file mode 100644
index 000000000..072dd3341
--- /dev/null
+++ b/sgx_trts/src/emm/vmmgr.rs
@@ -0,0 +1,638 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use crate::sync::Once;
+use crate::{
+ arch::{SE_PAGE_SHIFT, SE_PAGE_SIZE},
+ emm::{PageType, ProtFlags},
+ enclave::{is_within_enclave, is_within_rts_range, is_within_user_range, MmLayout},
+ sync::SpinReentrantMutex,
+};
+use alloc::boxed::Box;
+use intrusive_collections::{
+ linked_list::{Cursor, CursorMut},
+ LinkedList, UnsafeRef,
+};
+use sgx_tlibc_sys::{EEXIST, EINVAL, ENOMEM, EPERM};
+use sgx_types::error::OsResult;
+
+use super::{
+ alloc::AllocType,
+ ema::{Ema, EmaAda, EmaOptions},
+ page::AllocFlags,
+};
+
+pub const ALLOC_FLAGS_SHIFT: usize = 0;
+pub const ALLOC_FLAGS_MASK: usize = 0xFF << ALLOC_FLAGS_SHIFT;
+
+pub const PAGE_TYPE_SHIFT: usize = 8;
+pub const PAGE_TYPE_MASK: usize = 0xFF << PAGE_TYPE_SHIFT;
+
+pub const ALLIGNMENT_SHIFT: usize = 24;
+pub const ALLIGNMENT_MASK: usize = 0xFF << ALLIGNMENT_SHIFT;
+
+pub const EMA_PROT_MASK: usize = 0x7;
+
+pub(crate) static VMMGR: Once<SpinReentrantMutex<VmMgr>> = Once::new();
+
+/// Initialize range management
+pub fn init_vmmgr() {
+ let _ = VMMGR.call_once(|| Ok(SpinReentrantMutex::new(VmMgr::new())));
+}
+
+pub fn mm_init_static_region(options: &EmaOptions) -> OsResult {
+ let mut vmmgr = VMMGR.get().unwrap().lock();
+ vmmgr.init_static_region(options)
+}
+
+pub fn mm_alloc_user(options: &EmaOptions) -> OsResult<usize> {
+ let mut vmmgr = VMMGR.get().unwrap().lock();
+ vmmgr.alloc(options, RangeType::User)
+}
+
+pub fn mm_alloc_rts(options: &EmaOptions) -> OsResult<usize> {
+ let mut vmmgr = VMMGR.get().unwrap().lock();
+ vmmgr.alloc(options, RangeType::Rts)
+}
+
+pub fn mm_dealloc(addr: usize, size: usize) -> OsResult {
+ let mut vmmgr = VMMGR.get().unwrap().lock();
+ vmmgr.dealloc(addr, size)
+}
+
+pub fn mm_commit(addr: usize, size: usize) -> OsResult {
+ let mut vmmgr = VMMGR.get().unwrap().lock();
+ vmmgr.commit(addr, size)
+}
+
+pub fn mm_uncommit(addr: usize, size: usize) -> OsResult {
+ let mut vmmgr = VMMGR.get().unwrap().lock();
+ vmmgr.uncommit(addr, size)
+}
+
+pub fn mm_modify_type(addr: usize, size: usize, new_page_typ: PageType) -> OsResult {
+ let mut vmmgr = VMMGR.get().unwrap().lock();
+ vmmgr.modify_type(addr, size, new_page_typ)
+}
+
+pub fn mm_modify_perms(addr: usize, size: usize, prot: ProtFlags) -> OsResult {
+ let mut vmmgr = VMMGR.get().unwrap().lock();
+ vmmgr.modify_perms(addr, size, prot)
+}
+
+pub fn check_addr(addr: usize, size: usize) -> OsResult<RangeType> {
+ VmMgr::check(addr, size)
+}
+
+/// Virtual memory manager
+pub(crate) struct VmMgr {
+ user: LinkedList<EmaAda>,
+ rts: LinkedList<EmaAda>,
+}
+
+/// RangeType specifies using Rts or User range
+#[repr(u8)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum RangeType {
+ Rts,
+ User,
+}
+
+impl VmMgr {
+ pub fn new() -> Self {
+ Self {
+ user: LinkedList::new(EmaAda::new()),
+ rts: LinkedList::new(EmaAda::new()),
+ }
+ }
+
+ // Reserve memory range for allocations created
+ // by the RTS enclave loader at fixed address ranges
+ pub fn init_static_region(&mut self, options: &EmaOptions) -> OsResult {
+ ensure!(options.addr.is_some(), EINVAL);
+ EmaOptions::check(options)?;
+
+ let mut next_ema = self
+ .find_free_region_at(options.addr.unwrap(), options.length, RangeType::Rts)
+ .ok_or(EINVAL)?;
+
+ let mut new_ema = Ema::allocate(options, false)?;
+ if !options.alloc_flags.contains(AllocFlags::RESERVED) {
+ new_ema.set_eaccept_map_full()?;
+ }
+ next_ema.insert_before(new_ema);
+
+ Ok(())
+ }
+
+ /// Allocate a new memory region in enclave address space (ELRANGE).
+ pub fn alloc(&mut self, options: &EmaOptions, typ: RangeType) -> OsResult<usize> {
+ EmaOptions::check(options)?;
+
+ // let addr = options.addr.unwrap_or(0);
+ let addr = options.addr;
+ let size = options.length;
+ // let end = addr + size;
+
+ let mut alloc_addr: Option<usize> = None;
+ let mut alloc_next_ema: Option<CursorMut<'_, EmaAda>> = None;
+
+ if let Some(addr) = addr {
+ let end = addr + options.length;
+ let is_fixed_alloc = options.alloc_flags.contains(AllocFlags::FIXED);
+ let range = self.search_ema_range(addr, end, typ, false, false);
+
+ match range {
+ // exist in emas list
+ Some(_) => match self.clear_reserved_emas(addr, end, typ, options.alloc) {
+ Some(ema) => {
+ alloc_addr = Some(addr);
+ alloc_next_ema = Some(ema);
+ }
+ None => {
+ if is_fixed_alloc {
+ return Err(EEXIST);
+ }
+ }
+ },
+ // not exist in emas list
+ None => {
+ let next_ema = self.find_free_region_at(addr, size, typ);
+ if next_ema.is_none() && is_fixed_alloc {
+ return Err(EPERM);
+ }
+ }
+ };
+ };
+
+ if alloc_addr.is_none() {
+ let (free_addr, next_ema) = self
+ .find_free_region(size, 1 << SE_PAGE_SHIFT, typ)
+ .ok_or(ENOMEM)?;
+ alloc_addr = Some(free_addr);
+ alloc_next_ema = Some(next_ema);
+ }
+
+ let mut ema_options = *options;
+ ema_options.addr(alloc_addr.unwrap());
+
+ let new_ema = Ema::allocate(&ema_options, true)?;
+
+ alloc_next_ema.unwrap().insert_before(new_ema);
+ Ok(alloc_addr.unwrap())
+ }
+
+ /// Change permissions of an allocated region.
+ pub fn modify_perms(&mut self, addr: usize, size: usize, prot: ProtFlags) -> OsResult {
+ if prot.contains(ProtFlags::X) && !prot.contains(ProtFlags::R) {
+ return Err(EINVAL);
+ }
+ self.apply_commands(
+ addr,
+ size,
+ true,
+ |cursor| cursor.get().unwrap().modify_perm_check(),
+ |cursor| unsafe { cursor.get_mut().unwrap().modify_perm(prot) },
+ )?;
+
+ Ok(())
+ }
+
+ /// Commit a partial or full range of memory allocated previously with
+ /// COMMIT_ON_DEMAND.
+ ///
+ /// TODO: don't split Emas when committing pages
+ pub fn commit(&mut self, addr: usize, size: usize) -> OsResult {
+ let end = addr + size;
+ self.apply_commands(
+ addr,
+ size,
+ false,
+ |cursor| cursor.get().unwrap().commit_check(),
+ |cursor| {
+ let ema = unsafe { cursor.get_mut().unwrap() };
+ let start = addr.max(ema.start());
+ let end = end.min(ema.end());
+ ema.commit(start, end - start)
+ },
+ )?;
+
+ Ok(())
+ }
+
+ /// Uncommit (trim) physical EPC pages in a previously committed range.
+ ///
+ /// TODO: don't split Emas when trimming pages
+ ///
+ /// Question: There exist commit_now Emas with no pages if trimming,
+ /// How should we treat those null commit_now Emas?
+ pub fn uncommit(&mut self, addr: usize, size: usize) -> OsResult {
+ let end = addr + size;
+ self.apply_commands(
+ addr,
+ size,
+ false,
+ |cursor| cursor.get().unwrap().uncommit_check(),
+ |cursor| {
+ let ema = unsafe { cursor.get_mut().unwrap() };
+ let start = addr.max(ema.start());
+ let end = end.min(ema.end());
+ ema.uncommit(start, end - start)
+ },
+ )?;
+
+ Ok(())
+ }
+
+ /// Deallocate the address range.
+ pub fn dealloc(&mut self, addr: usize, size: usize) -> OsResult {
+ let typ = VmMgr::check(addr, size)?;
+ let (mut cursor, mut ema_num) = self
+ .search_ema_range(addr, addr + size, typ, false, true)
+ .ok_or(EINVAL)?;
+ while ema_num != 0 {
+ // Calling remove() implicitly moves cursor pointing to next ema
+ let mut ema = cursor.remove().unwrap();
+ ema.dealloc()?;
+
+ // Drop inner Ema inexplicitly
+ let allocator = ema.allocator();
+ let _ema_box = unsafe { Box::from_raw_in(UnsafeRef::into_raw(ema), allocator) };
+
+ ema_num -= 1;
+ }
+ Ok(())
+ }
+
+ /// Change the page type of an allocated region.
+ pub fn modify_type(&mut self, addr: usize, size: usize, new_page_typ: PageType) -> OsResult {
+ let typ = VmMgr::check(addr, size)?;
+ ensure!(new_page_typ == PageType::Tcs, EPERM);
+ ensure!(size == SE_PAGE_SIZE, EINVAL);
+
+ let (mut cursor, ema_num) = self
+ .search_ema_range(addr, addr + size, typ, true, true)
+ .ok_or(EINVAL)?;
+ debug_assert!(ema_num == 1);
+ unsafe { cursor.get_mut().unwrap().change_to_tcs()? };
+
+ Ok(())
+ }
+
+ // Clear the reserved Emas in charging of [start, end) memory region,
+ // return next ema cursor
+ #[inline]
+ fn clear_reserved_emas(
+ &mut self,
+ start: usize,
+ end: usize,
+ typ: RangeType,
+ alloc: AllocType,
+ ) -> Option<CursorMut<'_, EmaAda>> {
+ let (mut cursor, ema_num) = self.search_ema_range(start, end, typ, true, true)?;
+ let start_ema_ptr = cursor.get().unwrap() as *const Ema;
+ let mut count = ema_num;
+ while count != 0 {
+ let ema = cursor.get().unwrap();
+ // Ema must be reserved and can not manage internal memory region
+ if !ema.flags().contains(AllocFlags::RESERVED) || ema.alloc_type() != alloc {
+ return None;
+ }
+ cursor.move_next();
+ count -= 1;
+ }
+
+ let mut cursor = unsafe { self.cursor_mut_from_ptr(start_ema_ptr, typ) };
+ count = ema_num;
+ while count != 0 {
+ cursor.remove();
+ count -= 1;
+ }
+
+ Some(cursor)
+ }
+
+ /// Search for a range of Emas containing addresses within [start, end).
+ ///
+ /// Return the mutable cursor of start ema and ema number.
+ fn search_ema_range(
+ &mut self,
+ start: usize,
+ end: usize,
+ typ: RangeType,
+ continuous: bool,
+ split: bool,
+ ) -> Option<(CursorMut<'_, EmaAda>, usize)> {
+ let mut cursor = self.front(typ);
+
+ while !cursor.is_null() && cursor.get().unwrap().lower_than_addr(start) {
+ cursor.move_next();
+ }
+
+ if cursor.is_null() || cursor.get().unwrap().higher_than_addr(end) {
+ return None;
+ }
+
+ let mut curr_ema = cursor.get().unwrap();
+ let mut start_ema_ptr = curr_ema as *const Ema;
+ let mut emas_num = 0;
+ let mut prev_end = curr_ema.start();
+
+ while !cursor.is_null() && !cursor.get().unwrap().higher_than_addr(end) {
+ curr_ema = cursor.get().unwrap();
+ // If continuity is required, there should
+ // be no gaps in the specified range in the emas list.
+ if continuous && prev_end != curr_ema.start() {
+ return None;
+ }
+
+ emas_num += 1;
+ prev_end = curr_ema.end();
+ cursor.move_next();
+ }
+
+ // Spliting end ema
+ if split {
+ let mut end_ema_ptr = curr_ema as *const Ema;
+ // Spliting start ema
+ let mut start_cursor = unsafe { self.cursor_mut_from_ptr(start_ema_ptr, typ) };
+
+ let curr_ema = unsafe { start_cursor.get_mut().unwrap() };
+ let ema_start = curr_ema.start();
+
+ if ema_start < start {
+ let right_ema = curr_ema.split(start).unwrap();
+ start_cursor.insert_after(right_ema);
+ // start cursor moves next to refer real start ema
+ start_cursor.move_next();
+ // ptr points to the address of real start ema
+ start_ema_ptr = start_cursor.get().unwrap() as *const Ema;
+ }
+
+ // Spliting end ema
+ if emas_num == 1 {
+ end_ema_ptr = start_ema_ptr;
+ }
+
+ let mut end_cursor = unsafe { self.cursor_mut_from_ptr(end_ema_ptr, typ) };
+
+ let end_ema = unsafe { end_cursor.get_mut().unwrap() };
+ let ema_end = end_ema.end();
+
+ if ema_end > end {
+ let right_ema = end_ema.split(end).unwrap();
+ end_cursor.insert_after(right_ema);
+ }
+ }
+
+ // Recover start ema and return it as range
+ let start_cursor = unsafe { self.cursor_mut_from_ptr(start_ema_ptr, typ) };
+
+ Some((start_cursor, emas_num))
+ }
+
+ // Search for a ema node whose memory range contains address
+ pub fn search_ema(&mut self, addr: usize, typ: RangeType) -> Option<CursorMut<'_, EmaAda>> {
+ let mut cursor = self.front_mut(typ);
+
+ while !cursor.is_null() {
+ let ema = cursor.get().unwrap();
+ if ema.overlap_addr(addr) {
+ return Some(cursor);
+ }
+ cursor.move_next();
+ }
+
+ None
+ }
+
+ // Find a free space at addr with 'len' bytes in reserve region,
+ // the request space mustn't intersect with existed ema node.
+ // If success, return the next ema cursor.
+ fn find_free_region_at(
+ &mut self,
+ addr: usize,
+ len: usize,
+ typ: RangeType,
+ ) -> Option<CursorMut<'_, EmaAda>> {
+ let mut cursor = self.front_mut(typ);
+
+ while !cursor.is_null() {
+ let start_curr = cursor.get().map(|ema| ema.start()).unwrap();
+ let end_curr = start_curr + cursor.get().map(|ema| ema.len()).unwrap();
+ if start_curr >= addr + len {
+ return Some(cursor);
+ }
+
+ if addr >= end_curr {
+ cursor.move_next();
+ } else {
+ break;
+ }
+ }
+
+ // Means addr is larger than the end of the last ema node
+ if cursor.is_null() {
+ return Some(cursor);
+ }
+
+ None
+ }
+
+ // Find a free space of size at least 'size' bytes in reserve region,
+ // return the start address
+ fn find_free_region(
+ &mut self,
+ len: usize,
+ align: usize,
+ typ: RangeType,
+ ) -> Option<(usize, CursorMut<'_, EmaAda>)> {
+ let user_base = MmLayout::user_region_mem_base();
+ let user_end = user_base + MmLayout::user_region_mem_size();
+ let mut addr;
+ let mut cursor = self.front_mut(typ);
+
+ // no ema in list
+ if cursor.is_null() {
+ match typ {
+ RangeType::Rts => {
+ if user_base >= len {
+ addr = trim_to!(user_base - len, align);
+ if is_within_enclave(addr as *const u8, len) {
+ return Some((addr, cursor));
+ }
+ } else {
+ addr = round_to!(user_end, align);
+ // no integer overflow
+ if addr + len >= addr && is_within_enclave(addr as *const u8, len) {
+ return Some((addr, cursor));
+ }
+ }
+ return None;
+ }
+ RangeType::User => {
+ addr = round_to!(user_base, align);
+ if is_within_user_range(addr, len) {
+ return Some((addr, cursor));
+ }
+ return None;
+ }
+ }
+ }
+
+ let mut cursor_next = cursor.peek_next();
+
+ // ema is_null means pointing to the Null object, not means this ema is empty
+ while !cursor_next.is_null() {
+ let curr_end = cursor.get().map(|ema| ema.aligned_end(align)).unwrap();
+
+ let next_start = cursor_next.get().map(|ema| ema.start()).unwrap();
+
+ if curr_end <= next_start {
+ let free_size = next_start - curr_end;
+ if free_size >= len
+ && (typ == RangeType::User || is_within_rts_range(curr_end, len))
+ {
+ cursor.move_next();
+ return Some((curr_end, cursor));
+ }
+ }
+ cursor.move_next();
+ cursor_next = cursor.peek_next();
+ }
+
+ addr = cursor.get().map(|ema| ema.aligned_end(align)).unwrap();
+
+ if is_within_enclave(addr as *const u8, len)
+ && ((typ == RangeType::Rts && is_within_rts_range(addr, len))
+ || (typ == RangeType::User && is_within_user_range(addr, len)))
+ {
+ cursor.move_next();
+ return Some((addr, cursor));
+ }
+
+ // Cursor moves to emas->front_mut.
+ // Firstly cursor moves to None, then moves to linkedlist head
+ cursor.move_next();
+ cursor.move_next();
+
+ // Back to the first ema to check rts region before user region
+ let start_first = cursor.get().map(|ema| ema.start()).unwrap();
+ if start_first < len {
+ return None;
+ }
+
+ addr = trim_to!(start_first, align);
+
+ match typ {
+ RangeType::User => {
+ if is_within_user_range(addr, len) {
+ return Some((addr, cursor));
+ }
+ }
+ RangeType::Rts => {
+ if is_within_enclave(addr as *const u8, len) && is_within_rts_range(addr, len) {
+ return Some((addr, cursor));
+ }
+ }
+ }
+
+ None
+ }
+
+ fn front_mut(&mut self, typ: RangeType) -> CursorMut<'_, EmaAda> {
+ match typ {
+ RangeType::Rts => self.rts.front_mut(),
+ RangeType::User => self.user.front_mut(),
+ }
+ }
+
+ fn front(&self, typ: RangeType) -> Cursor<'_, EmaAda> {
+ match typ {
+ RangeType::Rts => self.rts.front(),
+ RangeType::User => self.user.front(),
+ }
+ }
+
+ unsafe fn cursor_mut_from_ptr(
+ &mut self,
+ ptr: *const Ema,
+ typ: RangeType,
+ ) -> CursorMut<'_, EmaAda> {
+ match typ {
+ RangeType::Rts => unsafe { self.rts.cursor_mut_from_ptr(ptr) },
+ RangeType::User => unsafe { self.user.cursor_mut_from_ptr(ptr) },
+ }
+ }
+
+ fn apply_commands<F1, F2>(
+ &mut self,
+ addr: usize,
+ size: usize,
+ split: bool,
+ check: F1,
+ commands: F2,
+ ) -> OsResult
+ where
+ F1: Fn(&CursorMut<'_, EmaAda>) -> OsResult,
+ F2: Fn(&mut CursorMut<'_, EmaAda>) -> OsResult,
+ {
+ let typ = VmMgr::check(addr, size)?;
+ let (mut cursor, ema_num) = self
+ .search_ema_range(addr, addr + size, typ, true, split)
+ .ok_or(EINVAL)?;
+ let start_ema_ptr = cursor.get().unwrap() as *const Ema;
+
+ let mut count = ema_num;
+ while count != 0 {
+ check(&cursor)?;
+ cursor.move_next();
+ count -= 1;
+ }
+
+ let mut cursor = unsafe { self.cursor_mut_from_ptr(start_ema_ptr, typ) };
+ count = ema_num;
+ while count != 0 {
+ commands(&mut cursor)?;
+ cursor.move_next();
+ count -= 1;
+ }
+
+ Ok(())
+ }
+}
+
+// Utils
+impl VmMgr {
+ pub fn check(addr: usize, len: usize) -> OsResult<RangeType> {
+ if addr > 0 {
+ ensure!(
+ is_page_aligned!(addr) && is_within_enclave(addr as *const u8, len),
+ EINVAL
+ );
+ }
+ ensure!(len != 0 && ((len % SE_PAGE_SIZE) == 0), EINVAL);
+
+ if is_within_rts_range(addr, len) {
+ Ok(RangeType::Rts)
+ } else if is_within_user_range(addr, len) {
+ Ok(RangeType::User)
+ } else {
+ Err(EINVAL)
+ }
+ }
+}
diff --git a/sgx_trts/src/enclave/entry.rs b/sgx_trts/src/enclave/entry.rs
index 3bf7cac59..798d495cc 100644
--- a/sgx_trts/src/enclave/entry.rs
+++ b/sgx_trts/src/enclave/entry.rs
@@ -18,8 +18,8 @@
use crate::arch::Tcs;
use crate::call;
use crate::call::ECallIndex;
-use crate::edmm::tcs;
-use crate::edmm::tcs::MkTcs;
+use crate::emm::tcs;
+use crate::emm::tcs::MkTcs;
use crate::enclave;
use crate::enclave::state::{self, State};
use crate::tcs::tc;
diff --git a/sgx_trts/src/enclave/init.rs b/sgx_trts/src/enclave/init.rs
index 1e61526d0..c805a63a9 100644
--- a/sgx_trts/src/enclave/init.rs
+++ b/sgx_trts/src/enclave/init.rs
@@ -16,6 +16,7 @@
// under the License..
use crate::arch::Tcs;
+use crate::emm::init::{init_emm, init_rts_emas};
use crate::enclave::state::State;
use crate::enclave::EnclaveRange;
use crate::enclave::{mem, parse, state};
@@ -80,19 +81,21 @@ pub fn rtinit(tcs: &mut Tcs, ms: *mut SystemFeatures, tidx: usize) -> SgxResult
tc::ThreadControl::from_tcs(tcs).init(tidx, true)?;
+ heap.zero_memory();
+ rsrvmem.zero_memory();
+
+ state::set_state(State::InitDone);
+
#[cfg(not(any(feature = "sim", feature = "hyper")))]
{
if features.is_edmm() {
- // EDMM:
- // need to accept the trimming of the POST_REMOVE pages
- crate::edmm::mem::accept_post_remove()?;
+ let usermem = mem::UserRegionMem::get_or_init();
+ init_emm();
+
+ init_rts_emas()?;
}
}
- heap.zero_memory();
- rsrvmem.zero_memory();
-
- state::set_state(State::InitDone);
Ok(())
}
diff --git a/sgx_trts/src/enclave/mem.rs b/sgx_trts/src/enclave/mem.rs
index bed5bf3c2..e221cafe5 100644
--- a/sgx_trts/src/enclave/mem.rs
+++ b/sgx_trts/src/enclave/mem.rs
@@ -406,6 +406,46 @@ impl UserRegionMem {
}
}
+pub fn is_within_rts_range(start: usize, len: usize) -> bool {
+ if !is_within_enclave(start as *const u8, len) {
+ return false;
+ }
+ let end = if len > 0 {
+ if let Some(end) = start.checked_add(len - 1) {
+ end
+ } else {
+ return false;
+ }
+ } else {
+ start
+ };
+
+ let user_base = MmLayout::user_region_mem_base();
+ let user_end = user_base + MmLayout::user_region_mem_size();
+
+ (start <= end) && ((start >= user_end) || (end < user_base))
+}
+
+pub fn is_within_user_range(start: usize, len: usize) -> bool {
+ if !is_within_enclave(start as *const u8, len) {
+ return false;
+ }
+ let end = if len > 0 {
+ if let Some(end) = start.checked_add(len - 1) {
+ end
+ } else {
+ return false;
+ }
+ } else {
+ start
+ };
+
+ let user_base = MmLayout::user_region_mem_base();
+ let user_end = user_base + MmLayout::user_region_mem_size();
+
+ (start <= end) && (start >= user_base) && (end < user_end)
+}
+
pub fn is_within_enclave(p: *const u8, len: usize) -> bool {
let start = p as usize;
let end = if len > 0 {
diff --git a/sgx_trts/src/enclave/mod.rs b/sgx_trts/src/enclave/mod.rs
index afaa2b064..3c32a5cac 100644
--- a/sgx_trts/src/enclave/mod.rs
+++ b/sgx_trts/src/enclave/mod.rs
@@ -26,5 +26,8 @@ pub mod state;
pub use atexit::{at_exit, cleanup};
pub use init::{ctors, global_init, rtinit};
-pub use mem::{is_within_enclave, is_within_host, EnclaveRange, MmLayout};
+pub use mem::{
+ is_within_enclave, is_within_host, is_within_rts_range, is_within_user_range, EnclaveRange,
+ MmLayout,
+};
pub use uninit::{global_exit, rtuninit, UNINIT_FLAG};
diff --git a/sgx_trts/src/enclave/uninit.rs b/sgx_trts/src/enclave/uninit.rs
index 78a79dcb8..aaee25273 100644
--- a/sgx_trts/src/enclave/uninit.rs
+++ b/sgx_trts/src/enclave/uninit.rs
@@ -15,6 +15,7 @@
// specific language governing permissions and limitations
// under the License..
+use crate::emm::tcs::trim_tcs;
use crate::enclave::state::{self, State};
use crate::enclave::{atexit, parse};
use crate::tcs::ThreadControl;
@@ -61,7 +62,7 @@ pub fn rtuninit(tc: ThreadControl) -> SgxResult {
let is_legal = tc.is_init();
} else {
use crate::feature::SysFeatures;
- use crate::edmm::{self, layout::LayoutTable};
+ use crate::emm::layout::LayoutTable;
let is_legal = if SysFeatures::get().is_edmm() {
tc.is_utility() || !LayoutTable::new().is_dyn_tcs_exist()
@@ -79,9 +80,8 @@ pub fn rtuninit(tc: ThreadControl) -> SgxResult {
#[cfg(not(any(feature = "sim", feature = "hyper")))]
{
- if SysFeatures::get().is_edmm() && edmm::tcs::accept_trim_tcs(tcs).is_err() {
- state::set_state(State::Crashed);
- bail!(SgxStatus::Unexpected);
+ if SysFeatures::get().is_edmm() {
+ trim_tcs(tcs)?;
}
}
diff --git a/sgx_trts/src/lib.rs b/sgx_trts/src/lib.rs
index 84cb59690..d4bc51f73 100644
--- a/sgx_trts/src/lib.rs
+++ b/sgx_trts/src/lib.rs
@@ -30,10 +30,17 @@
#![feature(nonnull_slice_from_raw_parts)]
#![feature(ptr_internals)]
#![feature(thread_local)]
+#![feature(trait_alias)]
+#![feature(new_uninit)]
#![cfg_attr(feature = "sim", feature(unchecked_math))]
#![allow(clippy::missing_safety_doc)]
#![allow(dead_code)]
#![allow(non_camel_case_types)]
+#![feature(linked_list_cursors)]
+#![feature(strict_provenance)]
+#![feature(pointer_byte_offsets)]
+#![feature(maybe_uninit_array_assume_init)]
+#![feature(trait_upcasting)]
#[cfg(all(feature = "sim", feature = "hyper"))]
compile_error!("feature \"sim\" and feature \"hyper\" cannot be enabled at the same time");
@@ -60,10 +67,10 @@ mod version;
mod xsave;
pub mod capi;
+pub mod emm;
#[cfg(not(any(feature = "sim", feature = "hyper")))]
pub mod aexnotify;
-pub mod edmm;
pub mod error;
#[macro_use]
pub mod feature;
diff --git a/sgx_trts/src/se/report.rs b/sgx_trts/src/se/report.rs
index 2f8079496..9eab4eaed 100644
--- a/sgx_trts/src/se/report.rs
+++ b/sgx_trts/src/se/report.rs
@@ -26,15 +26,11 @@ use core::ptr;
use sgx_types::error::{SgxResult, SgxStatus};
use sgx_types::marker::ContiguousMemory;
use sgx_types::types::{
- Attributes, AttributesFlags, ConfigId, CpuSvn, Key128bit, KeyId, KeyName, KeyRequest, Mac,
- Measurement, MiscSelect, Report, Report2Mac, ReportBody, ReportData, TargetInfo,
+ Key128bit, KeyName, KeyRequest, Mac, Report, Report2Mac, ReportBody, ReportData, TargetInfo,
};
use sgx_types::types::{
- CONFIGID_SIZE, CPUSVN_SIZE, HASH_SIZE, ISVEXT_PROD_ID_SIZE, ISV_FAMILY_ID_SIZE, KEYID_SIZE,
- MAC_SIZE, REPORT2_MAC_RESERVED1_BYTES, REPORT2_MAC_RESERVED2_BYTES,
- REPORT_BODY_RESERVED1_BYTES, REPORT_BODY_RESERVED2_BYTES, REPORT_BODY_RESERVED3_BYTES,
- REPORT_BODY_RESERVED4_BYTES, REPORT_DATA_SIZE, TEE_REPORT2_SUBTYPE, TEE_REPORT2_TYPE,
- TEE_REPORT2_VERSION, TEE_REPORT2_VERSION_SERVICETD,
+ REPORT2_MAC_RESERVED1_BYTES, REPORT2_MAC_RESERVED2_BYTES, TEE_REPORT2_SUBTYPE,
+ TEE_REPORT2_TYPE, TEE_REPORT2_VERSION, TEE_REPORT2_VERSION_SERVICETD,
};
#[repr(C, align(128))]
@@ -58,51 +54,11 @@ unsafe impl ContiguousMemory for AlignTargetInfo {}
unsafe impl ContiguousMemory for AlignReport {}
unsafe impl ContiguousMemory for AlignReport2Mac {}
-static SELF_REPORT: Once = Once::new();
-static mut REPORT: AlignReport = AlignReport(Report {
- body: ReportBody {
- cpu_svn: CpuSvn {
- svn: [0; CPUSVN_SIZE],
- },
- misc_select: MiscSelect::empty(),
- reserved1: [0; REPORT_BODY_RESERVED1_BYTES],
- isv_ext_prod_id: [0; ISVEXT_PROD_ID_SIZE],
- attributes: Attributes {
- flags: AttributesFlags::empty(),
- xfrm: 0,
- },
- mr_enclave: Measurement { m: [0; HASH_SIZE] },
- reserved2: [0; REPORT_BODY_RESERVED2_BYTES],
- mr_signer: Measurement { m: [0; HASH_SIZE] },
- reserved3: [0; REPORT_BODY_RESERVED3_BYTES],
- config_id: ConfigId {
- id: [0; CONFIGID_SIZE],
- },
- isv_prod_id: 0,
- isv_svn: 0,
- config_svn: 0,
- reserved4: [0; REPORT_BODY_RESERVED4_BYTES],
- isv_family_id: [0; ISV_FAMILY_ID_SIZE],
- report_data: ReportData {
- d: [0; REPORT_DATA_SIZE],
- },
- },
- key_id: KeyId {
- id: [0_u8; KEYID_SIZE],
- },
- mac: [0_u8; MAC_SIZE],
-});
+static REPORT: Once<AlignReport> = Once::new();
impl AlignReport {
pub fn get_self() -> &'static AlignReport {
- unsafe {
- let _ = SELF_REPORT.call_once(|| {
- let report = AlignReport::for_self()?;
- REPORT = report;
- Ok(())
- });
- &REPORT
- }
+ REPORT.call_once(AlignReport::for_self).unwrap()
}
    pub fn for_self() -> SgxResult<AlignReport> {
diff --git a/sgx_trts/src/sync/once.rs b/sgx_trts/src/sync/once.rs
index 3261f0113..14424385e 100644
--- a/sgx_trts/src/sync/once.rs
+++ b/sgx_trts/src/sync/once.rs
@@ -16,46 +16,94 @@
// under the License..
use crate::sync::{SpinMutex, SpinMutexGuard};
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::{
+ cell::UnsafeCell,
+ mem::MaybeUninit,
+ sync::atomic::{AtomicUsize, Ordering},
+};
use sgx_types::error::SgxResult;
-pub struct Once {
+pub struct Once<T = ()> {
lock: SpinMutex<()>,
state: AtomicUsize,
+    data: UnsafeCell<MaybeUninit<T>>,
}
-unsafe impl Sync for Once {}
-unsafe impl Send for Once {}
+impl<T> Default for Once<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+unsafe impl<T: Send + Sync> Sync for Once<T> {}
+unsafe impl<T: Send> Send for Once<T> {}
const INCOMPLETE: usize = 0x0;
const COMPLETE: usize = 0x1;
-impl Once {
- pub const fn new() -> Once {
- Once {
- lock: SpinMutex::new(()),
- state: AtomicUsize::new(INCOMPLETE),
- }
+impl<T> Once<T> {
+ /// Initialization constant of [`Once`].
+ #[allow(clippy::declare_interior_mutable_const)]
+ pub const INIT: Self = Self {
+ lock: SpinMutex::new(()),
+ state: AtomicUsize::new(INCOMPLETE),
+ data: UnsafeCell::new(MaybeUninit::uninit()),
+ };
+
+ /// Creates a new [`Once`].
+ pub const fn new() -> Self {
+ Self::INIT
}
pub fn lock(&self) -> SpinMutexGuard<()> {
self.lock.lock()
}
-    pub fn call_once<F>(&self, init: F) -> SgxResult
+    pub fn call_once<F>(&self, init: F) -> SgxResult<&T>
where
- F: FnOnce() -> SgxResult,
+        F: FnOnce() -> SgxResult<T>,
{
if self.is_completed() {
- return Ok(());
+ return Ok(unsafe {
+ // SAFETY: The status is Complete
+ self.force_get()
+ });
}
let _guard = self.lock.lock();
if !self.is_completed() {
- init()?;
+ let val = init()?;
+ unsafe {
+ (*self.data.get()).as_mut_ptr().write(val);
+ }
self.state.store(COMPLETE, Ordering::Release);
}
- Ok(())
+ unsafe { Ok(self.force_get()) }
+ }
+
+ /// Returns a reference to the inner value if the [`Once`] has been initialized.
+ pub fn get(&self) -> Option<&T> {
+ if self.state.load(Ordering::Acquire) == COMPLETE {
+ Some(unsafe { self.force_get() })
+ } else {
+ None
+ }
+ }
+
+ /// Get a reference to the initialized instance. Must only be called once COMPLETE.
+ unsafe fn force_get(&self) -> &T {
+ // SAFETY:
+ // * `UnsafeCell`/inner deref: data never changes again
+ // * `MaybeUninit`/outer deref: data was initialized
+ &*(*self.data.get()).as_ptr()
+ }
+
+ /// Get a reference to the initialized instance. Must only be called once COMPLETE.
+ unsafe fn force_get_mut(&mut self) -> &mut T {
+ // SAFETY:
+ // * `UnsafeCell`/inner deref: data never changes again
+ // * `MaybeUninit`/outer deref: data was initialized
+ &mut *(*self.data.get()).as_mut_ptr()
}
#[inline]
diff --git a/sgx_trts/src/tcs/tc.rs b/sgx_trts/src/tcs/tc.rs
index 629426e7a..fcd81b783 100644
--- a/sgx_trts/src/tcs/tc.rs
+++ b/sgx_trts/src/tcs/tc.rs
@@ -30,7 +30,7 @@ use sgx_types::error::SgxResult;
#[link_section = ".data.rel.ro"]
static mut STACK_CHK_GUARD: OnceCell<NonZeroUsize> = OnceCell::new();
-pub const STATIC_STACK_SIZE: usize = 2656; // 16 bytes aligned
+pub const STATIC_STACK_SIZE: usize = 4096;
const CANARY_OFFSET: usize = arch::SE_GUARD_PAGE_SIZE + STATIC_STACK_SIZE - mem::size_of::<usize>();
extern "C" {
@@ -207,7 +207,7 @@ impl<'a> ThreadControl<'a> {
#[cfg(not(any(feature = "sim", feature = "hyper")))]
fn is_dyn_tcs(&self) -> bool {
- let table = crate::edmm::layout::LayoutTable::new();
+ let table = crate::emm::layout::LayoutTable::new();
if let Some(attr) = table.check_dyn_range(self.tcs() as *const Tcs as usize, 1, None) {
if attr.flags == arch::SI_FLAGS_TCS {
return true;
@@ -307,5 +307,5 @@ pub fn get_stack_guard() -> NonZeroUsize {
#[cfg(not(any(feature = "sim", feature = "hyper")))]
fn stack_max_page() -> usize {
- crate::edmm::layout::LayoutTable::new().dyn_stack_max_page()
+ crate::emm::layout::LayoutTable::new().dyn_stack_max_page()
}
diff --git a/sgx_trts/src/veh/exception.rs b/sgx_trts/src/veh/exception.rs
index d24f11e25..b290d8991 100644
--- a/sgx_trts/src/veh/exception.rs
+++ b/sgx_trts/src/veh/exception.rs
@@ -16,13 +16,15 @@
// under the License..
use crate::arch::{self, MiscExInfo, SsaGpr, Tcs, Tds};
-use crate::edmm;
+use crate::emm;
+use crate::emm::pfhandler::{mm_enclave_pfhandler, PfInfo};
use crate::enclave::state::{self, State};
use crate::error;
use crate::feature::SysFeatures;
use crate::tcs::tc::{self, ThreadControl};
use crate::trts;
use crate::veh::list;
+use crate::veh::register;
use crate::veh::MAX_REGISTER_COUNT;
use crate::veh::{ExceptionHandler, ExceptionInfo, ExceptionType, ExceptionVector, HandleResult};
use crate::xsave;
@@ -137,7 +139,7 @@ pub fn handle(tcs: &mut Tcs) -> SgxResult {
if (tds.stack_commit > page_aligned_delta)
&& ((tds.stack_commit - page_aligned_delta) >= tds.stack_limit)
{
- result = edmm::mem::expand_stack_epc_pages(
+ result = emm::init::expand_stack_epc_pages(
tds.stack_commit - page_aligned_delta,
page_aligned_delta >> arch::SE_PAGE_SHIFT,
)
@@ -307,10 +309,13 @@ extern "C" fn internal_handle(info: &mut ExceptionInfo) {
if info.vector == ExceptionVector::PF {
tds.exception_flag -= 1;
- // EMM_TODO:
- // if mm_fault_handler(&info.exinfo) == SGX_MM_EXCEPTION_CONTINUE_EXECUTION {
- // exception_continue_execution(info);
- // }
+ let pfinfo =
+ unsafe { &mut *(&mut info.exinfo as *mut register::MiscExInfo as *mut PfInfo) };
+
+ if mm_enclave_pfhandler(pfinfo) == HandleResult::Execution {
+ exception_continue_execution(info, tds);
+ }
+
tds.exception_flag += 1;
}