diff --git a/.github/styles/config/vocabularies/TraceMachina/accept.txt b/.github/styles/config/vocabularies/TraceMachina/accept.txt
index 0316da81e..51ff058e2 100644
--- a/.github/styles/config/vocabularies/TraceMachina/accept.txt
+++ b/.github/styles/config/vocabularies/TraceMachina/accept.txt
@@ -11,12 +11,14 @@ Colab
composable
CPUs
[Dd]eduplication
+eviction_policy
ELB
Eskandar
FFI
FFIs
GPUs
Goma
+[Hh]ardlinks
gzip
[Hh]eatmap
[Hh]ermeticity
@@ -54,6 +56,7 @@ Tokio
TraceMachina
[Tt]oolchain
[Tt]oolchains
+[Tt]mpfs
Qwik
[Uu]psert
Verilator
diff --git a/nativelink-config/examples/README.md b/nativelink-config/examples/README.md
index 4fd79c5a1..1ec8e0543 100644
--- a/nativelink-config/examples/README.md
+++ b/nativelink-config/examples/README.md
@@ -550,4 +550,67 @@ Below, you will find a fully tested example that you can also find in [basic_cas
+
+## High-Performance tmpfs Configuration
+
+### Using tmpfs for Maximum I/O Performance
+
+NativeLink uses hardlinks to efficiently set up action sandboxes from the CAS filesystem store.
+This requires the `work_directory` and the CAS `content_path` to be on the **same filesystem**.
+
+For maximum I/O performance, you can place both on a tmpfs (RAM-based filesystem).
+Users have reported **3-4x build time improvements** when using tmpfs.
+
+#### Setup Instructions
+
+1. Create a tmpfs mount point:
+```bash
+sudo mkdir -p /mnt/tmpfs/nativelink
+sudo mount -t tmpfs -o size=50G tmpfs /mnt/tmpfs/nativelink
+```
+
+2. To make the mount persistent across reboots, add the following line to `/etc/fstab`:
+```
+tmpfs /mnt/tmpfs/nativelink tmpfs size=50G,mode=1777 0 0
+```
+
+3. Configure NativeLink to use the tmpfs paths. See [tmpfs-worker.json5](tmpfs-worker.json5) for a complete example.
+
+#### Key Configuration Points
+
+Both paths must be on the same tmpfs mount:
+- CAS `content_path`: `/mnt/tmpfs/nativelink/cas`
+- Worker `work_directory`: `/mnt/tmpfs/nativelink/work`
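+
+You can confirm that two paths sit on the same filesystem by comparing their device numbers (a quick check with GNU `stat` on Linux; run it once the directories exist):
+
+```bash
+# Both paths should report the same device number if they share a filesystem.
+stat -c '%d' /mnt/tmpfs/nativelink/cas /mnt/tmpfs/nativelink/work
+```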
+
+#### Trade-offs
+
+| Pros | Cons |
+|------|------|
+| Maximum I/O performance (RAM speed) | Cache is lost on reboot or unmount |
+| Hardlinks work correctly (same filesystem) | Limited by available RAM |
+| Eliminates the disk I/O bottleneck | Not suitable for a very large CAS |
+
+#### Sizing Guidance
+
+- Set tmpfs size to ~50% of available RAM
+- Set `max_bytes` in eviction_policy to ~80% of tmpfs size
+- Monitor memory usage and adjust as needed
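+
+As a rough worked example of this guidance, using the 50G mount from the setup steps above (the numbers are illustrative, not a recommendation for your workload):
+
+```bash
+# Sketch: derive a CAS max_bytes value from the tmpfs size used in this guide.
+tmpfs_bytes=$((50 * 1000 * 1000 * 1000))     # 50 GB (decimal), matching the 50G mount above
+cas_max_bytes=$((tmpfs_bytes * 80 / 100))    # ~80% of the tmpfs -> 40000000000
+echo "eviction_policy max_bytes: ${cas_max_bytes}"
+```
+
+The bundled [tmpfs-worker.json5](tmpfs-worker.json5) stays below that figure (30 GB for the CAS plus 2 GB for the action cache), leaving headroom for the `work_directory`, which lives on the same mount.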
diff --git a/nativelink-config/examples/tmpfs-worker.json5 b/nativelink-config/examples/tmpfs-worker.json5
new file mode 100644
index 000000000..8d4b731f6
--- /dev/null
+++ b/nativelink-config/examples/tmpfs-worker.json5
@@ -0,0 +1,208 @@
+// High-performance tmpfs worker configuration
+//
+// This configuration uses tmpfs (RAM-based filesystem) for maximum I/O performance.
+// Users have reported 3-4x build time improvements when using tmpfs for the sandbox.
+//
+// IMPORTANT: Both the CAS content_path and work_directory MUST be on the same
+// filesystem because NativeLink uses hardlinks to efficiently set up action sandboxes.
+//
+// Setup instructions:
+// 1. Create a tmpfs mount point:
+// sudo mkdir -p /mnt/tmpfs/nativelink
+// sudo mount -t tmpfs -o size=50G tmpfs /mnt/tmpfs/nativelink
+//
+// 2. To make it persistent across reboots, add to /etc/fstab:
+// tmpfs /mnt/tmpfs/nativelink tmpfs size=50G,mode=1777 0 0
+//
+// Trade-offs:
+// - PRO: Maximum I/O performance (RAM speed)
+// - PRO: Hardlinks work correctly (same filesystem)
+// - CON: Cache is lost on reboot or when the tmpfs is unmounted
+// - CON: Limited by available RAM
+// - CON: Not suitable for very large CAS (>50% of RAM)
+//
+// Sizing guidance:
+// - Set tmpfs size to ~50% of available RAM
+// - Set max_bytes in eviction_policy to ~80% of tmpfs size
+// - Monitor memory usage and adjust as needed
+{
+ stores: [
+ {
+ name: "AC_MAIN_STORE",
+ filesystem: {
+ content_path: "/mnt/tmpfs/nativelink/ac",
+ temp_path: "/mnt/tmpfs/nativelink/tmp-ac",
+ eviction_policy: {
+ // 2GB for action cache
+ max_bytes: 2000000000,
+ },
+ },
+ },
+ {
+ name: "WORKER_FAST_SLOW_STORE",
+ fast_slow: {
+ // "fast" must be a "filesystem" store because the worker uses it to make
+ // hardlinks on disk to a directory where the jobs are running.
+ // CRITICAL: content_path must be on the same filesystem as work_directory
+ fast: {
+ filesystem: {
+ content_path: "/mnt/tmpfs/nativelink/cas",
+ temp_path: "/mnt/tmpfs/nativelink/tmp-cas",
+ eviction_policy: {
+ // 30GB for CAS - adjust based on your tmpfs size and workload
+ max_bytes: 30000000000,
+ },
+ },
+ },
+ slow: {
+ // Discard data - using noop since we're prioritizing speed over persistence.
+ // For production, you may want to use a persistent store here to avoid
+ // re-downloading artifacts after restarts.
+ noop: {},
+ },
+ },
+ },
+ ],
+ schedulers: [
+ {
+ name: "MAIN_SCHEDULER",
+ simple: {
+ supported_platform_properties: {
+ cpu_count: "minimum",
+ memory_kb: "minimum",
+ network_kbps: "minimum",
+ disk_read_iops: "minimum",
+ disk_read_bps: "minimum",
+ disk_write_iops: "minimum",
+ disk_write_bps: "minimum",
+ shm_size: "minimum",
+ gpu_count: "minimum",
+ gpu_model: "exact",
+ cpu_vendor: "exact",
+ cpu_arch: "exact",
+ cpu_model: "exact",
+ kernel_version: "exact",
+ OSFamily: "priority",
+ "container-image": "priority",
+ ISA: "exact",
+ },
+ },
+ },
+ ],
+ workers: [
+ {
+ local: {
+ worker_api_endpoint: {
+ uri: "grpc://127.0.0.1:50061",
+ },
+ cas_fast_slow_store: "WORKER_FAST_SLOW_STORE",
+ upload_action_result: {
+ ac_store: "AC_MAIN_STORE",
+ },
+
+ // CRITICAL: Must be on the same filesystem (tmpfs) as cas content_path
+ work_directory: "/mnt/tmpfs/nativelink/work",
+ platform_properties: {
+ cpu_count: {
+ values: [
+ "16",
+ ],
+ },
+ memory_kb: {
+ values: [
+ "500000",
+ ],
+ },
+ network_kbps: {
+ values: [
+ "100000",
+ ],
+ },
+ cpu_arch: {
+ values: [
+ "x86_64",
+ ],
+ },
+ OSFamily: {
+ values: [
+ "",
+ ],
+ },
+ "container-image": {
+ values: [
+ "",
+ ],
+ },
+ ISA: {
+ values: [
+ "x86-64",
+ ],
+ },
+ },
+ },
+ },
+ ],
+ servers: [
+ {
+ name: "public",
+ listener: {
+ http: {
+ socket_address: "0.0.0.0:50051",
+ },
+ },
+ services: {
+ cas: [
+ {
+ instance_name: "main",
+ cas_store: "WORKER_FAST_SLOW_STORE",
+ },
+ ],
+ ac: [
+ {
+ instance_name: "main",
+ ac_store: "AC_MAIN_STORE",
+ },
+ ],
+ execution: [
+ {
+ instance_name: "main",
+ cas_store: "WORKER_FAST_SLOW_STORE",
+ scheduler: "MAIN_SCHEDULER",
+ },
+ ],
+ capabilities: [
+ {
+ instance_name: "main",
+ remote_execution: {
+ scheduler: "MAIN_SCHEDULER",
+ },
+ },
+ ],
+ bytestream: [
+ {
+ instance_name: "main",
+ cas_store: "WORKER_FAST_SLOW_STORE",
+ },
+ ],
+ },
+ },
+ {
+ name: "private_workers_servers",
+ listener: {
+ http: {
+ socket_address: "0.0.0.0:50061",
+ },
+ },
+ services: {
+ worker_api: {
+ scheduler: "MAIN_SCHEDULER",
+ },
+ admin: {},
+ health: {},
+ },
+ },
+ ],
+ global: {
+ max_open_files: 24576,
+ },
+}