From 5df6f7ea7b1c503a5cdaf2321af57eb07993b43a Mon Sep 17 00:00:00 2001 From: Konrad Michalik Date: Sun, 14 Sep 2025 15:29:57 +0200 Subject: [PATCH 1/4] Experiment on dataset download --- config-overrides.js | 8 + src/WrapperApp/WrapperApp.tsx | 10 + .../components/Simulation/Geant4Datasets.tsx | 88 + .../Simulation/RunSimulationPanel.tsx | 12 +- src/libs/geant4_web/DatasetDownloadManager.ts | 113 + .../geant4_web/geant4_wasm/geant4_wasm.d.ts | 244 + .../geant4_web/geant4_wasm/geant4_wasm.js | 8314 +++++++++++++++++ .../preload/preload_G4EMLOW8.6.1.js | 419 + .../preload/preload_G4ENSDFSTATE3.0.js | 343 + .../geant4_wasm/preload/preload_G4NDL4.7.1.js | 414 + .../preload/preload_G4PARTICLEXS4.1.js | 352 + .../preload/preload_G4SAIDDATA2.0.js | 343 + .../preload/preload_PhotonEvaporation6.1.js | 343 + src/libs/geant4_web/geantWorker.worker.ts | 330 + 14 files changed, 11332 insertions(+), 1 deletion(-) create mode 100644 src/WrapperApp/components/Simulation/Geant4Datasets.tsx create mode 100644 src/libs/geant4_web/DatasetDownloadManager.ts create mode 100644 src/libs/geant4_web/geant4_wasm/geant4_wasm.d.ts create mode 100644 src/libs/geant4_web/geant4_wasm/geant4_wasm.js create mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js create mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js create mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js create mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js create mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js create mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js create mode 100644 src/libs/geant4_web/geantWorker.worker.ts diff --git a/config-overrides.js b/config-overrides.js index de5808237..795e4e7b4 100644 --- a/config-overrides.js +++ b/config-overrides.js @@ -1,3 +1,5 @@ +const webpack = require('webpack'); + module.exports = function override(webpackConfig) { // react-dnd webpackConfig.module.rules.unshift({ @@ -7,6 +9,12 @@ module.exports = function override(webpackConfig) { } }); + webpackConfig.plugins.push( + new webpack.IgnorePlugin({ + resourceRegExp: /geant4_wasm\.wasm$/ + }) + ); + // react-dnd webpackConfig.resolve.alias = { ...webpackConfig.resolve.alias, diff --git a/src/WrapperApp/WrapperApp.tsx b/src/WrapperApp/WrapperApp.tsx index 3bcaf38e2..b5c773f1f 100644 --- a/src/WrapperApp/WrapperApp.tsx +++ b/src/WrapperApp/WrapperApp.tsx @@ -3,6 +3,7 @@ import { styled } from '@mui/material/styles'; import { SyntheticEvent, useEffect, useState } from 'react'; import { useConfig } from '../config/ConfigService'; +import { useDatasetDownloadManager } from '../libs/geant4_web/DatasetDownloadManager'; import { useAuth } from '../services/AuthService'; import { FullSimulationData } from '../services/ShSimulatorService'; import { useStore } from '../services/StoreService'; @@ -50,6 +51,12 @@ function WrapperApp() { const [providedInputFiles, setProvidedInputFiles] = useState(); const [highlightRunForm, setHighLightRunForm] = useState(false); + const { + managerState: geant4DownloadManagerState, + datasetStates: geant4DatasetStates, + startDownload: geant4DatasetDownload + } = useDatasetDownloadManager(); + useEffect(() => { if (Object.keys(providedInputFiles ?? 
{}).length > 0) { setHighLightRunForm(true); @@ -206,6 +213,9 @@ function WrapperApp() { highlight={highlightRunForm} clearInputFiles={() => setProvidedInputFiles(undefined)} runSimulation={runSimulation} + geant4DownloadManagerState={geant4DownloadManagerState} + geant4DatasetDownloadStart={geant4DatasetDownload} + geant4DatasetStates={geant4DatasetStates} /> {/* end Simulations screen */} diff --git a/src/WrapperApp/components/Simulation/Geant4Datasets.tsx b/src/WrapperApp/components/Simulation/Geant4Datasets.tsx new file mode 100644 index 000000000..b9d734e45 --- /dev/null +++ b/src/WrapperApp/components/Simulation/Geant4Datasets.tsx @@ -0,0 +1,88 @@ +import CheckIcon from '@mui/icons-material/Check'; +import ExpandMoreIcon from '@mui/icons-material/ExpandMore'; +import { + AccordionDetails, + AccordionSummary, + Box, + Button, + CircularProgress, + LinearProgress, + Typography, + useTheme +} from '@mui/material'; +import { useState } from 'react'; + +import { + DatasetDownloadStatus, + DatasetStatus, + DownloadManagerStatus +} from '../../../libs/geant4_web/DatasetDownloadManager'; +import StyledAccordion from '../../../shared/components/StyledAccordion'; + +export interface Geant4DatasetsProps { + geant4DownloadManagerState: DownloadManagerStatus; + geant4DatasetStates: DatasetStatus[]; + geant4DatasetDownloadStart: () => void; +} + +function DatasetCurrentStatus(props: { status: DatasetStatus }) { + const { status } = props; + + return ( + + {status.name} + {status.status === DatasetDownloadStatus.DONE && } + {(status.status === DatasetDownloadStatus.DOWNLOADING || + status.status === DatasetDownloadStatus.PROCESSING) && ( + + )} + {status.status === DatasetDownloadStatus.IDLE && } + + ); +} + +export function Geant4Datasets(props: Geant4DatasetsProps) { + const theme = useTheme(); + const { geant4DownloadManagerState, geant4DatasetStates, geant4DatasetDownloadStart } = props; + const [open, setOpen] = useState(true); + + return ( + + } + onClick={() => setOpen(!open)}> + + Datasets download + + + + {geant4DownloadManagerState === DownloadManagerStatus.IDLE && ( + + )} + {geant4DatasetStates.map(status => ( + + ))} + {geant4DownloadManagerState === DownloadManagerStatus.ERROR && ( + Something went wrong + )} + + + ); +} diff --git a/src/WrapperApp/components/Simulation/RunSimulationPanel.tsx b/src/WrapperApp/components/Simulation/RunSimulationPanel.tsx index 784638971..d4140b4f2 100644 --- a/src/WrapperApp/components/Simulation/RunSimulationPanel.tsx +++ b/src/WrapperApp/components/Simulation/RunSimulationPanel.tsx @@ -3,14 +3,21 @@ import CloudOff from '@mui/icons-material/CloudOff'; import { AccordionDetails, AccordionSummary, Link, Typography, useTheme } from '@mui/material'; import { useConfig } from '../../../config/ConfigService'; +import { + DatasetStatus, + DownloadManagerStatus +} from '../../../libs/geant4_web/DatasetDownloadManager'; import { useAuth } from '../../../services/AuthService'; import { useStore } from '../../../services/StoreService'; import StyledAccordion from '../../../shared/components/StyledAccordion'; import { SimulatorNames, SimulatorType } from '../../../types/RequestTypes'; +import { Geant4Datasets, Geant4DatasetsProps } from './Geant4Datasets'; import RecentSimulations from './RecentSimulations'; import { RunSimulationForm, RunSimulationFormProps } from './RunSimulationForm'; -export default function RunSimulationPanel(props: RunSimulationFormProps) { +export type RunSimulationPanelProps = RunSimulationFormProps & Geant4DatasetsProps; + +export 
default function RunSimulationPanel(props: RunSimulationPanelProps) {
 	const theme = useTheme();
 	const { demoMode } = useConfig();
 	const { yaptideEditor } = useStore();
@@ -23,6 +30,9 @@ export default function RunSimulationPanel(props: RunSimulationFormProps) {
 	return showRunForm ? (
 		<>
+			{yaptideEditor?.contextManager.currentSimulator === SimulatorType.GEANT4 && (
+				<Geant4Datasets {...props} />
+			)}
 	) : (
diff --git a/src/libs/geant4_web/DatasetDownloadManager.ts b/src/libs/geant4_web/DatasetDownloadManager.ts
new file mode 100644
index 000000000..236f059db
--- /dev/null
+++ b/src/libs/geant4_web/DatasetDownloadManager.ts
@@ -0,0 +1,113 @@
+import { useCallback, useEffect, useState } from 'react';
+
+export enum DownloadManagerStatus {
+	IDLE,
+	WORKING,
+	FINISHED,
+	ERROR,
+}
+
+export enum DatasetDownloadStatus {
+	IDLE,
+	DOWNLOADING,
+	PROCESSING,
+	DONE,
+}
+
+export interface DatasetStatus {
+	name: string,
+	status: DatasetDownloadStatus,
+	done?: number,
+	total?: number,
+}
+
+const downloadRegex = /Downloading data... \((\d+)\/(\d+)\)/g;
+const processingRegex = /Processing... \((\d+)\/(\d+)\)/g;
+
+export function useDatasetDownloadManager() {
+	const [managerState, setManagerState] = useState(DownloadManagerStatus.IDLE);
+	const [datasetStates, setDatasetStates] = useState<Record<string, DatasetStatus>>({});
+	const [idle, setIdle] = useState(true);
+	const [worker, setWorker] = useState<Worker>();
+
+	// Start the download once; subsequent calls are no-ops until the hook is remounted.
+	const startDownload = useCallback(() => {
+		if (!idle) return;
+		worker?.postMessage({ type: 'loadDepsData' });
+		setManagerState(DownloadManagerStatus.WORKING);
+		setIdle(false);
+	}, [worker, idle]);
+
+	useEffect(() => {
+		const worker = new Worker(new URL('./geantWorker.worker.ts', import.meta.url));
+		// Track the dataset currently reported by the worker in a local variable,
+		// so the message handler does not read stale React state.
+		let currentDataset: string | undefined;
+		let done = '', total = '';
+		worker.onmessage = (event) => {
+			switch (event.data.type) {
+				case 'status':
+					switch (true) {
+						case event.data.data?.startsWith('Name'): {
+							if (currentDataset) {
+								const previous = currentDataset;
+								setDatasetStates(states => ({
+									...states,
+									[previous]: { name: previous, status: DatasetDownloadStatus.DONE }
+								}));
+							}
+							const name = event.data.data.slice(6, -1);
+							currentDataset = name;
+							setDatasetStates(states => ({
+								...states,
+								[name]: { name, status: DatasetDownloadStatus.IDLE }
+							}));
+							break;
+						}
+						case event.data.data?.startsWith('Downloading data'):
+							[, done, total] = Array.from(event.data.data.matchAll(downloadRegex))[0] as string[];
+							setDatasetStates(states => ({
+								...states,
+								[currentDataset!]: {
+									name: currentDataset!,
+									status: DatasetDownloadStatus.DOWNLOADING,
+									done: parseInt(done),
+									total: parseInt(total)
+								}
+							}));
+							break;
+						case event.data.data?.startsWith('Processing'):
+							[, done, total] = Array.from(event.data.data.matchAll(processingRegex))[0] as string[];
+							setDatasetStates(states => ({
+								...states,
+								[currentDataset!]: {
+									name: currentDataset!,
+									status: DatasetDownloadStatus.PROCESSING,
+									done: parseInt(done),
+									total: parseInt(total)
+								}
+							}));
+							break;
+						case event.data.data?.startsWith('Datasets initialized'):
+							setManagerState(DownloadManagerStatus.FINISHED);
+							break;
+						default:
+							console.log('Status: ', event.data.data);
+							if (currentDataset) {
+								const current = currentDataset;
+								setDatasetStates(states => ({
+									...states,
+									[current]: { name: current, status: DatasetDownloadStatus.IDLE }
+								}));
+							}
+							break;
+					}
+					break;
+				case 'error':
+					setManagerState(DownloadManagerStatus.ERROR);
+					break;
+			}
+		};
+		setWorker(worker);
+
+		// Terminate the worker when the owning component unmounts.
+		return () => worker.terminate();
+	}, []);
+
+	return { managerState, datasetStates: Object.values(datasetStates), startDownload };
+}
\ No newline at end of file
diff --git a/src/libs/geant4_web/geant4_wasm/geant4_wasm.d.ts
b/src/libs/geant4_web/geant4_wasm/geant4_wasm.d.ts new file mode 100644 index 000000000..640442604 --- /dev/null +++ b/src/libs/geant4_web/geant4_wasm/geant4_wasm.d.ts @@ -0,0 +1,244 @@ +// TypeScript bindings for emscripten-generated code. Automatically generated at compile time. +declare namespace RuntimeExports { + namespace FS { + export let root: any; + export let mounts: any[]; + export let devices: {}; + export let streams: any[]; + export let nextInode: number; + export let nameTable: any; + export let currentPath: string; + export let initialized: boolean; + export let ignorePermissions: boolean; + export let filesystems: any; + export let syncFSRequests: number; + export let readFiles: {}; + export { ErrnoError }; + export { FSStream }; + export { FSNode }; + export function lookupPath(path: any, opts?: {}): { + path: string; + node?: undefined; + } | { + path: string; + node: any; + }; + export function getPath(node: any): any; + export function hashName(parentid: any, name: any): number; + export function hashAddNode(node: any): void; + export function hashRemoveNode(node: any): void; + export function lookupNode(parent: any, name: any): any; + export function createNode(parent: any, name: any, mode: any, rdev: any): any; + export function destroyNode(node: any): void; + export function isRoot(node: any): boolean; + export function isMountpoint(node: any): boolean; + export function isFile(mode: any): boolean; + export function isDir(mode: any): boolean; + export function isLink(mode: any): boolean; + export function isChrdev(mode: any): boolean; + export function isBlkdev(mode: any): boolean; + export function isFIFO(mode: any): boolean; + export function isSocket(mode: any): boolean; + export function flagsToPermissionString(flag: any): string; + export function nodePermissions(node: any, perms: any): 0 | 2; + export function mayLookup(dir: any): any; + export function mayCreate(dir: any, name: any): any; + export function mayDelete(dir: any, name: any, isdir: any): any; + export function mayOpen(node: any, flags: any): any; + export function checkOpExists(op: any, err: any): any; + export let MAX_OPEN_FDS: number; + export function nextfd(): number; + export function getStreamChecked(fd: any): any; + export function getStream(fd: any): any; + export function createStream(stream: any, fd?: number): any; + export function closeStream(fd: any): void; + export function dupStream(origStream: any, fd?: number): any; + export function doSetAttr(stream: any, node: any, attr: any): void; + export namespace chrdev_stream_ops { + function open(stream: any): void; + function llseek(): never; + } + export function major(dev: any): number; + export function minor(dev: any): number; + export function makedev(ma: any, mi: any): number; + export function registerDevice(dev: any, ops: any): void; + export function getDevice(dev: any): any; + export function getMounts(mount: any): any[]; + export function syncfs(populate: any, callback: any): void; + export function mount(type: any, opts: any, mountpoint: any): any; + export function unmount(mountpoint: any): void; + export function lookup(parent: any, name: any): any; + export function mknod(path: any, mode: any, dev: any): any; + export function statfs(path: any): any; + export function statfsStream(stream: any): any; + export function statfsNode(node: any): { + bsize: number; + frsize: number; + blocks: number; + bfree: number; + bavail: number; + files: any; + ffree: number; + fsid: number; + flags: number; + namelen: number; + }; + export 
function create(path: any, mode?: number): any; + export function mkdir(path: any, mode?: number): any; + export function mkdirTree(path: any, mode: any): void; + export function mkdev(path: any, mode: any, dev: any): any; + export function symlink(oldpath: any, newpath: any): any; + export function rename(old_path: any, new_path: any): void; + export function rmdir(path: any): void; + export function readdir(path: any): any; + export function unlink(path: any): void; + export function readlink(path: any): any; + export function stat(path: any, dontFollow: any): any; + export function fstat(fd: any): any; + export function lstat(path: any): any; + export function doChmod(stream: any, node: any, mode: any, dontFollow: any): void; + export function chmod(path: any, mode: any, dontFollow: any): void; + export function lchmod(path: any, mode: any): void; + export function fchmod(fd: any, mode: any): void; + export function doChown(stream: any, node: any, dontFollow: any): void; + export function chown(path: any, uid: any, gid: any, dontFollow: any): void; + export function lchown(path: any, uid: any, gid: any): void; + export function fchown(fd: any, uid: any, gid: any): void; + export function doTruncate(stream: any, node: any, len: any): void; + export function truncate(path: any, len: any): void; + export function ftruncate(fd: any, len: any): void; + export function utime(path: any, atime: any, mtime: any): void; + export function open(path: any, flags: any, mode?: number): any; + export function close(stream: any): void; + export function isClosed(stream: any): boolean; + export function llseek(stream: any, offset: any, whence: any): any; + export function read(stream: any, buffer: any, offset: any, length: any, position: any): any; + export function write(stream: any, buffer: any, offset: any, length: any, position: any, canOwn: any): any; + export function mmap(stream: any, length: any, position: any, prot: any, flags: any): any; + export function msync(stream: any, buffer: any, offset: any, length: any, mmapFlags: any): any; + export function ioctl(stream: any, cmd: any, arg: any): any; + export function readFile(path: any, opts?: {}): Uint8Array; + export function writeFile(path: any, data: any, opts?: {}): void; + export function cwd(): any; + export function chdir(path: any): void; + export function createDefaultDirectories(): void; + export function createDefaultDevices(): void; + export function createSpecialDirectories(): void; + export function createStandardStreams(input: any, output: any, error: any): void; + export function staticInit(): void; + export function init(input: any, output: any, error: any): void; + export function quit(): void; + export function findObject(path: any, dontResolveLastLink: any): any; + export function analyzePath(path: any, dontResolveLastLink: any): { + isRoot: boolean; + exists: boolean; + error: number; + name: any; + path: any; + object: any; + parentExists: boolean; + parentPath: any; + parentObject: any; + }; + export function createPath(parent: any, path: any, canRead: any, canWrite: any): any; + export function createFile(parent: any, name: any, properties: any, canRead: any, canWrite: any): any; + export function createDataFile(parent: any, name: any, data: any, canRead: any, canWrite: any, canOwn: any): void; + export function createDevice(parent: any, name: any, input: any, output: any): any; + export function forceLoadFile(obj: any): boolean; + export function createLazyFile(parent: any, name: any, url: any, canRead: any, canWrite: 
any): any; + export function absolutePath(): void; + export function createFolder(): void; + export function createLink(): void; + export function joinPath(): void; + export function mmapAlloc(): void; + export function standardizePath(): void; + } + function FS_createPath(...args: any[]): any; + function FS_createDataFile(...args: any[]): any; + function FS_preloadFile(parent: any, name: any, url: any, canRead: any, canWrite: any, dontCreateFile: any, canOwn: any, preFinish: any): Promise; + function FS_unlink(...args: any[]): any; + function FS_createLazyFile(...args: any[]): any; + function FS_createDevice(...args: any[]): any; + let addRunDependency: any; + let removeRunDependency: any; +} +declare class ErrnoError extends Error { + constructor(errno: any); + errno: any; + code: string; +} +declare class FSStream { + shared: {}; + set object(val: any); + get object(): any; + node: any; + get isRead(): boolean; + get isWrite(): boolean; + get isAppend(): number; + set flags(val: any); + get flags(): any; + set position(val: any); + get position(): any; +} +declare class FSNode { + constructor(parent: any, name: any, mode: any, rdev: any); + node_ops: {}; + stream_ops: {}; + readMode: number; + writeMode: number; + mounted: any; + parent: any; + mount: any; + id: number; + name: any; + mode: any; + rdev: any; + atime: number; + mtime: number; + ctime: number; + set read(val: boolean); + get read(): boolean; + set write(val: boolean); + get write(): boolean; + get isFolder(): any; + get isDevice(): any; +} +interface WasmModule { +} + +export interface ClassHandle { + isAliasOf(other: ClassHandle): boolean; + delete(): void; + deleteLater(): this; + isDeleted(): boolean; + // @ts-ignore - If targeting lower than ESNext, this symbol might not exist. + [Symbol.dispose](): void; + clone(): this; +} +export interface TestClass extends ClassHandle { + testMethod(): number; + complicatedFunction(_0: vector_int): number; +} + +export interface vector_int extends ClassHandle { + push_back(_0: number): void; + resize(_0: number, _1: number): void; + size(): number; + get(_0: number): number | undefined; + set(_0: number, _1: number): boolean; +} + +interface EmbindModule { + TestClass: { + new(_0: number, _1: number): TestClass; + }; + vector_int: { + new(): vector_int; + }; + Geant4_init(): number; + Geant4_GDML(): number; + Geant4_run(): number; +} + +export type MainModule = WasmModule & typeof RuntimeExports & EmbindModule; +export default function MainModuleFactory (options?: unknown): Promise; diff --git a/src/libs/geant4_web/geant4_wasm/geant4_wasm.js b/src/libs/geant4_web/geant4_wasm/geant4_wasm.js new file mode 100644 index 000000000..a6729eddf --- /dev/null +++ b/src/libs/geant4_web/geant4_wasm/geant4_wasm.js @@ -0,0 +1,8314 @@ +// This code implements the `-sMODULARIZE` settings by taking the generated +// JS program code (INNER_JS_CODE) and wrapping it in a factory function. + +// When targetting node and ES6 we use `await import ..` in the generated code +// so the outer function needs to be marked as async. +async function createWasmModule(moduleArg = {}) { + var moduleRtn; + +// include: shell.js +// The Module object: Our interface to the outside world. We import +// and export values on it. There are various ways Module can be used: +// 1. Not defined. We create it here +// 2. A function parameter, function(moduleArg) => Promise +// 3. pre-run appended it, var Module = {}; ..generated code.. +// 4. External script tag defines var Module. 
+// We need to check if Module already exists (e.g. case 3 above). +// Substitution will be replaced with actual code on later stage of the build, +// this way Closure Compiler will not mangle it (e.g. case 4. above). +// Note that if you want to run closure, and also to use Module +// after the generated code, you will need to define var Module = {}; +// before the code. Then that object will be used in the code, and you +// can continue to use Module afterwards as well. +var Module = moduleArg; + +// Determine the runtime environment we are in. You can customize this by +// setting the ENVIRONMENT setting at compile time (see settings.js). + +var ENVIRONMENT_IS_WEB = false; +var ENVIRONMENT_IS_WORKER = true; +var ENVIRONMENT_IS_NODE = false; +var ENVIRONMENT_IS_SHELL = false; + +// --pre-jses are emitted after the Module integration code, so that they can +// refer to Module (if they choose; they can also define Module) +// include: ./setup_env.js +Module.preRun.push(function () { + ENV.G4LEDATA = '/data/G4EMLOW8.6.1'; + ENV.G4LEVELGAMMADATA = '/data/PhotonEvaporation6.1'; + ENV.G4NEUTRONHPDATA = '/data/G4NDL4.7.1'; + ENV.G4ENSDFSTATEDATA = '/data/G4ENSDFSTATE3.0'; + ENV.G4SAIDXSDATA = '/data/G4SAIDDATA2.0'; + ENV.G4PARTICLEXSDATA = '/data/G4PARTICLEXS4.1' +});// end include: ./setup_env.js + + +var arguments_ = []; +var thisProgram = './this.program'; +var quit_ = (status, toThrow) => { + throw toThrow; +}; + +var _scriptName = import.meta.url; + +// `/` should be present at the end if `scriptDirectory` is not empty +var scriptDirectory = ''; +function locateFile(path) { + if (Module['locateFile']) { + return Module['locateFile'](path, scriptDirectory); + } + return scriptDirectory + path; +} + +// Hooks that are implemented differently in different runtime environments. +var readAsync, readBinary; + +if (ENVIRONMENT_IS_SHELL) { + + const isNode = typeof process == 'object' && process.versions?.node && process.type != 'renderer'; + if (isNode || typeof window == 'object' || typeof WorkerGlobalScope != 'undefined') throw new Error('not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)'); + +} else + +// Note that this includes Node.js workers when relevant (pthreads is enabled). +// Node.js workers are detected as a combination of ENVIRONMENT_IS_WORKER and +// ENVIRONMENT_IS_NODE. +if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) { + try { + scriptDirectory = new URL('.', _scriptName).href; // includes trailing slash + } catch { + // Must be a `blob:` or `data:` URL (e.g. `blob:http://site.com/etc/etc`), we cannot + // infer anything from them. 
+ } + + if (!(typeof window == 'object' || typeof WorkerGlobalScope != 'undefined')) throw new Error('not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)'); + + { +// include: web_or_worker_shell_read.js +if (ENVIRONMENT_IS_WORKER) { + readBinary = (url) => { + var xhr = new XMLHttpRequest(); + xhr.open('GET', url, false); + xhr.responseType = 'arraybuffer'; + xhr.send(null); + return new Uint8Array(/** @type{!ArrayBuffer} */(xhr.response)); + }; + } + + readAsync = async (url) => { + assert(!isFileURI(url), "readAsync does not work with file:// URLs"); + var response = await fetch(url, { credentials: 'same-origin' }); + if (response.ok) { + return response.arrayBuffer(); + } + throw new Error(response.status + ' : ' + response.url); + }; +// end include: web_or_worker_shell_read.js + } +} else +{ + throw new Error('environment detection error'); +} + +var out = console.log.bind(console); +var err = console.error.bind(console); + +var IDBFS = 'IDBFS is no longer included by default; build with -lidbfs.js'; +var PROXYFS = 'PROXYFS is no longer included by default; build with -lproxyfs.js'; +var WORKERFS = 'WORKERFS is no longer included by default; build with -lworkerfs.js'; +var FETCHFS = 'FETCHFS is no longer included by default; build with -lfetchfs.js'; +var ICASEFS = 'ICASEFS is no longer included by default; build with -licasefs.js'; +var JSFILEFS = 'JSFILEFS is no longer included by default; build with -ljsfilefs.js'; +var OPFS = 'OPFS is no longer included by default; build with -lopfs.js'; + +var NODEFS = 'NODEFS is no longer included by default; build with -lnodefs.js'; + +// perform assertions in shell.js after we set up out() and err(), as otherwise +// if an assertion fails it cannot print the message + +assert(!ENVIRONMENT_IS_WEB, 'web environment detected but not enabled at build time. Add `web` to `-sENVIRONMENT` to enable.'); + +assert(!ENVIRONMENT_IS_NODE, 'node environment detected but not enabled at build time. Add `node` to `-sENVIRONMENT` to enable.'); + +assert(!ENVIRONMENT_IS_SHELL, 'shell environment detected but not enabled at build time. Add `shell` to `-sENVIRONMENT` to enable.'); + +// end include: shell.js + +// include: preamble.js +// === Preamble library stuff === + +// Documentation for the public APIs defined in this file must be updated in: +// site/source/docs/api_reference/preamble.js.rst +// A prebuilt local version of the documentation is available at: +// site/build/text/docs/api_reference/preamble.js.txt +// You can also build docs locally as HTML or other formats in site/ +// An online HTML version (which may be of a different version of Emscripten) +// is up at http://kripken.github.io/emscripten-site/docs/api_reference/preamble.js.html + +var wasmBinary; + +if (typeof WebAssembly != 'object') { + err('no native wasm support detected'); +} + +// Wasm globals + +//======================================== +// Runtime essentials +//======================================== + +// whether we are quitting the application. no code should run after this. +// set in exit() and abort() +var ABORT = false; + +// set by exit() and abort(). Passed to 'onExit' handler. +// NOTE: This is also used as the process return code code in shell environments +// but only when noExitRuntime is false. +var EXITSTATUS; + +// In STRICT mode, we only define assert() when ASSERTIONS is set. i.e. we +// don't define it at all in release modes. 
This matches the behaviour of +// MINIMAL_RUNTIME. +// TODO(sbc): Make this the default even without STRICT enabled. +/** @type {function(*, string=)} */ +function assert(condition, text) { + if (!condition) { + abort('Assertion failed' + (text ? ': ' + text : '')); + } +} + +// We used to include malloc/free by default in the past. Show a helpful error in +// builds with assertions. + +/** + * Indicates whether filename is delivered via file protocol (as opposed to http/https) + * @noinline + */ +var isFileURI = (filename) => filename.startsWith('file://'); + +// include: runtime_common.js +// include: runtime_stack_check.js +// Initializes the stack cookie. Called at the startup of main and at the startup of each thread in pthreads mode. +function writeStackCookie() { + var max = _emscripten_stack_get_end(); + assert((max & 3) == 0); + // If the stack ends at address zero we write our cookies 4 bytes into the + // stack. This prevents interference with SAFE_HEAP and ASAN which also + // monitor writes to address zero. + if (max == 0) { + max += 4; + } + // The stack grow downwards towards _emscripten_stack_get_end. + // We write cookies to the final two words in the stack and detect if they are + // ever overwritten. + HEAPU32[((max)>>2)] = 0x02135467; + HEAPU32[(((max)+(4))>>2)] = 0x89BACDFE; + // Also test the global address 0 for integrity. + HEAPU32[((0)>>2)] = 1668509029; +} + +function checkStackCookie() { + if (ABORT) return; + var max = _emscripten_stack_get_end(); + // See writeStackCookie(). + if (max == 0) { + max += 4; + } + var cookie1 = HEAPU32[((max)>>2)]; + var cookie2 = HEAPU32[(((max)+(4))>>2)]; + if (cookie1 != 0x02135467 || cookie2 != 0x89BACDFE) { + abort(`Stack overflow! Stack cookie has been overwritten at ${ptrToString(max)}, expected hex dwords 0x89BACDFE and 0x2135467, but received ${ptrToString(cookie2)} ${ptrToString(cookie1)}`); + } + // Also test the global address 0 for integrity. + if (HEAPU32[((0)>>2)] != 0x63736d65 /* 'emsc' */) { + abort('Runtime error: The application has corrupted its heap memory area (address zero)!'); + } +} +// end include: runtime_stack_check.js +// include: runtime_exceptions.js +// end include: runtime_exceptions.js +// include: runtime_debug.js +var runtimeDebug = true; // Switch to false at runtime to disable logging at the right times + +// Used by XXXXX_DEBUG settings to output debug messages. +function dbg(...args) { + if (!runtimeDebug && typeof runtimeDebug != 'undefined') return; + // TODO(sbc): Make this configurable somehow. Its not always convenient for + // logging to show up as warnings. + console.warn(...args); +} + +// Endianness check +(() => { + var h16 = new Int16Array(1); + var h8 = new Int8Array(h16.buffer); + h16[0] = 0x6373; + if (h8[0] !== 0x73 || h8[1] !== 0x63) throw 'Runtime error: expected the system to be little-endian! (Run with -sSUPPORT_BIG_ENDIAN to bypass)'; +})(); + +function consumedModuleProp(prop) { + if (!Object.getOwnPropertyDescriptor(Module, prop)) { + Object.defineProperty(Module, prop, { + configurable: true, + set() { + abort(`Attempt to set \`Module.${prop}\` after it has already been processed. 
This can happen, for example, when code is injected via '--post-js' rather than '--pre-js'`); + + } + }); + } +} + +function makeInvalidEarlyAccess(name) { + return () => assert(false, `call to '${name}' via reference taken before Wasm module initialization`); + +} + +function ignoredModuleProp(prop) { + if (Object.getOwnPropertyDescriptor(Module, prop)) { + abort(`\`Module.${prop}\` was supplied but \`${prop}\` not included in INCOMING_MODULE_JS_API`); + } +} + +// forcing the filesystem exports a few things by default +function isExportedByForceFilesystem(name) { + return name === 'FS_createPath' || + name === 'FS_createDataFile' || + name === 'FS_createPreloadedFile' || + name === 'FS_preloadFile' || + name === 'FS_unlink' || + name === 'addRunDependency' || + // The old FS has some functionality that WasmFS lacks. + name === 'FS_createLazyFile' || + name === 'FS_createDevice' || + name === 'removeRunDependency'; +} + +/** + * Intercept access to a global symbol. This enables us to give informative + * warnings/errors when folks attempt to use symbols they did not include in + * their build, or no symbols that no longer exist. + */ +function hookGlobalSymbolAccess(sym, func) { + if (typeof globalThis != 'undefined' && !Object.getOwnPropertyDescriptor(globalThis, sym)) { + Object.defineProperty(globalThis, sym, { + configurable: true, + get() { + func(); + return undefined; + } + }); + } +} + +function missingGlobal(sym, msg) { + hookGlobalSymbolAccess(sym, () => { + warnOnce(`\`${sym}\` is not longer defined by emscripten. ${msg}`); + }); +} + +missingGlobal('buffer', 'Please use HEAP8.buffer or wasmMemory.buffer'); +missingGlobal('asm', 'Please use wasmExports instead'); + +function missingLibrarySymbol(sym) { + hookGlobalSymbolAccess(sym, () => { + // Can't `abort()` here because it would break code that does runtime + // checks. e.g. `if (typeof SDL === 'undefined')`. + var msg = `\`${sym}\` is a library symbol and not included by default; add it to your library.js __deps or to DEFAULT_LIBRARY_FUNCS_TO_INCLUDE on the command line`; + // DEFAULT_LIBRARY_FUNCS_TO_INCLUDE requires the name as it appears in + // library.js, which means $name for a JS name with no prefix, or name + // for a JS name like _name. + var librarySymbol = sym; + if (!librarySymbol.startsWith('_')) { + librarySymbol = '$' + sym; + } + msg += ` (e.g. -sDEFAULT_LIBRARY_FUNCS_TO_INCLUDE='${librarySymbol}')`; + if (isExportedByForceFilesystem(sym)) { + msg += '. Alternatively, forcing filesystem support (-sFORCE_FILESYSTEM) can export this for you'; + } + warnOnce(msg); + }); + + // Any symbol that is not included from the JS library is also (by definition) + // not exported on the Module object. + unexportedRuntimeSymbol(sym); +} + +function unexportedRuntimeSymbol(sym) { + if (!Object.getOwnPropertyDescriptor(Module, sym)) { + Object.defineProperty(Module, sym, { + configurable: true, + get() { + var msg = `'${sym}' was not exported. add it to EXPORTED_RUNTIME_METHODS (see the Emscripten FAQ)`; + if (isExportedByForceFilesystem(sym)) { + msg += '. 
Alternatively, forcing filesystem support (-sFORCE_FILESYSTEM) can export this for you'; + } + abort(msg); + } + }); + } +} + +// end include: runtime_debug.js +var readyPromiseResolve, readyPromiseReject; + +// Memory management + +var wasmMemory; + +var +/** @type {!Int8Array} */ + HEAP8, +/** @type {!Uint8Array} */ + HEAPU8, +/** @type {!Int16Array} */ + HEAP16, +/** @type {!Uint16Array} */ + HEAPU16, +/** @type {!Int32Array} */ + HEAP32, +/** @type {!Uint32Array} */ + HEAPU32, +/** @type {!Float32Array} */ + HEAPF32, +/** @type {!Float64Array} */ + HEAPF64; + +// BigInt64Array type is not correctly defined in closure +var +/** not-@type {!BigInt64Array} */ + HEAP64, +/* BigUint64Array type is not correctly defined in closure +/** not-@type {!BigUint64Array} */ + HEAPU64; + +var runtimeInitialized = false; + + + +function updateMemoryViews() { + var b = wasmMemory.buffer; + HEAP8 = new Int8Array(b); + HEAP16 = new Int16Array(b); + HEAPU8 = new Uint8Array(b); + HEAPU16 = new Uint16Array(b); + HEAP32 = new Int32Array(b); + HEAPU32 = new Uint32Array(b); + HEAPF32 = new Float32Array(b); + HEAPF64 = new Float64Array(b); + HEAP64 = new BigInt64Array(b); + HEAPU64 = new BigUint64Array(b); +} + +// include: memoryprofiler.js +// end include: memoryprofiler.js +// end include: runtime_common.js +assert(typeof Int32Array != 'undefined' && typeof Float64Array !== 'undefined' && Int32Array.prototype.subarray != undefined && Int32Array.prototype.set != undefined, + 'JS engine does not provide full typed array support'); + +function preRun() { + if (Module['preRun']) { + if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']]; + while (Module['preRun'].length) { + addOnPreRun(Module['preRun'].shift()); + } + } + consumedModuleProp('preRun'); + // Begin ATPRERUNS hooks + callRuntimeCallbacks(onPreRuns); + // End ATPRERUNS hooks +} + +function initRuntime() { + assert(!runtimeInitialized); + runtimeInitialized = true; + + checkStackCookie(); + + // Begin ATINITS hooks + if (!Module['noFSInit'] && !FS.initialized) FS.init(); +TTY.init(); +SOCKFS.root = FS.mount(SOCKFS, {}, null); + // End ATINITS hooks + + wasmExports['__wasm_call_ctors'](); + + // Begin ATPOSTCTORS hooks + FS.ignorePermissions = false; + // End ATPOSTCTORS hooks +} + +function postRun() { + checkStackCookie(); + // PThreads reuse the runtime from the main thread. + + if (Module['postRun']) { + if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']]; + while (Module['postRun'].length) { + addOnPostRun(Module['postRun'].shift()); + } + } + consumedModuleProp('postRun'); + + // Begin ATPOSTRUNS hooks + callRuntimeCallbacks(onPostRuns); + // End ATPOSTRUNS hooks +} + +// A counter of dependencies for calling run(). If we need to +// do asynchronous work before running, increment this and +// decrement it. Incrementing must happen in a place like +// Module.preRun (used by emcc to add file preloading). +// Note that you can add dependencies in preRun, even though +// it happens right before run - run will be postponed until +// the dependencies are met. 
+var runDependencies = 0; +var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled +var runDependencyTracking = {}; +var runDependencyWatcher = null; + +function addRunDependency(id) { + runDependencies++; + + Module['monitorRunDependencies']?.(runDependencies); + + assert(id, 'addRunDependency requires an ID') + assert(!runDependencyTracking[id]); + runDependencyTracking[id] = 1; + if (runDependencyWatcher === null && typeof setInterval != 'undefined') { + // Check for missing dependencies every few seconds + runDependencyWatcher = setInterval(() => { + if (ABORT) { + clearInterval(runDependencyWatcher); + runDependencyWatcher = null; + return; + } + var shown = false; + for (var dep in runDependencyTracking) { + if (!shown) { + shown = true; + err('still waiting on run dependencies:'); + } + err(`dependency: ${dep}`); + } + if (shown) { + err('(end of list)'); + } + }, 10000); + } +} + +function removeRunDependency(id) { + runDependencies--; + + Module['monitorRunDependencies']?.(runDependencies); + + assert(id, 'removeRunDependency requires an ID'); + assert(runDependencyTracking[id]); + delete runDependencyTracking[id]; + if (runDependencies == 0) { + if (runDependencyWatcher !== null) { + clearInterval(runDependencyWatcher); + runDependencyWatcher = null; + } + if (dependenciesFulfilled) { + var callback = dependenciesFulfilled; + dependenciesFulfilled = null; + callback(); // can add another dependenciesFulfilled + } + } +} + +/** @param {string|number=} what */ +function abort(what) { + Module['onAbort']?.(what); + + what = 'Aborted(' + what + ')'; + // TODO(sbc): Should we remove printing and leave it up to whoever + // catches the exception? + err(what); + + ABORT = true; + + // Use a wasm runtime error, because a JS error might be seen as a foreign + // exception, which means we'd run destructors on it. We need the error to + // simply make the program stop. + // FIXME This approach does not work in Wasm EH because it currently does not assume + // all RuntimeErrors are from traps; it decides whether a RuntimeError is from + // a trap or not based on a hidden field within the object. So at the moment + // we don't have a way of throwing a wasm trap from JS. TODO Make a JS API that + // allows this in the wasm spec. + + // Suppress closure compiler warning here. Closure compiler's builtin extern + // definition for WebAssembly.RuntimeError claims it takes no arguments even + // though it can. + // TODO(https://github.com/google/closure-compiler/pull/3913): Remove if/when upstream closure gets fixed. + /** @suppress {checkTypes} */ + var e = new WebAssembly.RuntimeError(what); + + readyPromiseReject?.(e); + // Throw the error whether or not MODULARIZE is set because abort is used + // in code paths apart from instantiation where an exception is expected + // to be thrown when abort is called. + throw e; +} + +function createExportWrapper(name, nargs) { + return (...args) => { + assert(runtimeInitialized, `native function \`${name}\` called before runtime initialization`); + var f = wasmExports[name]; + assert(f, `exported native function \`${name}\` not found`); + // Only assert for too many arguments. Too few can be valid since the missing arguments will be zero filled. 
+ assert(args.length <= nargs, `native function \`${name}\` called with ${args.length} args but expects ${nargs}`); + return f(...args); + }; +} + +var wasmBinaryFile; + +function findWasmBinary() { + if (Module['locateFile']) { + return locateFile('geant4_wasm.wasm'); + } + // Use bundler-friendly `new URL(..., import.meta.url)` pattern; works in browsers too. + return new URL('geant4_wasm.wasm', import.meta.url).href; +} + +function getBinarySync(file) { + if (file == wasmBinaryFile && wasmBinary) { + return new Uint8Array(wasmBinary); + } + if (readBinary) { + return readBinary(file); + } + throw 'both async and sync fetching of the wasm failed'; +} + +async function getWasmBinary(binaryFile) { + // If we don't have the binary yet, load it asynchronously using readAsync. + if (!wasmBinary) { + // Fetch the binary using readAsync + try { + var response = await readAsync(binaryFile); + return new Uint8Array(response); + } catch { + // Fall back to getBinarySync below; + } + } + + // Otherwise, getBinarySync should be able to get it synchronously + return getBinarySync(binaryFile); +} + +async function instantiateArrayBuffer(binaryFile, imports) { + try { + var binary = await getWasmBinary(binaryFile); + var instance = await WebAssembly.instantiate(binary, imports); + return instance; + } catch (reason) { + err(`failed to asynchronously prepare wasm: ${reason}`); + + // Warn on some common problems. + if (isFileURI(wasmBinaryFile)) { + err(`warning: Loading from a file URI (${wasmBinaryFile}) is not supported in most browsers. See https://emscripten.org/docs/getting_started/FAQ.html#how-do-i-run-a-local-webserver-for-testing-why-does-my-program-stall-in-downloading-or-preparing`); + } + abort(reason); + } +} + +async function instantiateAsync(binary, binaryFile, imports) { + if (!binary + ) { + try { + var response = fetch(binaryFile, { credentials: 'same-origin' }); + var instantiationResult = await WebAssembly.instantiateStreaming(response, imports); + return instantiationResult; + } catch (reason) { + // We expect the most common failure cause to be a bad MIME type for the binary, + // in which case falling back to ArrayBuffer instantiation should work. + err(`wasm streaming compile failed: ${reason}`); + err('falling back to ArrayBuffer instantiation'); + // fall back of instantiateArrayBuffer below + }; + } + return instantiateArrayBuffer(binaryFile, imports); +} + +function getWasmImports() { + // prepare imports + return { + 'env': wasmImports, + 'wasi_snapshot_preview1': wasmImports, + } +} + +// Create the wasm instance. +// Receives the wasm imports, returns the exports. +async function createWasm() { + // Load the wasm module and create an instance of using native support in the JS engine. + // handle a generated wasm instance, receiving its exports and + // performing other necessary setup + /** @param {WebAssembly.Module=} module*/ + function receiveInstance(instance, module) { + wasmExports = instance.exports; + + + + wasmMemory = wasmExports['memory']; + + assert(wasmMemory, 'memory not found in wasm exports'); + updateMemoryViews(); + + wasmTable = wasmExports['__indirect_function_table']; + + assert(wasmTable, 'table not found in wasm exports'); + + assignWasmExports(wasmExports); + removeRunDependency('wasm-instantiate'); + return wasmExports; + } + // wait for the pthread pool (if any) + addRunDependency('wasm-instantiate'); + + // Prefer streaming instantiation if available. 
+ // Async compilation can be confusing when an error on the page overwrites Module + // (for example, if the order of elements is wrong, and the one defining Module is + // later), so we save Module and check it later. + var trueModule = Module; + function receiveInstantiationResult(result) { + // 'result' is a ResultObject object which has both the module and instance. + // receiveInstance() will swap in the exports (to Module.asm) so they can be called + assert(Module === trueModule, 'the Module object should not be replaced during async compilation - perhaps the order of HTML elements is wrong?'); + trueModule = null; + // TODO: Due to Closure regression https://github.com/google/closure-compiler/issues/3193, the above line no longer optimizes out down to the following line. + // When the regression is fixed, can restore the above PTHREADS-enabled path. + return receiveInstance(result['instance']); + } + + var info = getWasmImports(); + + // User shell pages can write their own Module.instantiateWasm = function(imports, successCallback) callback + // to manually instantiate the Wasm module themselves. This allows pages to + // run the instantiation parallel to any other async startup actions they are + // performing. + // Also pthreads and wasm workers initialize the wasm instance through this + // path. + if (Module['instantiateWasm']) { + return new Promise((resolve, reject) => { + try { + Module['instantiateWasm'](info, (mod, inst) => { + resolve(receiveInstance(mod, inst)); + }); + } catch(e) { + err(`Module.instantiateWasm callback failed with error: ${e}`); + reject(e); + } + }); + } + + wasmBinaryFile ??= findWasmBinary(); + var result = await instantiateAsync(wasmBinary, wasmBinaryFile, info); + var exports = receiveInstantiationResult(result); + return exports; +} + +// end include: preamble.js + +// Begin JS library code + + + class ExitStatus { + name = 'ExitStatus'; + constructor(status) { + this.message = `Program terminated with exit(${status})`; + this.status = status; + } + } + + var callRuntimeCallbacks = (callbacks) => { + while (callbacks.length > 0) { + // Pass the module as the first argument. + callbacks.shift()(Module); + } + }; + var onPostRuns = []; + var addOnPostRun = (cb) => onPostRuns.push(cb); + + var onPreRuns = []; + var addOnPreRun = (cb) => onPreRuns.push(cb); + + + + /** + * @param {number} ptr + * @param {string} type + */ + function getValue(ptr, type = 'i8') { + if (type.endsWith('*')) type = '*'; + switch (type) { + case 'i1': return HEAP8[ptr]; + case 'i8': return HEAP8[ptr]; + case 'i16': return HEAP16[((ptr)>>1)]; + case 'i32': return HEAP32[((ptr)>>2)]; + case 'i64': return HEAP64[((ptr)>>3)]; + case 'float': return HEAPF32[((ptr)>>2)]; + case 'double': return HEAPF64[((ptr)>>3)]; + case '*': return HEAPU32[((ptr)>>2)]; + default: abort(`invalid type for getValue: ${type}`); + } + } + + var noExitRuntime = true; + + var ptrToString = (ptr) => { + assert(typeof ptr === 'number'); + // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned. 
+ ptr >>>= 0; + return '0x' + ptr.toString(16).padStart(8, '0'); + }; + + + /** + * @param {number} ptr + * @param {number} value + * @param {string} type + */ + function setValue(ptr, value, type = 'i8') { + if (type.endsWith('*')) type = '*'; + switch (type) { + case 'i1': HEAP8[ptr] = value; break; + case 'i8': HEAP8[ptr] = value; break; + case 'i16': HEAP16[((ptr)>>1)] = value; break; + case 'i32': HEAP32[((ptr)>>2)] = value; break; + case 'i64': HEAP64[((ptr)>>3)] = BigInt(value); break; + case 'float': HEAPF32[((ptr)>>2)] = value; break; + case 'double': HEAPF64[((ptr)>>3)] = value; break; + case '*': HEAPU32[((ptr)>>2)] = value; break; + default: abort(`invalid type for setValue: ${type}`); + } + } + + var stackRestore = (val) => __emscripten_stack_restore(val); + + var stackSave = () => _emscripten_stack_get_current(); + + var warnOnce = (text) => { + warnOnce.shown ||= {}; + if (!warnOnce.shown[text]) { + warnOnce.shown[text] = 1; + err(text); + } + }; + + var UTF8Decoder = typeof TextDecoder != 'undefined' ? new TextDecoder() : undefined; + + var findStringEnd = (heapOrArray, idx, maxBytesToRead, ignoreNul) => { + var maxIdx = idx + maxBytesToRead; + if (ignoreNul) return maxIdx; + // TextDecoder needs to know the byte length in advance, it doesn't stop on + // null terminator by itself. + // As a tiny code save trick, compare idx against maxIdx using a negation, + // so that maxBytesToRead=undefined/NaN means Infinity. + while (heapOrArray[idx] && !(idx >= maxIdx)) ++idx; + return idx; + }; + + + /** + * Given a pointer 'idx' to a null-terminated UTF8-encoded string in the given + * array that contains uint8 values, returns a copy of that string as a + * Javascript String object. + * heapOrArray is either a regular array, or a JavaScript typed array view. + * @param {number=} idx + * @param {number=} maxBytesToRead + * @param {boolean=} ignoreNul - If true, the function will not stop on a NUL character. + * @return {string} + */ + var UTF8ArrayToString = (heapOrArray, idx = 0, maxBytesToRead, ignoreNul) => { + + var endPtr = findStringEnd(heapOrArray, idx, maxBytesToRead, ignoreNul); + + // When using conditional TextDecoder, skip it for short strings as the overhead of the native call is not worth it. + if (endPtr - idx > 16 && heapOrArray.buffer && UTF8Decoder) { + return UTF8Decoder.decode(heapOrArray.subarray(idx, endPtr)); + } + var str = ''; + while (idx < endPtr) { + // For UTF8 byte structure, see: + // http://en.wikipedia.org/wiki/UTF-8#Description + // https://www.ietf.org/rfc/rfc2279.txt + // https://tools.ietf.org/html/rfc3629 + var u0 = heapOrArray[idx++]; + if (!(u0 & 0x80)) { str += String.fromCharCode(u0); continue; } + var u1 = heapOrArray[idx++] & 63; + if ((u0 & 0xE0) == 0xC0) { str += String.fromCharCode(((u0 & 31) << 6) | u1); continue; } + var u2 = heapOrArray[idx++] & 63; + if ((u0 & 0xF0) == 0xE0) { + u0 = ((u0 & 15) << 12) | (u1 << 6) | u2; + } else { + if ((u0 & 0xF8) != 0xF0) warnOnce('Invalid UTF-8 leading byte ' + ptrToString(u0) + ' encountered when deserializing a UTF-8 string in wasm memory to a JS string!'); + u0 = ((u0 & 7) << 18) | (u1 << 12) | (u2 << 6) | (heapOrArray[idx++] & 63); + } + + if (u0 < 0x10000) { + str += String.fromCharCode(u0); + } else { + var ch = u0 - 0x10000; + str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF)); + } + } + return str; + }; + + /** + * Given a pointer 'ptr' to a null-terminated UTF8-encoded string in the + * emscripten HEAP, returns a copy of that string as a Javascript String object. 
+ * + * @param {number} ptr + * @param {number=} maxBytesToRead - An optional length that specifies the + * maximum number of bytes to read. You can omit this parameter to scan the + * string until the first 0 byte. If maxBytesToRead is passed, and the string + * at [ptr, ptr+maxBytesToReadr[ contains a null byte in the middle, then the + * string will cut short at that byte index. + * @param {boolean=} ignoreNul - If true, the function will not stop on a NUL character. + * @return {string} + */ + var UTF8ToString = (ptr, maxBytesToRead, ignoreNul) => { + assert(typeof ptr == 'number', `UTF8ToString expects a number (got ${typeof ptr})`); + return ptr ? UTF8ArrayToString(HEAPU8, ptr, maxBytesToRead, ignoreNul) : ''; + }; + var ___assert_fail = (condition, filename, line, func) => + abort(`Assertion failed: ${UTF8ToString(condition)}, at: ` + [filename ? UTF8ToString(filename) : 'unknown filename', line, func ? UTF8ToString(func) : 'unknown function']); + + class ExceptionInfo { + // excPtr - Thrown object pointer to wrap. Metadata pointer is calculated from it. + constructor(excPtr) { + this.excPtr = excPtr; + this.ptr = excPtr - 24; + } + + set_type(type) { + HEAPU32[(((this.ptr)+(4))>>2)] = type; + } + + get_type() { + return HEAPU32[(((this.ptr)+(4))>>2)]; + } + + set_destructor(destructor) { + HEAPU32[(((this.ptr)+(8))>>2)] = destructor; + } + + get_destructor() { + return HEAPU32[(((this.ptr)+(8))>>2)]; + } + + set_caught(caught) { + caught = caught ? 1 : 0; + HEAP8[(this.ptr)+(12)] = caught; + } + + get_caught() { + return HEAP8[(this.ptr)+(12)] != 0; + } + + set_rethrown(rethrown) { + rethrown = rethrown ? 1 : 0; + HEAP8[(this.ptr)+(13)] = rethrown; + } + + get_rethrown() { + return HEAP8[(this.ptr)+(13)] != 0; + } + + // Initialize native structure fields. Should be called once after allocated. + init(type, destructor) { + this.set_adjusted_ptr(0); + this.set_type(type); + this.set_destructor(destructor); + } + + set_adjusted_ptr(adjustedPtr) { + HEAPU32[(((this.ptr)+(16))>>2)] = adjustedPtr; + } + + get_adjusted_ptr() { + return HEAPU32[(((this.ptr)+(16))>>2)]; + } + } + + var exceptionLast = 0; + + var uncaughtExceptionCount = 0; + var ___cxa_throw = (ptr, type, destructor) => { + var info = new ExceptionInfo(ptr); + // Initialize ExceptionInfo content after it was allocated in __cxa_allocate_exception. + info.init(type, destructor); + exceptionLast = ptr; + uncaughtExceptionCount++; + assert(false, 'Exception thrown, but exception catching is not enabled. Compile with -sNO_DISABLE_EXCEPTION_CATCHING or -sEXCEPTION_CATCHING_ALLOWED=[..] 
to catch.'); + }; + + var PATH = { + isAbs:(path) => path.charAt(0) === '/', + splitPath:(filename) => { + var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/; + return splitPathRe.exec(filename).slice(1); + }, + normalizeArray:(parts, allowAboveRoot) => { + // if the path tries to go above the root, `up` ends up > 0 + var up = 0; + for (var i = parts.length - 1; i >= 0; i--) { + var last = parts[i]; + if (last === '.') { + parts.splice(i, 1); + } else if (last === '..') { + parts.splice(i, 1); + up++; + } else if (up) { + parts.splice(i, 1); + up--; + } + } + // if the path is allowed to go above the root, restore leading ..s + if (allowAboveRoot) { + for (; up; up--) { + parts.unshift('..'); + } + } + return parts; + }, + normalize:(path) => { + var isAbsolute = PATH.isAbs(path), + trailingSlash = path.slice(-1) === '/'; + // Normalize the path + path = PATH.normalizeArray(path.split('/').filter((p) => !!p), !isAbsolute).join('/'); + if (!path && !isAbsolute) { + path = '.'; + } + if (path && trailingSlash) { + path += '/'; + } + return (isAbsolute ? '/' : '') + path; + }, + dirname:(path) => { + var result = PATH.splitPath(path), + root = result[0], + dir = result[1]; + if (!root && !dir) { + // No dirname whatsoever + return '.'; + } + if (dir) { + // It has a dirname, strip trailing slash + dir = dir.slice(0, -1); + } + return root + dir; + }, + basename:(path) => path && path.match(/([^\/]+|\/)\/*$/)[1], + join:(...paths) => PATH.normalize(paths.join('/')), + join2:(l, r) => PATH.normalize(l + '/' + r), + }; + + var initRandomFill = () => { + + return (view) => crypto.getRandomValues(view); + }; + var randomFill = (view) => { + // Lazily init on the first invocation. + (randomFill = initRandomFill())(view); + }; + + + + var PATH_FS = { + resolve:(...args) => { + var resolvedPath = '', + resolvedAbsolute = false; + for (var i = args.length - 1; i >= -1 && !resolvedAbsolute; i--) { + var path = (i >= 0) ? args[i] : FS.cwd(); + // Skip empty and invalid entries + if (typeof path != 'string') { + throw new TypeError('Arguments to path.resolve must be strings'); + } else if (!path) { + return ''; // an invalid portion invalidates the whole thing + } + resolvedPath = path + '/' + resolvedPath; + resolvedAbsolute = PATH.isAbs(path); + } + // At this point the path should be resolved to a full absolute path, but + // handle relative paths to be safe (might happen when process.cwd() fails) + resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter((p) => !!p), !resolvedAbsolute).join('/'); + return ((resolvedAbsolute ? 
'/' : '') + resolvedPath) || '.'; + }, + relative:(from, to) => { + from = PATH_FS.resolve(from).slice(1); + to = PATH_FS.resolve(to).slice(1); + function trim(arr) { + var start = 0; + for (; start < arr.length; start++) { + if (arr[start] !== '') break; + } + var end = arr.length - 1; + for (; end >= 0; end--) { + if (arr[end] !== '') break; + } + if (start > end) return []; + return arr.slice(start, end - start + 1); + } + var fromParts = trim(from.split('/')); + var toParts = trim(to.split('/')); + var length = Math.min(fromParts.length, toParts.length); + var samePartsLength = length; + for (var i = 0; i < length; i++) { + if (fromParts[i] !== toParts[i]) { + samePartsLength = i; + break; + } + } + var outputParts = []; + for (var i = samePartsLength; i < fromParts.length; i++) { + outputParts.push('..'); + } + outputParts = outputParts.concat(toParts.slice(samePartsLength)); + return outputParts.join('/'); + }, + }; + + + + var FS_stdin_getChar_buffer = []; + + var lengthBytesUTF8 = (str) => { + var len = 0; + for (var i = 0; i < str.length; ++i) { + // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code + // unit, not a Unicode code point of the character! So decode + // UTF16->UTF32->UTF8. + // See http://unicode.org/faq/utf_bom.html#utf16-3 + var c = str.charCodeAt(i); // possibly a lead surrogate + if (c <= 0x7F) { + len++; + } else if (c <= 0x7FF) { + len += 2; + } else if (c >= 0xD800 && c <= 0xDFFF) { + len += 4; ++i; + } else { + len += 3; + } + } + return len; + }; + + var stringToUTF8Array = (str, heap, outIdx, maxBytesToWrite) => { + assert(typeof str === 'string', `stringToUTF8Array expects a string (got ${typeof str})`); + // Parameter maxBytesToWrite is not optional. Negative values, 0, null, + // undefined and false each don't write out any bytes. + if (!(maxBytesToWrite > 0)) + return 0; + + var startIdx = outIdx; + var endIdx = outIdx + maxBytesToWrite - 1; // -1 for string null terminator. + for (var i = 0; i < str.length; ++i) { + // For UTF8 byte structure, see http://en.wikipedia.org/wiki/UTF-8#Description + // and https://www.ietf.org/rfc/rfc2279.txt + // and https://tools.ietf.org/html/rfc3629 + var u = str.codePointAt(i); + if (u <= 0x7F) { + if (outIdx >= endIdx) break; + heap[outIdx++] = u; + } else if (u <= 0x7FF) { + if (outIdx + 1 >= endIdx) break; + heap[outIdx++] = 0xC0 | (u >> 6); + heap[outIdx++] = 0x80 | (u & 63); + } else if (u <= 0xFFFF) { + if (outIdx + 2 >= endIdx) break; + heap[outIdx++] = 0xE0 | (u >> 12); + heap[outIdx++] = 0x80 | ((u >> 6) & 63); + heap[outIdx++] = 0x80 | (u & 63); + } else { + if (outIdx + 3 >= endIdx) break; + if (u > 0x10FFFF) warnOnce('Invalid Unicode code point ' + ptrToString(u) + ' encountered when serializing a JS string to a UTF-8 string in wasm memory! (Valid unicode code points should be in range 0-0x10FFFF).'); + heap[outIdx++] = 0xF0 | (u >> 18); + heap[outIdx++] = 0x80 | ((u >> 12) & 63); + heap[outIdx++] = 0x80 | ((u >> 6) & 63); + heap[outIdx++] = 0x80 | (u & 63); + // Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16. + // We need to manually skip over the second code unit for correct iteration. + i++; + } + } + // Null-terminate the pointer to the buffer. + heap[outIdx] = 0; + return outIdx - startIdx; + }; + /** @type {function(string, boolean=, number=)} */ + var intArrayFromString = (stringy, dontAddNull, length) => { + var len = length > 0 ? 
length : lengthBytesUTF8(stringy)+1; + var u8array = new Array(len); + var numBytesWritten = stringToUTF8Array(stringy, u8array, 0, u8array.length); + if (dontAddNull) u8array.length = numBytesWritten; + return u8array; + }; + var FS_stdin_getChar = () => { + if (!FS_stdin_getChar_buffer.length) { + var result = null; + {} + if (!result) { + return null; + } + FS_stdin_getChar_buffer = intArrayFromString(result, true); + } + return FS_stdin_getChar_buffer.shift(); + }; + var TTY = { + ttys:[], + init() { + // https://github.com/emscripten-core/emscripten/pull/1555 + // if (ENVIRONMENT_IS_NODE) { + // // currently, FS.init does not distinguish if process.stdin is a file or TTY + // // device, it always assumes it's a TTY device. because of this, we're forcing + // // process.stdin to UTF8 encoding to at least make stdin reading compatible + // // with text files until FS.init can be refactored. + // process.stdin.setEncoding('utf8'); + // } + }, + shutdown() { + // https://github.com/emscripten-core/emscripten/pull/1555 + // if (ENVIRONMENT_IS_NODE) { + // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)? + // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation + // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists? + // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle + // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call + // process.stdin.pause(); + // } + }, + register(dev, ops) { + TTY.ttys[dev] = { input: [], output: [], ops: ops }; + FS.registerDevice(dev, TTY.stream_ops); + }, + stream_ops:{ + open(stream) { + var tty = TTY.ttys[stream.node.rdev]; + if (!tty) { + throw new FS.ErrnoError(43); + } + stream.tty = tty; + stream.seekable = false; + }, + close(stream) { + // flush any pending line data + stream.tty.ops.fsync(stream.tty); + }, + fsync(stream) { + stream.tty.ops.fsync(stream.tty); + }, + read(stream, buffer, offset, length, pos /* ignored */) { + if (!stream.tty || !stream.tty.ops.get_char) { + throw new FS.ErrnoError(60); + } + var bytesRead = 0; + for (var i = 0; i < length; i++) { + var result; + try { + result = stream.tty.ops.get_char(stream.tty); + } catch (e) { + throw new FS.ErrnoError(29); + } + if (result === undefined && bytesRead === 0) { + throw new FS.ErrnoError(6); + } + if (result === null || result === undefined) break; + bytesRead++; + buffer[offset+i] = result; + } + if (bytesRead) { + stream.node.atime = Date.now(); + } + return bytesRead; + }, + write(stream, buffer, offset, length, pos) { + if (!stream.tty || !stream.tty.ops.put_char) { + throw new FS.ErrnoError(60); + } + try { + for (var i = 0; i < length; i++) { + stream.tty.ops.put_char(stream.tty, buffer[offset+i]); + } + } catch (e) { + throw new FS.ErrnoError(29); + } + if (length) { + stream.node.mtime = stream.node.ctime = Date.now(); + } + return i; + }, + }, + default_tty_ops:{ + get_char(tty) { + return FS_stdin_getChar(); + }, + put_char(tty, val) { + if (val === null || val === 10) { + out(UTF8ArrayToString(tty.output)); + tty.output = []; + } else { + if (val != 0) tty.output.push(val); // val == 0 would cut text output off in the middle. 
+ } + }, + fsync(tty) { + if (tty.output?.length > 0) { + out(UTF8ArrayToString(tty.output)); + tty.output = []; + } + }, + ioctl_tcgets(tty) { + // typical setting + return { + c_iflag: 25856, + c_oflag: 5, + c_cflag: 191, + c_lflag: 35387, + c_cc: [ + 0x03, 0x1c, 0x7f, 0x15, 0x04, 0x00, 0x01, 0x00, 0x11, 0x13, 0x1a, 0x00, + 0x12, 0x0f, 0x17, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ] + }; + }, + ioctl_tcsets(tty, optional_actions, data) { + // currently just ignore + return 0; + }, + ioctl_tiocgwinsz(tty) { + return [24, 80]; + }, + }, + default_tty1_ops:{ + put_char(tty, val) { + if (val === null || val === 10) { + err(UTF8ArrayToString(tty.output)); + tty.output = []; + } else { + if (val != 0) tty.output.push(val); + } + }, + fsync(tty) { + if (tty.output?.length > 0) { + err(UTF8ArrayToString(tty.output)); + tty.output = []; + } + }, + }, + }; + + + var mmapAlloc = (size) => { + abort('internal error: mmapAlloc called but `emscripten_builtin_memalign` native symbol not exported'); + }; + var MEMFS = { + ops_table:null, + mount(mount) { + return MEMFS.createNode(null, '/', 16895, 0); + }, + createNode(parent, name, mode, dev) { + if (FS.isBlkdev(mode) || FS.isFIFO(mode)) { + // no supported + throw new FS.ErrnoError(63); + } + MEMFS.ops_table ||= { + dir: { + node: { + getattr: MEMFS.node_ops.getattr, + setattr: MEMFS.node_ops.setattr, + lookup: MEMFS.node_ops.lookup, + mknod: MEMFS.node_ops.mknod, + rename: MEMFS.node_ops.rename, + unlink: MEMFS.node_ops.unlink, + rmdir: MEMFS.node_ops.rmdir, + readdir: MEMFS.node_ops.readdir, + symlink: MEMFS.node_ops.symlink + }, + stream: { + llseek: MEMFS.stream_ops.llseek + } + }, + file: { + node: { + getattr: MEMFS.node_ops.getattr, + setattr: MEMFS.node_ops.setattr + }, + stream: { + llseek: MEMFS.stream_ops.llseek, + read: MEMFS.stream_ops.read, + write: MEMFS.stream_ops.write, + mmap: MEMFS.stream_ops.mmap, + msync: MEMFS.stream_ops.msync + } + }, + link: { + node: { + getattr: MEMFS.node_ops.getattr, + setattr: MEMFS.node_ops.setattr, + readlink: MEMFS.node_ops.readlink + }, + stream: {} + }, + chrdev: { + node: { + getattr: MEMFS.node_ops.getattr, + setattr: MEMFS.node_ops.setattr + }, + stream: FS.chrdev_stream_ops + } + }; + var node = FS.createNode(parent, name, mode, dev); + if (FS.isDir(node.mode)) { + node.node_ops = MEMFS.ops_table.dir.node; + node.stream_ops = MEMFS.ops_table.dir.stream; + node.contents = {}; + } else if (FS.isFile(node.mode)) { + node.node_ops = MEMFS.ops_table.file.node; + node.stream_ops = MEMFS.ops_table.file.stream; + node.usedBytes = 0; // The actual number of bytes used in the typed array, as opposed to contents.length which gives the whole capacity. + // When the byte data of the file is populated, this will point to either a typed array, or a normal JS array. Typed arrays are preferred + // for performance, and used by default. However, typed arrays are not resizable like normal JS arrays are, so there is a small disk size + // penalty involved for appending file writes that continuously grow a file similar to std::vector capacity vs used -scheme. 
+ node.contents = null; + } else if (FS.isLink(node.mode)) { + node.node_ops = MEMFS.ops_table.link.node; + node.stream_ops = MEMFS.ops_table.link.stream; + } else if (FS.isChrdev(node.mode)) { + node.node_ops = MEMFS.ops_table.chrdev.node; + node.stream_ops = MEMFS.ops_table.chrdev.stream; + } + node.atime = node.mtime = node.ctime = Date.now(); + // add the new node to the parent + if (parent) { + parent.contents[name] = node; + parent.atime = parent.mtime = parent.ctime = node.atime; + } + return node; + }, + getFileDataAsTypedArray(node) { + if (!node.contents) return new Uint8Array(0); + if (node.contents.subarray) return node.contents.subarray(0, node.usedBytes); // Make sure to not return excess unused bytes. + return new Uint8Array(node.contents); + }, + expandFileStorage(node, newCapacity) { + var prevCapacity = node.contents ? node.contents.length : 0; + if (prevCapacity >= newCapacity) return; // No need to expand, the storage was already large enough. + // Don't expand strictly to the given requested limit if it's only a very small increase, but instead geometrically grow capacity. + // For small filesizes (<1MB), perform size*2 geometric increase, but for large sizes, do a much more conservative size*1.125 increase to + // avoid overshooting the allocation cap by a very large margin. + var CAPACITY_DOUBLING_MAX = 1024 * 1024; + newCapacity = Math.max(newCapacity, (prevCapacity * (prevCapacity < CAPACITY_DOUBLING_MAX ? 2.0 : 1.125)) >>> 0); + if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256); // At minimum allocate 256b for each file when expanding. + var oldContents = node.contents; + node.contents = new Uint8Array(newCapacity); // Allocate new storage. + if (node.usedBytes > 0) node.contents.set(oldContents.subarray(0, node.usedBytes), 0); // Copy old data over to the new storage. + }, + resizeFileStorage(node, newSize) { + if (node.usedBytes == newSize) return; + if (newSize == 0) { + node.contents = null; // Fully decommit when requesting a resize to zero. + node.usedBytes = 0; + } else { + var oldContents = node.contents; + node.contents = new Uint8Array(newSize); // Allocate new storage. + if (oldContents) { + node.contents.set(oldContents.subarray(0, Math.min(newSize, node.usedBytes))); // Copy old data over to the new storage. + } + node.usedBytes = newSize; + } + }, + node_ops:{ + getattr(node) { + var attr = {}; + // device numbers reuse inode numbers. + attr.dev = FS.isChrdev(node.mode) ? node.id : 1; + attr.ino = node.id; + attr.mode = node.mode; + attr.nlink = 1; + attr.uid = 0; + attr.gid = 0; + attr.rdev = node.rdev; + if (FS.isDir(node.mode)) { + attr.size = 4096; + } else if (FS.isFile(node.mode)) { + attr.size = node.usedBytes; + } else if (FS.isLink(node.mode)) { + attr.size = node.link.length; + } else { + attr.size = 0; + } + attr.atime = new Date(node.atime); + attr.mtime = new Date(node.mtime); + attr.ctime = new Date(node.ctime); + // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize), + // but this is not required by the standard. 
+ attr.blksize = 4096; + attr.blocks = Math.ceil(attr.size / attr.blksize); + return attr; + }, + setattr(node, attr) { + for (const key of ["mode", "atime", "mtime", "ctime"]) { + if (attr[key] != null) { + node[key] = attr[key]; + } + } + if (attr.size !== undefined) { + MEMFS.resizeFileStorage(node, attr.size); + } + }, + lookup(parent, name) { + throw new FS.ErrnoError(44); + }, + mknod(parent, name, mode, dev) { + return MEMFS.createNode(parent, name, mode, dev); + }, + rename(old_node, new_dir, new_name) { + var new_node; + try { + new_node = FS.lookupNode(new_dir, new_name); + } catch (e) {} + if (new_node) { + if (FS.isDir(old_node.mode)) { + // if we're overwriting a directory at new_name, make sure it's empty. + for (var i in new_node.contents) { + throw new FS.ErrnoError(55); + } + } + FS.hashRemoveNode(new_node); + } + // do the internal rewiring + delete old_node.parent.contents[old_node.name]; + new_dir.contents[new_name] = old_node; + old_node.name = new_name; + new_dir.ctime = new_dir.mtime = old_node.parent.ctime = old_node.parent.mtime = Date.now(); + }, + unlink(parent, name) { + delete parent.contents[name]; + parent.ctime = parent.mtime = Date.now(); + }, + rmdir(parent, name) { + var node = FS.lookupNode(parent, name); + for (var i in node.contents) { + throw new FS.ErrnoError(55); + } + delete parent.contents[name]; + parent.ctime = parent.mtime = Date.now(); + }, + readdir(node) { + return ['.', '..', ...Object.keys(node.contents)]; + }, + symlink(parent, newname, oldpath) { + var node = MEMFS.createNode(parent, newname, 0o777 | 40960, 0); + node.link = oldpath; + return node; + }, + readlink(node) { + if (!FS.isLink(node.mode)) { + throw new FS.ErrnoError(28); + } + return node.link; + }, + }, + stream_ops:{ + read(stream, buffer, offset, length, position) { + var contents = stream.node.contents; + if (position >= stream.node.usedBytes) return 0; + var size = Math.min(stream.node.usedBytes - position, length); + assert(size >= 0); + if (size > 8 && contents.subarray) { // non-trivial, and typed array + buffer.set(contents.subarray(position, position + size), offset); + } else { + for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i]; + } + return size; + }, + write(stream, buffer, offset, length, position, canOwn) { + // The data buffer should be a typed array view + assert(!(buffer instanceof ArrayBuffer)); + + if (!length) return 0; + var node = stream.node; + node.mtime = node.ctime = Date.now(); + + if (buffer.subarray && (!node.contents || node.contents.subarray)) { // This write is from a typed array to a typed array? + if (canOwn) { + assert(position === 0, 'canOwn must imply no weird position inside the file'); + node.contents = buffer.subarray(offset, offset + length); + node.usedBytes = length; + return length; + } else if (node.usedBytes === 0 && position === 0) { // If this is a simple first write to an empty file, do a fast set since we don't need to care about old data. + node.contents = buffer.slice(offset, offset + length); + node.usedBytes = length; + return length; + } else if (position + length <= node.usedBytes) { // Writing to an already allocated and used subrange of the file? + node.contents.set(buffer.subarray(offset, offset + length), position); + return length; + } + } + + // Appending to an existing file and we need to reallocate, or source data did not come as a typed array. 
+ MEMFS.expandFileStorage(node, position+length); + if (node.contents.subarray && buffer.subarray) { + // Use typed array write which is available. + node.contents.set(buffer.subarray(offset, offset + length), position); + } else { + for (var i = 0; i < length; i++) { + node.contents[position + i] = buffer[offset + i]; // Or fall back to manual write if not. + } + } + node.usedBytes = Math.max(node.usedBytes, position + length); + return length; + }, + llseek(stream, offset, whence) { + var position = offset; + if (whence === 1) { + position += stream.position; + } else if (whence === 2) { + if (FS.isFile(stream.node.mode)) { + position += stream.node.usedBytes; + } + } + if (position < 0) { + throw new FS.ErrnoError(28); + } + return position; + }, + mmap(stream, length, position, prot, flags) { + if (!FS.isFile(stream.node.mode)) { + throw new FS.ErrnoError(43); + } + var ptr; + var allocated; + var contents = stream.node.contents; + // Only make a new copy when MAP_PRIVATE is specified. + if (!(flags & 2) && contents && contents.buffer === HEAP8.buffer) { + // We can't emulate MAP_SHARED when the file is not backed by the + // buffer we're mapping to (e.g. the HEAP buffer). + allocated = false; + ptr = contents.byteOffset; + } else { + allocated = true; + ptr = mmapAlloc(length); + if (!ptr) { + throw new FS.ErrnoError(48); + } + if (contents) { + // Try to avoid unnecessary slices. + if (position > 0 || position + length < contents.length) { + if (contents.subarray) { + contents = contents.subarray(position, position + length); + } else { + contents = Array.prototype.slice.call(contents, position, position + length); + } + } + HEAP8.set(contents, ptr); + } + } + return { ptr, allocated }; + }, + msync(stream, buffer, offset, length, mmapFlags) { + MEMFS.stream_ops.write(stream, buffer, 0, length, offset, false); + // should we check if bytesWritten and length are the same? 
+ return 0; + }, + }, + }; + + var FS_modeStringToFlags = (str) => { + var flagModes = { + 'r': 0, + 'r+': 2, + 'w': 512 | 64 | 1, + 'w+': 512 | 64 | 2, + 'a': 1024 | 64 | 1, + 'a+': 1024 | 64 | 2, + }; + var flags = flagModes[str]; + if (typeof flags == 'undefined') { + throw new Error(`Unknown file open mode: ${str}`); + } + return flags; + }; + + var FS_getMode = (canRead, canWrite) => { + var mode = 0; + if (canRead) mode |= 292 | 73; + if (canWrite) mode |= 146; + return mode; + }; + + + + + var strError = (errno) => UTF8ToString(_strerror(errno)); + + var ERRNO_CODES = { + 'EPERM': 63, + 'ENOENT': 44, + 'ESRCH': 71, + 'EINTR': 27, + 'EIO': 29, + 'ENXIO': 60, + 'E2BIG': 1, + 'ENOEXEC': 45, + 'EBADF': 8, + 'ECHILD': 12, + 'EAGAIN': 6, + 'EWOULDBLOCK': 6, + 'ENOMEM': 48, + 'EACCES': 2, + 'EFAULT': 21, + 'ENOTBLK': 105, + 'EBUSY': 10, + 'EEXIST': 20, + 'EXDEV': 75, + 'ENODEV': 43, + 'ENOTDIR': 54, + 'EISDIR': 31, + 'EINVAL': 28, + 'ENFILE': 41, + 'EMFILE': 33, + 'ENOTTY': 59, + 'ETXTBSY': 74, + 'EFBIG': 22, + 'ENOSPC': 51, + 'ESPIPE': 70, + 'EROFS': 69, + 'EMLINK': 34, + 'EPIPE': 64, + 'EDOM': 18, + 'ERANGE': 68, + 'ENOMSG': 49, + 'EIDRM': 24, + 'ECHRNG': 106, + 'EL2NSYNC': 156, + 'EL3HLT': 107, + 'EL3RST': 108, + 'ELNRNG': 109, + 'EUNATCH': 110, + 'ENOCSI': 111, + 'EL2HLT': 112, + 'EDEADLK': 16, + 'ENOLCK': 46, + 'EBADE': 113, + 'EBADR': 114, + 'EXFULL': 115, + 'ENOANO': 104, + 'EBADRQC': 103, + 'EBADSLT': 102, + 'EDEADLOCK': 16, + 'EBFONT': 101, + 'ENOSTR': 100, + 'ENODATA': 116, + 'ETIME': 117, + 'ENOSR': 118, + 'ENONET': 119, + 'ENOPKG': 120, + 'EREMOTE': 121, + 'ENOLINK': 47, + 'EADV': 122, + 'ESRMNT': 123, + 'ECOMM': 124, + 'EPROTO': 65, + 'EMULTIHOP': 36, + 'EDOTDOT': 125, + 'EBADMSG': 9, + 'ENOTUNIQ': 126, + 'EBADFD': 127, + 'EREMCHG': 128, + 'ELIBACC': 129, + 'ELIBBAD': 130, + 'ELIBSCN': 131, + 'ELIBMAX': 132, + 'ELIBEXEC': 133, + 'ENOSYS': 52, + 'ENOTEMPTY': 55, + 'ENAMETOOLONG': 37, + 'ELOOP': 32, + 'EOPNOTSUPP': 138, + 'EPFNOSUPPORT': 139, + 'ECONNRESET': 15, + 'ENOBUFS': 42, + 'EAFNOSUPPORT': 5, + 'EPROTOTYPE': 67, + 'ENOTSOCK': 57, + 'ENOPROTOOPT': 50, + 'ESHUTDOWN': 140, + 'ECONNREFUSED': 14, + 'EADDRINUSE': 3, + 'ECONNABORTED': 13, + 'ENETUNREACH': 40, + 'ENETDOWN': 38, + 'ETIMEDOUT': 73, + 'EHOSTDOWN': 142, + 'EHOSTUNREACH': 23, + 'EINPROGRESS': 26, + 'EALREADY': 7, + 'EDESTADDRREQ': 17, + 'EMSGSIZE': 35, + 'EPROTONOSUPPORT': 66, + 'ESOCKTNOSUPPORT': 137, + 'EADDRNOTAVAIL': 4, + 'ENETRESET': 39, + 'EISCONN': 30, + 'ENOTCONN': 53, + 'ETOOMANYREFS': 141, + 'EUSERS': 136, + 'EDQUOT': 19, + 'ESTALE': 72, + 'ENOTSUP': 138, + 'ENOMEDIUM': 148, + 'EILSEQ': 25, + 'EOVERFLOW': 61, + 'ECANCELED': 11, + 'ENOTRECOVERABLE': 56, + 'EOWNERDEAD': 62, + 'ESTRPIPE': 135, + }; + + var asyncLoad = async (url) => { + var arrayBuffer = await readAsync(url); + assert(arrayBuffer, `Loading data file "${url}" failed (no arrayBuffer).`); + return new Uint8Array(arrayBuffer); + }; + + + var FS_createDataFile = (...args) => FS.createDataFile(...args); + + var getUniqueRunDependency = (id) => { + var orig = id; + while (1) { + if (!runDependencyTracking[id]) return id; + id = orig + Math.random(); + } + }; + + var preloadPlugins = []; + var FS_handledByPreloadPlugin = async (byteArray, fullname) => { + // Ensure plugins are ready. 
+ if (typeof Browser != 'undefined') Browser.init(); + + for (var plugin of preloadPlugins) { + if (plugin['canHandle'](fullname)) { + assert(plugin['handle'].constructor.name === 'AsyncFunction', 'Filesystem plugin handlers must be async functions (See #24914)') + return plugin['handle'](byteArray, fullname); + } + } + // In no plugin handled this file then return the original/unmodified + // byteArray. + return byteArray; + }; + var FS_preloadFile = async (parent, name, url, canRead, canWrite, dontCreateFile, canOwn, preFinish) => { + // TODO we should allow people to just pass in a complete filename instead + // of parent and name being that we just join them anyways + var fullname = name ? PATH_FS.resolve(PATH.join2(parent, name)) : parent; + var dep = getUniqueRunDependency(`cp ${fullname}`); // might have several active requests for the same fullname + addRunDependency(dep); + + try { + var byteArray = url; + if (typeof url == 'string') { + byteArray = await asyncLoad(url); + } + + byteArray = await FS_handledByPreloadPlugin(byteArray, fullname); + preFinish?.(); + if (!dontCreateFile) { + FS_createDataFile(parent, name, byteArray, canRead, canWrite, canOwn); + } + } finally { + removeRunDependency(dep); + } + }; + var FS_createPreloadedFile = (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn, preFinish) => { + FS_preloadFile(parent, name, url, canRead, canWrite, dontCreateFile, canOwn, preFinish).then(onload).catch(onerror); + }; + var FS = { + root:null, + mounts:[], + devices:{ + }, + streams:[], + nextInode:1, + nameTable:null, + currentPath:"/", + initialized:false, + ignorePermissions:true, + filesystems:null, + syncFSRequests:0, + readFiles:{ + }, + ErrnoError:class extends Error { + name = 'ErrnoError'; + // We set the `name` property to be able to identify `FS.ErrnoError` + // - the `name` is a standard ECMA-262 property of error objects. Kind of good to have it anyway. + // - when using PROXYFS, an error can come from an underlying FS + // as different FS objects have their own FS.ErrnoError each, + // the test `err instanceof FS.ErrnoError` won't detect an error coming from another filesystem, causing bugs. + // we'll use the reliable test `err.name == "ErrnoError"` instead + constructor(errno) { + super(runtimeInitialized ? strError(errno) : ''); + this.errno = errno; + for (var key in ERRNO_CODES) { + if (ERRNO_CODES[key] === errno) { + this.code = key; + break; + } + } + } + }, + FSStream:class { + shared = {}; + get object() { + return this.node; + } + set object(val) { + this.node = val; + } + get isRead() { + return (this.flags & 2097155) !== 1; + } + get isWrite() { + return (this.flags & 2097155) !== 0; + } + get isAppend() { + return (this.flags & 1024); + } + get flags() { + return this.shared.flags; + } + set flags(val) { + this.shared.flags = val; + } + get position() { + return this.shared.position; + } + set position(val) { + this.shared.position = val; + } + }, + FSNode:class { + node_ops = {}; + stream_ops = {}; + readMode = 292 | 73; + writeMode = 146; + mounted = null; + constructor(parent, name, mode, rdev) { + if (!parent) { + parent = this; // root node sets parent to itself + } + this.parent = parent; + this.mount = parent.mount; + this.id = FS.nextInode++; + this.name = name; + this.mode = mode; + this.rdev = rdev; + this.atime = this.mtime = this.ctime = Date.now(); + } + get read() { + return (this.mode & this.readMode) === this.readMode; + } + set read(val) { + val ? 
this.mode |= this.readMode : this.mode &= ~this.readMode; + } + get write() { + return (this.mode & this.writeMode) === this.writeMode; + } + set write(val) { + val ? this.mode |= this.writeMode : this.mode &= ~this.writeMode; + } + get isFolder() { + return FS.isDir(this.mode); + } + get isDevice() { + return FS.isChrdev(this.mode); + } + }, + lookupPath(path, opts = {}) { + if (!path) { + throw new FS.ErrnoError(44); + } + opts.follow_mount ??= true + + if (!PATH.isAbs(path)) { + path = FS.cwd() + '/' + path; + } + + // limit max consecutive symlinks to 40 (SYMLOOP_MAX). + linkloop: for (var nlinks = 0; nlinks < 40; nlinks++) { + // split the absolute path + var parts = path.split('/').filter((p) => !!p); + + // start at the root + var current = FS.root; + var current_path = '/'; + + for (var i = 0; i < parts.length; i++) { + var islast = (i === parts.length-1); + if (islast && opts.parent) { + // stop resolving + break; + } + + if (parts[i] === '.') { + continue; + } + + if (parts[i] === '..') { + current_path = PATH.dirname(current_path); + if (FS.isRoot(current)) { + path = current_path + '/' + parts.slice(i + 1).join('/'); + // We're making progress here, don't let many consecutive ..'s + // lead to ELOOP + nlinks--; + continue linkloop; + } else { + current = current.parent; + } + continue; + } + + current_path = PATH.join2(current_path, parts[i]); + try { + current = FS.lookupNode(current, parts[i]); + } catch (e) { + // if noent_okay is true, suppress a ENOENT in the last component + // and return an object with an undefined node. This is needed for + // resolving symlinks in the path when creating a file. + if ((e?.errno === 44) && islast && opts.noent_okay) { + return { path: current_path }; + } + throw e; + } + + // jump to the mount's root node if this is a mountpoint + if (FS.isMountpoint(current) && (!islast || opts.follow_mount)) { + current = current.mounted.root; + } + + // by default, lookupPath will not follow a symlink if it is the final path component. + // setting opts.follow = true will override this behavior. + if (FS.isLink(current.mode) && (!islast || opts.follow)) { + if (!current.node_ops.readlink) { + throw new FS.ErrnoError(52); + } + var link = current.node_ops.readlink(current); + if (!PATH.isAbs(link)) { + link = PATH.dirname(current_path) + '/' + link; + } + path = link + '/' + parts.slice(i + 1).join('/'); + continue linkloop; + } + } + return { path: current_path, node: current }; + } + throw new FS.ErrnoError(32); + }, + getPath(node) { + var path; + while (true) { + if (FS.isRoot(node)) { + var mount = node.mount.mountpoint; + if (!path) return mount; + return mount[mount.length-1] !== '/' ? `${mount}/${path}` : mount + path; + } + path = path ? 
`${node.name}/${path}` : node.name; + node = node.parent; + } + }, + hashName(parentid, name) { + var hash = 0; + + for (var i = 0; i < name.length; i++) { + hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0; + } + return ((parentid + hash) >>> 0) % FS.nameTable.length; + }, + hashAddNode(node) { + var hash = FS.hashName(node.parent.id, node.name); + node.name_next = FS.nameTable[hash]; + FS.nameTable[hash] = node; + }, + hashRemoveNode(node) { + var hash = FS.hashName(node.parent.id, node.name); + if (FS.nameTable[hash] === node) { + FS.nameTable[hash] = node.name_next; + } else { + var current = FS.nameTable[hash]; + while (current) { + if (current.name_next === node) { + current.name_next = node.name_next; + break; + } + current = current.name_next; + } + } + }, + lookupNode(parent, name) { + var errCode = FS.mayLookup(parent); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + var hash = FS.hashName(parent.id, name); + for (var node = FS.nameTable[hash]; node; node = node.name_next) { + var nodeName = node.name; + if (node.parent.id === parent.id && nodeName === name) { + return node; + } + } + // if we failed to find it in the cache, call into the VFS + return FS.lookup(parent, name); + }, + createNode(parent, name, mode, rdev) { + assert(typeof parent == 'object') + var node = new FS.FSNode(parent, name, mode, rdev); + + FS.hashAddNode(node); + + return node; + }, + destroyNode(node) { + FS.hashRemoveNode(node); + }, + isRoot(node) { + return node === node.parent; + }, + isMountpoint(node) { + return !!node.mounted; + }, + isFile(mode) { + return (mode & 61440) === 32768; + }, + isDir(mode) { + return (mode & 61440) === 16384; + }, + isLink(mode) { + return (mode & 61440) === 40960; + }, + isChrdev(mode) { + return (mode & 61440) === 8192; + }, + isBlkdev(mode) { + return (mode & 61440) === 24576; + }, + isFIFO(mode) { + return (mode & 61440) === 4096; + }, + isSocket(mode) { + return (mode & 49152) === 49152; + }, + flagsToPermissionString(flag) { + var perms = ['r', 'w', 'rw'][flag & 3]; + if ((flag & 512)) { + perms += 'w'; + } + return perms; + }, + nodePermissions(node, perms) { + if (FS.ignorePermissions) { + return 0; + } + // return 0 if any user, group or owner bits are set. + if (perms.includes('r') && !(node.mode & 292)) { + return 2; + } else if (perms.includes('w') && !(node.mode & 146)) { + return 2; + } else if (perms.includes('x') && !(node.mode & 73)) { + return 2; + } + return 0; + }, + mayLookup(dir) { + if (!FS.isDir(dir.mode)) return 54; + var errCode = FS.nodePermissions(dir, 'x'); + if (errCode) return errCode; + if (!dir.node_ops.lookup) return 2; + return 0; + }, + mayCreate(dir, name) { + if (!FS.isDir(dir.mode)) { + return 54; + } + try { + var node = FS.lookupNode(dir, name); + return 20; + } catch (e) { + } + return FS.nodePermissions(dir, 'wx'); + }, + mayDelete(dir, name, isdir) { + var node; + try { + node = FS.lookupNode(dir, name); + } catch (e) { + return e.errno; + } + var errCode = FS.nodePermissions(dir, 'wx'); + if (errCode) { + return errCode; + } + if (isdir) { + if (!FS.isDir(node.mode)) { + return 54; + } + if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) { + return 10; + } + } else { + if (FS.isDir(node.mode)) { + return 31; + } + } + return 0; + }, + mayOpen(node, flags) { + if (!node) { + return 44; + } + if (FS.isLink(node.mode)) { + return 32; + } else if (FS.isDir(node.mode)) { + if (FS.flagsToPermissionString(flags) !== 'r' // opening for write + || (flags & (512 | 64))) { // TODO: check for O_SEARCH? 
(== search for dir only) + return 31; + } + } + return FS.nodePermissions(node, FS.flagsToPermissionString(flags)); + }, + checkOpExists(op, err) { + if (!op) { + throw new FS.ErrnoError(err); + } + return op; + }, + MAX_OPEN_FDS:4096, + nextfd() { + for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) { + if (!FS.streams[fd]) { + return fd; + } + } + throw new FS.ErrnoError(33); + }, + getStreamChecked(fd) { + var stream = FS.getStream(fd); + if (!stream) { + throw new FS.ErrnoError(8); + } + return stream; + }, + getStream:(fd) => FS.streams[fd], + createStream(stream, fd = -1) { + assert(fd >= -1); + + // clone it, so we can return an instance of FSStream + stream = Object.assign(new FS.FSStream(), stream); + if (fd == -1) { + fd = FS.nextfd(); + } + stream.fd = fd; + FS.streams[fd] = stream; + return stream; + }, + closeStream(fd) { + FS.streams[fd] = null; + }, + dupStream(origStream, fd = -1) { + var stream = FS.createStream(origStream, fd); + stream.stream_ops?.dup?.(stream); + return stream; + }, + doSetAttr(stream, node, attr) { + var setattr = stream?.stream_ops.setattr; + var arg = setattr ? stream : node; + setattr ??= node.node_ops.setattr; + FS.checkOpExists(setattr, 63) + setattr(arg, attr); + }, + chrdev_stream_ops:{ + open(stream) { + var device = FS.getDevice(stream.node.rdev); + // override node's stream ops with the device's + stream.stream_ops = device.stream_ops; + // forward the open call + stream.stream_ops.open?.(stream); + }, + llseek() { + throw new FS.ErrnoError(70); + }, + }, + major:(dev) => ((dev) >> 8), + minor:(dev) => ((dev) & 0xff), + makedev:(ma, mi) => ((ma) << 8 | (mi)), + registerDevice(dev, ops) { + FS.devices[dev] = { stream_ops: ops }; + }, + getDevice:(dev) => FS.devices[dev], + getMounts(mount) { + var mounts = []; + var check = [mount]; + + while (check.length) { + var m = check.pop(); + + mounts.push(m); + + check.push(...m.mounts); + } + + return mounts; + }, + syncfs(populate, callback) { + if (typeof populate == 'function') { + callback = populate; + populate = false; + } + + FS.syncFSRequests++; + + if (FS.syncFSRequests > 1) { + err(`warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`); + } + + var mounts = FS.getMounts(FS.root.mount); + var completed = 0; + + function doCallback(errCode) { + assert(FS.syncFSRequests > 0); + FS.syncFSRequests--; + return callback(errCode); + } + + function done(errCode) { + if (errCode) { + if (!done.errored) { + done.errored = true; + return doCallback(errCode); + } + return; + } + if (++completed >= mounts.length) { + doCallback(null); + } + }; + + // sync all mounts + mounts.forEach((mount) => { + if (!mount.type.syncfs) { + return done(null); + } + mount.type.syncfs(mount, populate, done); + }); + }, + mount(type, opts, mountpoint) { + if (typeof type == 'string') { + // The filesystem was not included, and instead we have an error + // message stored in the variable. 
+ throw type; + } + var root = mountpoint === '/'; + var pseudo = !mountpoint; + var node; + + if (root && FS.root) { + throw new FS.ErrnoError(10); + } else if (!root && !pseudo) { + var lookup = FS.lookupPath(mountpoint, { follow_mount: false }); + + mountpoint = lookup.path; // use the absolute path + node = lookup.node; + + if (FS.isMountpoint(node)) { + throw new FS.ErrnoError(10); + } + + if (!FS.isDir(node.mode)) { + throw new FS.ErrnoError(54); + } + } + + var mount = { + type, + opts, + mountpoint, + mounts: [] + }; + + // create a root node for the fs + var mountRoot = type.mount(mount); + mountRoot.mount = mount; + mount.root = mountRoot; + + if (root) { + FS.root = mountRoot; + } else if (node) { + // set as a mountpoint + node.mounted = mount; + + // add the new mount to the current mount's children + if (node.mount) { + node.mount.mounts.push(mount); + } + } + + return mountRoot; + }, + unmount(mountpoint) { + var lookup = FS.lookupPath(mountpoint, { follow_mount: false }); + + if (!FS.isMountpoint(lookup.node)) { + throw new FS.ErrnoError(28); + } + + // destroy the nodes for this mount, and all its child mounts + var node = lookup.node; + var mount = node.mounted; + var mounts = FS.getMounts(mount); + + Object.keys(FS.nameTable).forEach((hash) => { + var current = FS.nameTable[hash]; + + while (current) { + var next = current.name_next; + + if (mounts.includes(current.mount)) { + FS.destroyNode(current); + } + + current = next; + } + }); + + // no longer a mountpoint + node.mounted = null; + + // remove this mount from the child mounts + var idx = node.mount.mounts.indexOf(mount); + assert(idx !== -1); + node.mount.mounts.splice(idx, 1); + }, + lookup(parent, name) { + return parent.node_ops.lookup(parent, name); + }, + mknod(path, mode, dev) { + var lookup = FS.lookupPath(path, { parent: true }); + var parent = lookup.node; + var name = PATH.basename(path); + if (!name) { + throw new FS.ErrnoError(28); + } + if (name === '.' || name === '..') { + throw new FS.ErrnoError(20); + } + var errCode = FS.mayCreate(parent, name); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.mknod) { + throw new FS.ErrnoError(63); + } + return parent.node_ops.mknod(parent, name, mode, dev); + }, + statfs(path) { + return FS.statfsNode(FS.lookupPath(path, {follow: true}).node); + }, + statfsStream(stream) { + // We keep a separate statfsStream function because noderawfs overrides + // it. In noderawfs, stream.node is sometimes null. Instead, we need to + // look at stream.path. + return FS.statfsNode(stream.node); + }, + statfsNode(node) { + // NOTE: None of the defaults here are true. We're just returning safe and + // sane values. Currently nodefs and rawfs replace these defaults, + // other file systems leave them alone. 
+ var rtn = { + bsize: 4096, + frsize: 4096, + blocks: 1e6, + bfree: 5e5, + bavail: 5e5, + files: FS.nextInode, + ffree: FS.nextInode - 1, + fsid: 42, + flags: 2, + namelen: 255, + }; + + if (node.node_ops.statfs) { + Object.assign(rtn, node.node_ops.statfs(node.mount.opts.root)); + } + return rtn; + }, + create(path, mode = 0o666) { + mode &= 4095; + mode |= 32768; + return FS.mknod(path, mode, 0); + }, + mkdir(path, mode = 0o777) { + mode &= 511 | 512; + mode |= 16384; + return FS.mknod(path, mode, 0); + }, + mkdirTree(path, mode) { + var dirs = path.split('/'); + var d = ''; + for (var dir of dirs) { + if (!dir) continue; + if (d || PATH.isAbs(path)) d += '/'; + d += dir; + try { + FS.mkdir(d, mode); + } catch(e) { + if (e.errno != 20) throw e; + } + } + }, + mkdev(path, mode, dev) { + if (typeof dev == 'undefined') { + dev = mode; + mode = 0o666; + } + mode |= 8192; + return FS.mknod(path, mode, dev); + }, + symlink(oldpath, newpath) { + if (!PATH_FS.resolve(oldpath)) { + throw new FS.ErrnoError(44); + } + var lookup = FS.lookupPath(newpath, { parent: true }); + var parent = lookup.node; + if (!parent) { + throw new FS.ErrnoError(44); + } + var newname = PATH.basename(newpath); + var errCode = FS.mayCreate(parent, newname); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.symlink) { + throw new FS.ErrnoError(63); + } + return parent.node_ops.symlink(parent, newname, oldpath); + }, + rename(old_path, new_path) { + var old_dirname = PATH.dirname(old_path); + var new_dirname = PATH.dirname(new_path); + var old_name = PATH.basename(old_path); + var new_name = PATH.basename(new_path); + // parents must exist + var lookup, old_dir, new_dir; + + // let the errors from non existent directories percolate up + lookup = FS.lookupPath(old_path, { parent: true }); + old_dir = lookup.node; + lookup = FS.lookupPath(new_path, { parent: true }); + new_dir = lookup.node; + + if (!old_dir || !new_dir) throw new FS.ErrnoError(44); + // need to be part of the same mount + if (old_dir.mount !== new_dir.mount) { + throw new FS.ErrnoError(75); + } + // source must exist + var old_node = FS.lookupNode(old_dir, old_name); + // old path should not be an ancestor of the new path + var relative = PATH_FS.relative(old_path, new_dirname); + if (relative.charAt(0) !== '.') { + throw new FS.ErrnoError(28); + } + // new path should not be an ancestor of the old path + relative = PATH_FS.relative(new_path, old_dirname); + if (relative.charAt(0) !== '.') { + throw new FS.ErrnoError(55); + } + // see if the new path already exists + var new_node; + try { + new_node = FS.lookupNode(new_dir, new_name); + } catch (e) { + // not fatal + } + // early out if nothing needs to change + if (old_node === new_node) { + return; + } + // we'll need to delete the old entry + var isdir = FS.isDir(old_node.mode); + var errCode = FS.mayDelete(old_dir, old_name, isdir); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + // need delete permissions if we'll be overwriting. + // need create permissions if new doesn't already exist. + errCode = new_node ? 
+ FS.mayDelete(new_dir, new_name, isdir) : + FS.mayCreate(new_dir, new_name); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!old_dir.node_ops.rename) { + throw new FS.ErrnoError(63); + } + if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) { + throw new FS.ErrnoError(10); + } + // if we are going to change the parent, check write permissions + if (new_dir !== old_dir) { + errCode = FS.nodePermissions(old_dir, 'w'); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + } + // remove the node from the lookup hash + FS.hashRemoveNode(old_node); + // do the underlying fs rename + try { + old_dir.node_ops.rename(old_node, new_dir, new_name); + // update old node (we do this here to avoid each backend + // needing to) + old_node.parent = new_dir; + } catch (e) { + throw e; + } finally { + // add the node back to the hash (in case node_ops.rename + // changed its name) + FS.hashAddNode(old_node); + } + }, + rmdir(path) { + var lookup = FS.lookupPath(path, { parent: true }); + var parent = lookup.node; + var name = PATH.basename(path); + var node = FS.lookupNode(parent, name); + var errCode = FS.mayDelete(parent, name, true); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.rmdir) { + throw new FS.ErrnoError(63); + } + if (FS.isMountpoint(node)) { + throw new FS.ErrnoError(10); + } + parent.node_ops.rmdir(parent, name); + FS.destroyNode(node); + }, + readdir(path) { + var lookup = FS.lookupPath(path, { follow: true }); + var node = lookup.node; + var readdir = FS.checkOpExists(node.node_ops.readdir, 54); + return readdir(node); + }, + unlink(path) { + var lookup = FS.lookupPath(path, { parent: true }); + var parent = lookup.node; + if (!parent) { + throw new FS.ErrnoError(44); + } + var name = PATH.basename(path); + var node = FS.lookupNode(parent, name); + var errCode = FS.mayDelete(parent, name, false); + if (errCode) { + // According to POSIX, we should map EISDIR to EPERM, but + // we instead do what Linux does (and we must, as we use + // the musl linux libc). + throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.unlink) { + throw new FS.ErrnoError(63); + } + if (FS.isMountpoint(node)) { + throw new FS.ErrnoError(10); + } + parent.node_ops.unlink(parent, name); + FS.destroyNode(node); + }, + readlink(path) { + var lookup = FS.lookupPath(path); + var link = lookup.node; + if (!link) { + throw new FS.ErrnoError(44); + } + if (!link.node_ops.readlink) { + throw new FS.ErrnoError(28); + } + return link.node_ops.readlink(link); + }, + stat(path, dontFollow) { + var lookup = FS.lookupPath(path, { follow: !dontFollow }); + var node = lookup.node; + var getattr = FS.checkOpExists(node.node_ops.getattr, 63); + return getattr(node); + }, + fstat(fd) { + var stream = FS.getStreamChecked(fd); + var node = stream.node; + var getattr = stream.stream_ops.getattr; + var arg = getattr ? 
stream : node; + getattr ??= node.node_ops.getattr; + FS.checkOpExists(getattr, 63) + return getattr(arg); + }, + lstat(path) { + return FS.stat(path, true); + }, + doChmod(stream, node, mode, dontFollow) { + FS.doSetAttr(stream, node, { + mode: (mode & 4095) | (node.mode & ~4095), + ctime: Date.now(), + dontFollow + }); + }, + chmod(path, mode, dontFollow) { + var node; + if (typeof path == 'string') { + var lookup = FS.lookupPath(path, { follow: !dontFollow }); + node = lookup.node; + } else { + node = path; + } + FS.doChmod(null, node, mode, dontFollow); + }, + lchmod(path, mode) { + FS.chmod(path, mode, true); + }, + fchmod(fd, mode) { + var stream = FS.getStreamChecked(fd); + FS.doChmod(stream, stream.node, mode, false); + }, + doChown(stream, node, dontFollow) { + FS.doSetAttr(stream, node, { + timestamp: Date.now(), + dontFollow + // we ignore the uid / gid for now + }); + }, + chown(path, uid, gid, dontFollow) { + var node; + if (typeof path == 'string') { + var lookup = FS.lookupPath(path, { follow: !dontFollow }); + node = lookup.node; + } else { + node = path; + } + FS.doChown(null, node, dontFollow); + }, + lchown(path, uid, gid) { + FS.chown(path, uid, gid, true); + }, + fchown(fd, uid, gid) { + var stream = FS.getStreamChecked(fd); + FS.doChown(stream, stream.node, false); + }, + doTruncate(stream, node, len) { + if (FS.isDir(node.mode)) { + throw new FS.ErrnoError(31); + } + if (!FS.isFile(node.mode)) { + throw new FS.ErrnoError(28); + } + var errCode = FS.nodePermissions(node, 'w'); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + FS.doSetAttr(stream, node, { + size: len, + timestamp: Date.now() + }); + }, + truncate(path, len) { + if (len < 0) { + throw new FS.ErrnoError(28); + } + var node; + if (typeof path == 'string') { + var lookup = FS.lookupPath(path, { follow: true }); + node = lookup.node; + } else { + node = path; + } + FS.doTruncate(null, node, len); + }, + ftruncate(fd, len) { + var stream = FS.getStreamChecked(fd); + if (len < 0 || (stream.flags & 2097155) === 0) { + throw new FS.ErrnoError(28); + } + FS.doTruncate(stream, stream.node, len); + }, + utime(path, atime, mtime) { + var lookup = FS.lookupPath(path, { follow: true }); + var node = lookup.node; + var setattr = FS.checkOpExists(node.node_ops.setattr, 63); + setattr(node, { + atime: atime, + mtime: mtime + }); + }, + open(path, flags, mode = 0o666) { + if (path === "") { + throw new FS.ErrnoError(44); + } + flags = typeof flags == 'string' ? FS_modeStringToFlags(flags) : flags; + if ((flags & 64)) { + mode = (mode & 4095) | 32768; + } else { + mode = 0; + } + var node; + var isDirPath; + if (typeof path == 'object') { + node = path; + } else { + isDirPath = path.endsWith("/"); + // noent_okay makes it so that if the final component of the path + // doesn't exist, lookupPath returns `node: undefined`. `path` will be + // updated to point to the target of all symlinks. + var lookup = FS.lookupPath(path, { + follow: !(flags & 131072), + noent_okay: true + }); + node = lookup.node; + path = lookup.path; + } + // perhaps we need to create the node + var created = false; + if ((flags & 64)) { + if (node) { + // if O_CREAT and O_EXCL are set, error out if the node already exists + if ((flags & 128)) { + throw new FS.ErrnoError(20); + } + } else if (isDirPath) { + throw new FS.ErrnoError(31); + } else { + // node doesn't exist, try to create it + // Ignore the permission bits here to ensure we can `open` this new + // file below. 
We use chmod below the apply the permissions once the + // file is open. + node = FS.mknod(path, mode | 0o777, 0); + created = true; + } + } + if (!node) { + throw new FS.ErrnoError(44); + } + // can't truncate a device + if (FS.isChrdev(node.mode)) { + flags &= ~512; + } + // if asked only for a directory, then this must be one + if ((flags & 65536) && !FS.isDir(node.mode)) { + throw new FS.ErrnoError(54); + } + // check permissions, if this is not a file we just created now (it is ok to + // create and write to a file with read-only permissions; it is read-only + // for later use) + if (!created) { + var errCode = FS.mayOpen(node, flags); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + } + // do truncation if necessary + if ((flags & 512) && !created) { + FS.truncate(node, 0); + } + // we've already handled these, don't pass down to the underlying vfs + flags &= ~(128 | 512 | 131072); + + // register the stream with the filesystem + var stream = FS.createStream({ + node, + path: FS.getPath(node), // we want the absolute path to the node + flags, + seekable: true, + position: 0, + stream_ops: node.stream_ops, + // used by the file family libc calls (fopen, fwrite, ferror, etc.) + ungotten: [], + error: false + }); + // call the new stream's open function + if (stream.stream_ops.open) { + stream.stream_ops.open(stream); + } + if (created) { + FS.chmod(node, mode & 0o777); + } + if (Module['logReadFiles'] && !(flags & 1)) { + if (!(path in FS.readFiles)) { + FS.readFiles[path] = 1; + } + } + return stream; + }, + close(stream) { + if (FS.isClosed(stream)) { + throw new FS.ErrnoError(8); + } + if (stream.getdents) stream.getdents = null; // free readdir state + try { + if (stream.stream_ops.close) { + stream.stream_ops.close(stream); + } + } catch (e) { + throw e; + } finally { + FS.closeStream(stream.fd); + } + stream.fd = null; + }, + isClosed(stream) { + return stream.fd === null; + }, + llseek(stream, offset, whence) { + if (FS.isClosed(stream)) { + throw new FS.ErrnoError(8); + } + if (!stream.seekable || !stream.stream_ops.llseek) { + throw new FS.ErrnoError(70); + } + if (whence != 0 && whence != 1 && whence != 2) { + throw new FS.ErrnoError(28); + } + stream.position = stream.stream_ops.llseek(stream, offset, whence); + stream.ungotten = []; + return stream.position; + }, + read(stream, buffer, offset, length, position) { + assert(offset >= 0); + if (length < 0 || position < 0) { + throw new FS.ErrnoError(28); + } + if (FS.isClosed(stream)) { + throw new FS.ErrnoError(8); + } + if ((stream.flags & 2097155) === 1) { + throw new FS.ErrnoError(8); + } + if (FS.isDir(stream.node.mode)) { + throw new FS.ErrnoError(31); + } + if (!stream.stream_ops.read) { + throw new FS.ErrnoError(28); + } + var seeking = typeof position != 'undefined'; + if (!seeking) { + position = stream.position; + } else if (!stream.seekable) { + throw new FS.ErrnoError(70); + } + var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position); + if (!seeking) stream.position += bytesRead; + return bytesRead; + }, + write(stream, buffer, offset, length, position, canOwn) { + assert(offset >= 0); + if (length < 0 || position < 0) { + throw new FS.ErrnoError(28); + } + if (FS.isClosed(stream)) { + throw new FS.ErrnoError(8); + } + if ((stream.flags & 2097155) === 0) { + throw new FS.ErrnoError(8); + } + if (FS.isDir(stream.node.mode)) { + throw new FS.ErrnoError(31); + } + if (!stream.stream_ops.write) { + throw new FS.ErrnoError(28); + } + if (stream.seekable && stream.flags & 1024) { + // 
seek to the end before writing in append mode + FS.llseek(stream, 0, 2); + } + var seeking = typeof position != 'undefined'; + if (!seeking) { + position = stream.position; + } else if (!stream.seekable) { + throw new FS.ErrnoError(70); + } + var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn); + if (!seeking) stream.position += bytesWritten; + return bytesWritten; + }, + mmap(stream, length, position, prot, flags) { + // User requests writing to file (prot & PROT_WRITE != 0). + // Checking if we have permissions to write to the file unless + // MAP_PRIVATE flag is set. According to POSIX spec it is possible + // to write to file opened in read-only mode with MAP_PRIVATE flag, + // as all modifications will be visible only in the memory of + // the current process. + if ((prot & 2) !== 0 + && (flags & 2) === 0 + && (stream.flags & 2097155) !== 2) { + throw new FS.ErrnoError(2); + } + if ((stream.flags & 2097155) === 1) { + throw new FS.ErrnoError(2); + } + if (!stream.stream_ops.mmap) { + throw new FS.ErrnoError(43); + } + if (!length) { + throw new FS.ErrnoError(28); + } + return stream.stream_ops.mmap(stream, length, position, prot, flags); + }, + msync(stream, buffer, offset, length, mmapFlags) { + assert(offset >= 0); + if (!stream.stream_ops.msync) { + return 0; + } + return stream.stream_ops.msync(stream, buffer, offset, length, mmapFlags); + }, + ioctl(stream, cmd, arg) { + if (!stream.stream_ops.ioctl) { + throw new FS.ErrnoError(59); + } + return stream.stream_ops.ioctl(stream, cmd, arg); + }, + readFile(path, opts = {}) { + opts.flags = opts.flags || 0; + opts.encoding = opts.encoding || 'binary'; + if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') { + throw new Error(`Invalid encoding type "${opts.encoding}"`); + } + var stream = FS.open(path, opts.flags); + var stat = FS.stat(path); + var length = stat.size; + var buf = new Uint8Array(length); + FS.read(stream, buf, 0, length, 0); + if (opts.encoding === 'utf8') { + buf = UTF8ArrayToString(buf); + } + FS.close(stream); + return buf; + }, + writeFile(path, data, opts = {}) { + opts.flags = opts.flags || 577; + var stream = FS.open(path, opts.flags, opts.mode); + if (typeof data == 'string') { + data = new Uint8Array(intArrayFromString(data, true)); + } + if (ArrayBuffer.isView(data)) { + FS.write(stream, data, 0, data.byteLength, undefined, opts.canOwn); + } else { + throw new Error('Unsupported data type'); + } + FS.close(stream); + }, + cwd:() => FS.currentPath, + chdir(path) { + var lookup = FS.lookupPath(path, { follow: true }); + if (lookup.node === null) { + throw new FS.ErrnoError(44); + } + if (!FS.isDir(lookup.node.mode)) { + throw new FS.ErrnoError(54); + } + var errCode = FS.nodePermissions(lookup.node, 'x'); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + FS.currentPath = lookup.path; + }, + createDefaultDirectories() { + FS.mkdir('/tmp'); + FS.mkdir('/home'); + FS.mkdir('/home/web_user'); + }, + createDefaultDevices() { + // create /dev + FS.mkdir('/dev'); + // setup /dev/null + FS.registerDevice(FS.makedev(1, 3), { + read: () => 0, + write: (stream, buffer, offset, length, pos) => length, + llseek: () => 0, + }); + FS.mkdev('/dev/null', FS.makedev(1, 3)); + // setup /dev/tty and /dev/tty1 + // stderr needs to print output using err() rather than out() + // so we register a second tty just for it. 
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops); + TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops); + FS.mkdev('/dev/tty', FS.makedev(5, 0)); + FS.mkdev('/dev/tty1', FS.makedev(6, 0)); + // setup /dev/[u]random + // use a buffer to avoid overhead of individual crypto calls per byte + var randomBuffer = new Uint8Array(1024), randomLeft = 0; + var randomByte = () => { + if (randomLeft === 0) { + randomFill(randomBuffer); + randomLeft = randomBuffer.byteLength; + } + return randomBuffer[--randomLeft]; + }; + FS.createDevice('/dev', 'random', randomByte); + FS.createDevice('/dev', 'urandom', randomByte); + // we're not going to emulate the actual shm device, + // just create the tmp dirs that reside in it commonly + FS.mkdir('/dev/shm'); + FS.mkdir('/dev/shm/tmp'); + }, + createSpecialDirectories() { + // create /proc/self/fd which allows /proc/self/fd/6 => readlink gives the + // name of the stream for fd 6 (see test_unistd_ttyname) + FS.mkdir('/proc'); + var proc_self = FS.mkdir('/proc/self'); + FS.mkdir('/proc/self/fd'); + FS.mount({ + mount() { + var node = FS.createNode(proc_self, 'fd', 16895, 73); + node.stream_ops = { + llseek: MEMFS.stream_ops.llseek, + }; + node.node_ops = { + lookup(parent, name) { + var fd = +name; + var stream = FS.getStreamChecked(fd); + var ret = { + parent: null, + mount: { mountpoint: 'fake' }, + node_ops: { readlink: () => stream.path }, + id: fd + 1, + }; + ret.parent = ret; // make it look like a simple root node + return ret; + }, + readdir() { + return Array.from(FS.streams.entries()) + .filter(([k, v]) => v) + .map(([k, v]) => k.toString()); + } + }; + return node; + } + }, {}, '/proc/self/fd'); + }, + createStandardStreams(input, output, error) { + // TODO deprecate the old functionality of a single + // input / output callback and that utilizes FS.createDevice + // and instead require a unique set of stream ops + + // by default, we symlink the standard streams to the + // default tty devices. however, if the standard streams + // have been overwritten we create a unique device for + // them instead. + if (input) { + FS.createDevice('/dev', 'stdin', input); + } else { + FS.symlink('/dev/tty', '/dev/stdin'); + } + if (output) { + FS.createDevice('/dev', 'stdout', null, output); + } else { + FS.symlink('/dev/tty', '/dev/stdout'); + } + if (error) { + FS.createDevice('/dev', 'stderr', null, error); + } else { + FS.symlink('/dev/tty1', '/dev/stderr'); + } + + // open default streams for the stdin, stdout and stderr devices + var stdin = FS.open('/dev/stdin', 0); + var stdout = FS.open('/dev/stdout', 1); + var stderr = FS.open('/dev/stderr', 1); + assert(stdin.fd === 0, `invalid handle for stdin (${stdin.fd})`); + assert(stdout.fd === 1, `invalid handle for stdout (${stdout.fd})`); + assert(stderr.fd === 2, `invalid handle for stderr (${stderr.fd})`); + }, + staticInit() { + FS.nameTable = new Array(4096); + + FS.mount(MEMFS, {}, '/'); + + FS.createDefaultDirectories(); + FS.createDefaultDevices(); + FS.createSpecialDirectories(); + + FS.filesystems = { + 'MEMFS': MEMFS, + }; + }, + init(input, output, error) { + assert(!FS.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)'); + FS.initialized = true; + + // Allow Module.stdin etc. 
to provide defaults, if none explicitly passed to us here + input ??= Module['stdin']; + output ??= Module['stdout']; + error ??= Module['stderr']; + + FS.createStandardStreams(input, output, error); + }, + quit() { + FS.initialized = false; + // force-flush all streams, so we get musl std streams printed out + _fflush(0); + // close all of our streams + for (var stream of FS.streams) { + if (stream) { + FS.close(stream); + } + } + }, + findObject(path, dontResolveLastLink) { + var ret = FS.analyzePath(path, dontResolveLastLink); + if (!ret.exists) { + return null; + } + return ret.object; + }, + analyzePath(path, dontResolveLastLink) { + // operate from within the context of the symlink's target + try { + var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink }); + path = lookup.path; + } catch (e) { + } + var ret = { + isRoot: false, exists: false, error: 0, name: null, path: null, object: null, + parentExists: false, parentPath: null, parentObject: null + }; + try { + var lookup = FS.lookupPath(path, { parent: true }); + ret.parentExists = true; + ret.parentPath = lookup.path; + ret.parentObject = lookup.node; + ret.name = PATH.basename(path); + lookup = FS.lookupPath(path, { follow: !dontResolveLastLink }); + ret.exists = true; + ret.path = lookup.path; + ret.object = lookup.node; + ret.name = lookup.node.name; + ret.isRoot = lookup.path === '/'; + } catch (e) { + ret.error = e.errno; + }; + return ret; + }, + createPath(parent, path, canRead, canWrite) { + parent = typeof parent == 'string' ? parent : FS.getPath(parent); + var parts = path.split('/').reverse(); + while (parts.length) { + var part = parts.pop(); + if (!part) continue; + var current = PATH.join2(parent, part); + try { + FS.mkdir(current); + } catch (e) { + if (e.errno != 20) throw e; + } + parent = current; + } + return current; + }, + createFile(parent, name, properties, canRead, canWrite) { + var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name); + var mode = FS_getMode(canRead, canWrite); + return FS.create(path, mode); + }, + createDataFile(parent, name, data, canRead, canWrite, canOwn) { + var path = name; + if (parent) { + parent = typeof parent == 'string' ? parent : FS.getPath(parent); + path = name ? PATH.join2(parent, name) : parent; + } + var mode = FS_getMode(canRead, canWrite); + var node = FS.create(path, mode); + if (data) { + if (typeof data == 'string') { + var arr = new Array(data.length); + for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i); + data = arr; + } + // make sure we can write to the file + FS.chmod(node, mode | 146); + var stream = FS.open(node, 577); + FS.write(stream, data, 0, data.length, 0, canOwn); + FS.close(stream); + FS.chmod(node, mode); + } + }, + createDevice(parent, name, input, output) { + var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name); + var mode = FS_getMode(!!input, !!output); + FS.createDevice.major ??= 64; + var dev = FS.makedev(FS.createDevice.major++, 0); + // Create a fake device that a set of stream ops to emulate + // the old behavior. 
+ FS.registerDevice(dev, { + open(stream) { + stream.seekable = false; + }, + close(stream) { + // flush any pending line data + if (output?.buffer?.length) { + output(10); + } + }, + read(stream, buffer, offset, length, pos /* ignored */) { + var bytesRead = 0; + for (var i = 0; i < length; i++) { + var result; + try { + result = input(); + } catch (e) { + throw new FS.ErrnoError(29); + } + if (result === undefined && bytesRead === 0) { + throw new FS.ErrnoError(6); + } + if (result === null || result === undefined) break; + bytesRead++; + buffer[offset+i] = result; + } + if (bytesRead) { + stream.node.atime = Date.now(); + } + return bytesRead; + }, + write(stream, buffer, offset, length, pos) { + for (var i = 0; i < length; i++) { + try { + output(buffer[offset+i]); + } catch (e) { + throw new FS.ErrnoError(29); + } + } + if (length) { + stream.node.mtime = stream.node.ctime = Date.now(); + } + return i; + } + }); + return FS.mkdev(path, mode, dev); + }, + forceLoadFile(obj) { + if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true; + if (typeof XMLHttpRequest != 'undefined') { + throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread."); + } else { // Command-line. + try { + obj.contents = readBinary(obj.url); + obj.usedBytes = obj.contents.length; + } catch (e) { + throw new FS.ErrnoError(29); + } + } + }, + createLazyFile(parent, name, url, canRead, canWrite) { + // Lazy chunked Uint8Array (implements get and length from Uint8Array). + // Actual getting is abstracted away for eventual reuse. + class LazyUint8Array { + lengthKnown = false; + chunks = []; // Loaded chunks. Index is the chunk number + get(idx) { + if (idx > this.length-1 || idx < 0) { + return undefined; + } + var chunkOffset = idx % this.chunkSize; + var chunkNum = (idx / this.chunkSize)|0; + return this.getter(chunkNum)[chunkOffset]; + } + setDataGetter(getter) { + this.getter = getter; + } + cacheLength() { + // Find length + var xhr = new XMLHttpRequest(); + xhr.open('HEAD', url, false); + xhr.send(null); + if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status); + var datalength = Number(xhr.getResponseHeader("Content-length")); + var header; + var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes"; + var usesGzip = (header = xhr.getResponseHeader("Content-Encoding")) && header === "gzip"; + + var chunkSize = 1024*1024; // Chunk size in bytes + + if (!hasByteServing) chunkSize = datalength; + + // Function to get a range from the remote URL. + var doXHR = (from, to) => { + if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!"); + if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!"); + + // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available. + var xhr = new XMLHttpRequest(); + xhr.open('GET', url, false); + if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to); + + // Some hints to the browser that we want binary data. + xhr.responseType = 'arraybuffer'; + if (xhr.overrideMimeType) { + xhr.overrideMimeType('text/plain; charset=x-user-defined'); + } + + xhr.send(null); + if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". 
Status: " + xhr.status); + if (xhr.response !== undefined) { + return new Uint8Array(/** @type{Array} */(xhr.response || [])); + } + return intArrayFromString(xhr.responseText || '', true); + }; + var lazyArray = this; + lazyArray.setDataGetter((chunkNum) => { + var start = chunkNum * chunkSize; + var end = (chunkNum+1) * chunkSize - 1; // including this byte + end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block + if (typeof lazyArray.chunks[chunkNum] == 'undefined') { + lazyArray.chunks[chunkNum] = doXHR(start, end); + } + if (typeof lazyArray.chunks[chunkNum] == 'undefined') throw new Error('doXHR failed!'); + return lazyArray.chunks[chunkNum]; + }); + + if (usesGzip || !datalength) { + // if the server uses gzip or doesn't supply the length, we have to download the whole file to get the (uncompressed) length + chunkSize = datalength = 1; // this will force getter(0)/doXHR do download the whole file + datalength = this.getter(0).length; + chunkSize = datalength; + out("LazyFiles on gzip forces download of the whole file when length is accessed"); + } + + this._length = datalength; + this._chunkSize = chunkSize; + this.lengthKnown = true; + } + get length() { + if (!this.lengthKnown) { + this.cacheLength(); + } + return this._length; + } + get chunkSize() { + if (!this.lengthKnown) { + this.cacheLength(); + } + return this._chunkSize; + } + } + + if (typeof XMLHttpRequest != 'undefined') { + if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc'; + var lazyArray = new LazyUint8Array(); + var properties = { isDevice: false, contents: lazyArray }; + } else { + var properties = { isDevice: false, url: url }; + } + + var node = FS.createFile(parent, name, properties, canRead, canWrite); + // This is a total hack, but I want to get this lazy file code out of the + // core of MEMFS. If we want to keep this lazy file concept I feel it should + // be its own thin LAZYFS proxying calls to MEMFS. + if (properties.contents) { + node.contents = properties.contents; + } else if (properties.url) { + node.contents = null; + node.url = properties.url; + } + // Add a function that defers querying the file size until it is asked the first time. 
+ Object.defineProperties(node, { + usedBytes: { + get: function() { return this.contents.length; } + } + }); + // override each stream op with one that tries to force load the lazy file first + var stream_ops = {}; + var keys = Object.keys(node.stream_ops); + keys.forEach((key) => { + var fn = node.stream_ops[key]; + stream_ops[key] = (...args) => { + FS.forceLoadFile(node); + return fn(...args); + }; + }); + function writeChunks(stream, buffer, offset, length, position) { + var contents = stream.node.contents; + if (position >= contents.length) + return 0; + var size = Math.min(contents.length - position, length); + assert(size >= 0); + if (contents.slice) { // normal array + for (var i = 0; i < size; i++) { + buffer[offset + i] = contents[position + i]; + } + } else { + for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR + buffer[offset + i] = contents.get(position + i); + } + } + return size; + } + // use a custom read function + stream_ops.read = (stream, buffer, offset, length, position) => { + FS.forceLoadFile(node); + return writeChunks(stream, buffer, offset, length, position) + }; + // use a custom mmap function + stream_ops.mmap = (stream, length, position, prot, flags) => { + FS.forceLoadFile(node); + var ptr = mmapAlloc(length); + if (!ptr) { + throw new FS.ErrnoError(48); + } + writeChunks(stream, HEAP8, ptr, length, position); + return { ptr, allocated: true }; + }; + node.stream_ops = stream_ops; + return node; + }, + absolutePath() { + abort('FS.absolutePath has been removed; use PATH_FS.resolve instead'); + }, + createFolder() { + abort('FS.createFolder has been removed; use FS.mkdir instead'); + }, + createLink() { + abort('FS.createLink has been removed; use FS.symlink instead'); + }, + joinPath() { + abort('FS.joinPath has been removed; use PATH.join instead'); + }, + mmapAlloc() { + abort('FS.mmapAlloc has been replaced by the top level function mmapAlloc'); + }, + standardizePath() { + abort('FS.standardizePath has been removed; use PATH.normalize instead'); + }, + }; + + var SYSCALLS = { + DEFAULT_POLLMASK:5, + calculateAt(dirfd, path, allowEmpty) { + if (PATH.isAbs(path)) { + return path; + } + // relative path + var dir; + if (dirfd === -100) { + dir = FS.cwd(); + } else { + var dirstream = SYSCALLS.getStreamFromFD(dirfd); + dir = dirstream.path; + } + if (path.length == 0) { + if (!allowEmpty) { + throw new FS.ErrnoError(44);; + } + return dir; + } + return dir + '/' + path; + }, + writeStat(buf, stat) { + HEAPU32[((buf)>>2)] = stat.dev; + HEAPU32[(((buf)+(4))>>2)] = stat.mode; + HEAPU32[(((buf)+(8))>>2)] = stat.nlink; + HEAPU32[(((buf)+(12))>>2)] = stat.uid; + HEAPU32[(((buf)+(16))>>2)] = stat.gid; + HEAPU32[(((buf)+(20))>>2)] = stat.rdev; + HEAP64[(((buf)+(24))>>3)] = BigInt(stat.size); + HEAP32[(((buf)+(32))>>2)] = 4096; + HEAP32[(((buf)+(36))>>2)] = stat.blocks; + var atime = stat.atime.getTime(); + var mtime = stat.mtime.getTime(); + var ctime = stat.ctime.getTime(); + HEAP64[(((buf)+(40))>>3)] = BigInt(Math.floor(atime / 1000)); + HEAPU32[(((buf)+(48))>>2)] = (atime % 1000) * 1000 * 1000; + HEAP64[(((buf)+(56))>>3)] = BigInt(Math.floor(mtime / 1000)); + HEAPU32[(((buf)+(64))>>2)] = (mtime % 1000) * 1000 * 1000; + HEAP64[(((buf)+(72))>>3)] = BigInt(Math.floor(ctime / 1000)); + HEAPU32[(((buf)+(80))>>2)] = (ctime % 1000) * 1000 * 1000; + HEAP64[(((buf)+(88))>>3)] = BigInt(stat.ino); + return 0; + }, + writeStatFs(buf, stats) { + HEAPU32[(((buf)+(4))>>2)] = stats.bsize; + HEAPU32[(((buf)+(60))>>2)] = stats.bsize; + HEAP64[(((buf)+(8))>>3)] 
= BigInt(stats.blocks); + HEAP64[(((buf)+(16))>>3)] = BigInt(stats.bfree); + HEAP64[(((buf)+(24))>>3)] = BigInt(stats.bavail); + HEAP64[(((buf)+(32))>>3)] = BigInt(stats.files); + HEAP64[(((buf)+(40))>>3)] = BigInt(stats.ffree); + HEAPU32[(((buf)+(48))>>2)] = stats.fsid; + HEAPU32[(((buf)+(64))>>2)] = stats.flags; // ST_NOSUID + HEAPU32[(((buf)+(56))>>2)] = stats.namelen; + }, + doMsync(addr, stream, len, flags, offset) { + if (!FS.isFile(stream.node.mode)) { + throw new FS.ErrnoError(43); + } + if (flags & 2) { + // MAP_PRIVATE calls need not to be synced back to underlying fs + return 0; + } + var buffer = HEAPU8.slice(addr, addr + len); + FS.msync(stream, buffer, offset, len, flags); + }, + getStreamFromFD(fd) { + var stream = FS.getStreamChecked(fd); + return stream; + }, + varargs:undefined, + getStr(ptr) { + var ret = UTF8ToString(ptr); + return ret; + }, + }; + function ___syscall_chmod(path, mode) { + try { + + path = SYSCALLS.getStr(path); + FS.chmod(path, mode); + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + var SOCKFS = { + websocketArgs:{ + }, + callbacks:{ + }, + on(event, callback) { + SOCKFS.callbacks[event] = callback; + }, + emit(event, param) { + SOCKFS.callbacks[event]?.(param); + }, + mount(mount) { + // The incomming Module['websocket'] can be used for configuring + // configuring subprotocol/url, etc + SOCKFS.websocketArgs = Module['websocket'] || {}; + // Add the Event registration mechanism to the exported websocket configuration + // object so we can register network callbacks from native JavaScript too. + // For more documentation see system/include/emscripten/emscripten.h + (Module['websocket'] ??= {})['on'] = SOCKFS.on; + + return FS.createNode(null, '/', 16895, 0); + }, + createSocket(family, type, protocol) { + // Emscripten only supports AF_INET + if (family != 2) { + throw new FS.ErrnoError(5); + } + type &= ~526336; // Some applications may pass it; it makes no sense for a single process. + // Emscripten only supports SOCK_STREAM and SOCK_DGRAM + if (type != 1 && type != 2) { + throw new FS.ErrnoError(28); + } + var streaming = type == 1; + if (streaming && protocol && protocol != 6) { + throw new FS.ErrnoError(66); // if SOCK_STREAM, must be tcp or 0. 
+ } + + // create our internal socket structure + var sock = { + family, + type, + protocol, + server: null, + error: null, // Used in getsockopt for SOL_SOCKET/SO_ERROR test + peers: {}, + pending: [], + recv_queue: [], + sock_ops: SOCKFS.websocket_sock_ops + }; + + // create the filesystem node to store the socket structure + var name = SOCKFS.nextname(); + var node = FS.createNode(SOCKFS.root, name, 49152, 0); + node.sock = sock; + + // and the wrapping stream that enables library functions such + // as read and write to indirectly interact with the socket + var stream = FS.createStream({ + path: name, + node, + flags: 2, + seekable: false, + stream_ops: SOCKFS.stream_ops + }); + + // map the new stream to the socket structure (sockets have a 1:1 + // relationship with a stream) + sock.stream = stream; + + return sock; + }, + getSocket(fd) { + var stream = FS.getStream(fd); + if (!stream || !FS.isSocket(stream.node.mode)) { + return null; + } + return stream.node.sock; + }, + stream_ops:{ + poll(stream) { + var sock = stream.node.sock; + return sock.sock_ops.poll(sock); + }, + ioctl(stream, request, varargs) { + var sock = stream.node.sock; + return sock.sock_ops.ioctl(sock, request, varargs); + }, + read(stream, buffer, offset, length, position /* ignored */) { + var sock = stream.node.sock; + var msg = sock.sock_ops.recvmsg(sock, length); + if (!msg) { + // socket is closed + return 0; + } + buffer.set(msg.buffer, offset); + return msg.buffer.length; + }, + write(stream, buffer, offset, length, position /* ignored */) { + var sock = stream.node.sock; + return sock.sock_ops.sendmsg(sock, buffer, offset, length); + }, + close(stream) { + var sock = stream.node.sock; + sock.sock_ops.close(sock); + }, + }, + nextname() { + if (!SOCKFS.nextname.current) { + SOCKFS.nextname.current = 0; + } + return `socket[${SOCKFS.nextname.current++}]`; + }, + websocket_sock_ops:{ + createPeer(sock, addr, port) { + var ws; + + if (typeof addr == 'object') { + ws = addr; + addr = null; + port = null; + } + + if (ws) { + // for sockets that've already connected (e.g. we're the server) + // we can inspect the _socket property for the address + if (ws._socket) { + addr = ws._socket.remoteAddress; + port = ws._socket.remotePort; + } + // if we're just now initializing a connection to the remote, + // inspect the url property + else { + var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url); + if (!result) { + throw new Error('WebSocket URL must be in the format ws(s)://address:port'); + } + addr = result[1]; + port = parseInt(result[2], 10); + } + } else { + // create the actual websocket object and connect + try { + // The default value is 'ws://' the replace is needed because the compiler replaces '//' comments with '#' + // comments without checking context, so we'd end up with ws:#, the replace swaps the '#' for '//' again. + var url = 'ws://'.replace('#', '//'); + // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set. + var subProtocols = 'binary'; // The default value is 'binary' + // The default WebSocket options + var opts = undefined; + + // Fetch runtime WebSocket URL config. + if (SOCKFS.websocketArgs['url']) { + url = SOCKFS.websocketArgs['url']; + } + // Fetch runtime WebSocket subprotocol config. 
+ if (SOCKFS.websocketArgs['subprotocol']) { + subProtocols = SOCKFS.websocketArgs['subprotocol']; + } else if (SOCKFS.websocketArgs['subprotocol'] === null) { + subProtocols = 'null' + } + + if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it. + var parts = addr.split('/'); + url = url + parts[0] + ":" + port + "/" + parts.slice(1).join('/'); + } + + if (subProtocols !== 'null') { + // The regex trims the string (removes spaces at the beginning and end, then splits the string by + // , into an Array. Whitespace removal is important for Websockify and ws. + subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */); + + opts = subProtocols; + } + + // If node we use the ws library. + var WebSocketConstructor; + { + WebSocketConstructor = WebSocket; + } + ws = new WebSocketConstructor(url, opts); + ws.binaryType = 'arraybuffer'; + } catch (e) { + throw new FS.ErrnoError(23); + } + } + + var peer = { + addr, + port, + socket: ws, + msg_send_queue: [] + }; + + SOCKFS.websocket_sock_ops.addPeer(sock, peer); + SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer); + + // if this is a bound dgram socket, send the port number first to allow + // us to override the ephemeral port reported to us by remotePort on the + // remote end. + if (sock.type === 2 && typeof sock.sport != 'undefined') { + peer.msg_send_queue.push(new Uint8Array([ + 255, 255, 255, 255, + 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0), + ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff) + ])); + } + + return peer; + }, + getPeer(sock, addr, port) { + return sock.peers[addr + ':' + port]; + }, + addPeer(sock, peer) { + sock.peers[peer.addr + ':' + peer.port] = peer; + }, + removePeer(sock, peer) { + delete sock.peers[peer.addr + ':' + peer.port]; + }, + handlePeerEvents(sock, peer) { + var first = true; + + var handleOpen = function () { + + sock.connecting = false; + SOCKFS.emit('open', sock.stream.fd); + + try { + var queued = peer.msg_send_queue.shift(); + while (queued) { + peer.socket.send(queued); + queued = peer.msg_send_queue.shift(); + } + } catch (e) { + // not much we can do here in the way of proper error handling as we've already + // lied and said this data was sent. shut it down. + peer.socket.close(); + } + }; + + function handleMessage(data) { + if (typeof data == 'string') { + var encoder = new TextEncoder(); // should be utf-8 + data = encoder.encode(data); // make a typed array from the string + } else { + assert(data.byteLength !== undefined); // must receive an ArrayBuffer + if (data.byteLength == 0) { + // An empty ArrayBuffer will emit a pseudo disconnect event + // as recv/recvmsg will return zero which indicates that a socket + // has performed a shutdown although the connection has not been disconnected yet. 
+ return; + } + data = new Uint8Array(data); // make a typed array view on the array buffer + } + + // if this is the port message, override the peer's port with it + var wasfirst = first; + first = false; + if (wasfirst && + data.length === 10 && + data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 && + data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) { + // update the peer's port and it's key in the peer map + var newport = ((data[8] << 8) | data[9]); + SOCKFS.websocket_sock_ops.removePeer(sock, peer); + peer.port = newport; + SOCKFS.websocket_sock_ops.addPeer(sock, peer); + return; + } + + sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data }); + SOCKFS.emit('message', sock.stream.fd); + }; + + if (ENVIRONMENT_IS_NODE) { + peer.socket.on('open', handleOpen); + peer.socket.on('message', function(data, isBinary) { + if (!isBinary) { + return; + } + handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer + }); + peer.socket.on('close', function() { + SOCKFS.emit('close', sock.stream.fd); + }); + peer.socket.on('error', function(error) { + // Although the ws library may pass errors that may be more descriptive than + // ECONNREFUSED they are not necessarily the expected error code e.g. + // ENOTFOUND on getaddrinfo seems to be node.js specific, so using ECONNREFUSED + // is still probably the most useful thing to do. + sock.error = 14; // Used in getsockopt for SOL_SOCKET/SO_ERROR test. + SOCKFS.emit('error', [sock.stream.fd, sock.error, 'ECONNREFUSED: Connection refused']); + // don't throw + }); + } else { + peer.socket.onopen = handleOpen; + peer.socket.onclose = function() { + SOCKFS.emit('close', sock.stream.fd); + }; + peer.socket.onmessage = function peer_socket_onmessage(event) { + handleMessage(event.data); + }; + peer.socket.onerror = function(error) { + // The WebSocket spec only allows a 'simple event' to be thrown on error, + // so we only really know as much as ECONNREFUSED. + sock.error = 14; // Used in getsockopt for SOL_SOCKET/SO_ERROR test. + SOCKFS.emit('error', [sock.stream.fd, sock.error, 'ECONNREFUSED: Connection refused']); + }; + } + }, + poll(sock) { + if (sock.type === 1 && sock.server) { + // listen sockets should only say they're available for reading + // if there are pending clients. + return sock.pending.length ? (64 | 1) : 0; + } + + var mask = 0; + var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets + SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) : + null; + + if (sock.recv_queue.length || + !dest || // connection-less sockets are always ready to read + (dest && dest.socket.readyState === dest.socket.CLOSING) || + (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed + mask |= (64 | 1); + } + + if (!dest || // connection-less sockets are always ready to write + (dest && dest.socket.readyState === dest.socket.OPEN)) { + mask |= 4; + } + + if ((dest && dest.socket.readyState === dest.socket.CLOSING) || + (dest && dest.socket.readyState === dest.socket.CLOSED)) { + // When an non-blocking connect fails mark the socket as writable. + // Its up to the calling code to then use getsockopt with SO_ERROR to + // retrieve the error. 
+ // See https://man7.org/linux/man-pages/man2/connect.2.html + if (sock.connecting) { + mask |= 4; + } else { + mask |= 16; + } + } + + return mask; + }, + ioctl(sock, request, arg) { + switch (request) { + case 21531: + var bytes = 0; + if (sock.recv_queue.length) { + bytes = sock.recv_queue[0].data.length; + } + HEAP32[((arg)>>2)] = bytes; + return 0; + case 21537: + var on = HEAP32[((arg)>>2)]; + if (on) { + sock.stream.flags |= 2048; + } else { + sock.stream.flags &= ~2048; + } + return 0; + default: + return 28; + } + }, + close(sock) { + // if we've spawned a listen server, close it + if (sock.server) { + try { + sock.server.close(); + } catch (e) { + } + sock.server = null; + } + // close any peer connections + for (var peer of Object.values(sock.peers)) { + try { + peer.socket.close(); + } catch (e) { + } + SOCKFS.websocket_sock_ops.removePeer(sock, peer); + } + return 0; + }, + bind(sock, addr, port) { + if (typeof sock.saddr != 'undefined' || typeof sock.sport != 'undefined') { + throw new FS.ErrnoError(28); // already bound + } + sock.saddr = addr; + sock.sport = port; + // in order to emulate dgram sockets, we need to launch a listen server when + // binding on a connection-less socket + // note: this is only required on the server side + if (sock.type === 2) { + // close the existing server if it exists + if (sock.server) { + sock.server.close(); + sock.server = null; + } + // swallow error operation not supported error that occurs when binding in the + // browser where this isn't supported + try { + sock.sock_ops.listen(sock, 0); + } catch (e) { + if (!(e.name === 'ErrnoError')) throw e; + if (e.errno !== 138) throw e; + } + } + }, + connect(sock, addr, port) { + if (sock.server) { + throw new FS.ErrnoError(138); + } + + // TODO autobind + // if (!sock.addr && sock.type == 2) { + // } + + // early out if we're already connected / in the middle of connecting + if (typeof sock.daddr != 'undefined' && typeof sock.dport != 'undefined') { + var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport); + if (dest) { + if (dest.socket.readyState === dest.socket.CONNECTING) { + throw new FS.ErrnoError(7); + } else { + throw new FS.ErrnoError(30); + } + } + } + + // add the socket to our peer list and set our + // destination address / port to match + var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port); + sock.daddr = peer.addr; + sock.dport = peer.port; + + // because we cannot synchronously block to wait for the WebSocket + // connection to complete, we return here pretending that the connection + // was a success. + sock.connecting = true; + }, + listen(sock, backlog) { + if (!ENVIRONMENT_IS_NODE) { + throw new FS.ErrnoError(138); + } + }, + accept(listensock) { + if (!listensock.server || !listensock.pending.length) { + throw new FS.ErrnoError(28); + } + var newsock = listensock.pending.shift(); + newsock.stream.flags = listensock.stream.flags; + return newsock; + }, + getname(sock, peer) { + var addr, port; + if (peer) { + if (sock.daddr === undefined || sock.dport === undefined) { + throw new FS.ErrnoError(53); + } + addr = sock.daddr; + port = sock.dport; + } else { + // TODO saddr and sport will be set for bind()'d UDP sockets, but what + // should we be returning for TCP sockets that've been connect()'d? 
+ addr = sock.saddr || 0; + port = sock.sport || 0; + } + return { addr, port }; + }, + sendmsg(sock, buffer, offset, length, addr, port) { + if (sock.type === 2) { + // connection-less sockets will honor the message address, + // and otherwise fall back to the bound destination address + if (addr === undefined || port === undefined) { + addr = sock.daddr; + port = sock.dport; + } + // if there was no address to fall back to, error out + if (addr === undefined || port === undefined) { + throw new FS.ErrnoError(17); + } + } else { + // connection-based sockets will only use the bound + addr = sock.daddr; + port = sock.dport; + } + + // find the peer for the destination address + var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port); + + // early out if not connected with a connection-based socket + if (sock.type === 1) { + if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) { + throw new FS.ErrnoError(53); + } + } + + // create a copy of the incoming data to send, as the WebSocket API + // doesn't work entirely with an ArrayBufferView, it'll just send + // the entire underlying buffer + if (ArrayBuffer.isView(buffer)) { + offset += buffer.byteOffset; + buffer = buffer.buffer; + } + + var data = buffer.slice(offset, offset + length); + + // if we don't have a cached connectionless UDP datagram connection, or + // the TCP socket is still connecting, queue the message to be sent upon + // connect, and lie, saying the data was sent now. + if (!dest || dest.socket.readyState !== dest.socket.OPEN) { + // if we're not connected, open a new connection + if (sock.type === 2) { + if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) { + dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port); + } + } + dest.msg_send_queue.push(data); + return length; + } + + try { + // send the actual data + dest.socket.send(data); + return length; + } catch (e) { + throw new FS.ErrnoError(28); + } + }, + recvmsg(sock, length) { + // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html + if (sock.type === 1 && sock.server) { + // tcp servers should not be recv()'ing on the listen socket + throw new FS.ErrnoError(53); + } + + var queued = sock.recv_queue.shift(); + if (!queued) { + if (sock.type === 1) { + var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport); + + if (!dest) { + // if we have a destination address but are not connected, error out + throw new FS.ErrnoError(53); + } + if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) { + // return null if the socket has closed + return null; + } + // else, our socket is in a valid state but truly has nothing available + throw new FS.ErrnoError(6); + } + throw new FS.ErrnoError(6); + } + + // queued.data will be an ArrayBuffer if it's unadulterated, but if it's + // requeued TCP data it'll be an ArrayBufferView + var queuedLength = queued.data.byteLength || queued.data.length; + var queuedOffset = queued.data.byteOffset || 0; + var queuedBuffer = queued.data.buffer || queued.data; + var bytesRead = Math.min(length, queuedLength); + var res = { + buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead), + addr: queued.addr, + port: queued.port + }; + + // push back any unread data for TCP connections + if (sock.type === 1 && bytesRead < queuedLength) { + var bytesRemaining = queuedLength - bytesRead; + queued.data = new Uint8Array(queuedBuffer, queuedOffset + 
bytesRead, bytesRemaining); + sock.recv_queue.unshift(queued); + } + + return res; + }, + }, + }; + + var getSocketFromFD = (fd) => { + var socket = SOCKFS.getSocket(fd); + if (!socket) throw new FS.ErrnoError(8); + return socket; + }; + + var inetNtop4 = (addr) => + (addr & 0xff) + '.' + ((addr >> 8) & 0xff) + '.' + ((addr >> 16) & 0xff) + '.' + ((addr >> 24) & 0xff); + + + var inetNtop6 = (ints) => { + // ref: http://www.ietf.org/rfc/rfc2373.txt - section 2.5.4 + // Format for IPv4 compatible and mapped 128-bit IPv6 Addresses + // 128-bits are split into eight 16-bit words + // stored in network byte order (big-endian) + // | 80 bits | 16 | 32 bits | + // +-----------------------------------------------------------------+ + // | 10 bytes | 2 | 4 bytes | + // +--------------------------------------+--------------------------+ + // + 5 words | 1 | 2 words | + // +--------------------------------------+--------------------------+ + // |0000..............................0000|0000| IPv4 ADDRESS | (compatible) + // +--------------------------------------+----+---------------------+ + // |0000..............................0000|FFFF| IPv4 ADDRESS | (mapped) + // +--------------------------------------+----+---------------------+ + var str = ""; + var word = 0; + var longest = 0; + var lastzero = 0; + var zstart = 0; + var len = 0; + var i = 0; + var parts = [ + ints[0] & 0xffff, + (ints[0] >> 16), + ints[1] & 0xffff, + (ints[1] >> 16), + ints[2] & 0xffff, + (ints[2] >> 16), + ints[3] & 0xffff, + (ints[3] >> 16) + ]; + + // Handle IPv4-compatible, IPv4-mapped, loopback and any/unspecified addresses + + var hasipv4 = true; + var v4part = ""; + // check if the 10 high-order bytes are all zeros (first 5 words) + for (i = 0; i < 5; i++) { + if (parts[i] !== 0) { hasipv4 = false; break; } + } + + if (hasipv4) { + // low-order 32-bits store an IPv4 address (bytes 13 to 16) (last 2 words) + v4part = inetNtop4(parts[6] | (parts[7] << 16)); + // IPv4-mapped IPv6 address if 16-bit value (bytes 11 and 12) == 0xFFFF (6th word) + if (parts[5] === -1) { + str = "::ffff:"; + str += v4part; + return str; + } + // IPv4-compatible IPv6 address if 16-bit value (bytes 11 and 12) == 0x0000 (6th word) + if (parts[5] === 0) { + str = "::"; + //special case IPv6 addresses + if (v4part === "0.0.0.0") v4part = ""; // any/unspecified address + if (v4part === "0.0.0.1") v4part = "1";// loopback address + str += v4part; + return str; + } + } + + // Handle all other IPv6 addresses + + // first run to find the longest contiguous zero words + for (word = 0; word < 8; word++) { + if (parts[word] === 0) { + if (word - lastzero > 1) { + len = 0; + } + lastzero = word; + len++; + } + if (len > longest) { + longest = len; + zstart = word - longest + 1; + } + } + + for (word = 0; word < 8; word++) { + if (longest > 1) { + // compress contiguous zeros - to produce "::" + if (parts[word] === 0 && word >= zstart && word < (zstart + longest) ) { + if (word === zstart) { + str += ":"; + if (zstart === 0) str += ":"; //leading zeros case + } + continue; + } + } + // converts 16-bit words from big-endian to little-endian before converting to hex string + str += Number(_ntohs(parts[word] & 0xffff)).toString(16); + str += word < 7 ? 
":" : ""; + } + return str; + }; + + var readSockaddr = (sa, salen) => { + // family / port offsets are common to both sockaddr_in and sockaddr_in6 + var family = HEAP16[((sa)>>1)]; + var port = _ntohs(HEAPU16[(((sa)+(2))>>1)]); + var addr; + + switch (family) { + case 2: + if (salen !== 16) { + return { errno: 28 }; + } + addr = HEAP32[(((sa)+(4))>>2)]; + addr = inetNtop4(addr); + break; + case 10: + if (salen !== 28) { + return { errno: 28 }; + } + addr = [ + HEAP32[(((sa)+(8))>>2)], + HEAP32[(((sa)+(12))>>2)], + HEAP32[(((sa)+(16))>>2)], + HEAP32[(((sa)+(20))>>2)] + ]; + addr = inetNtop6(addr); + break; + default: + return { errno: 5 }; + } + + return { family: family, addr: addr, port: port }; + }; + + + var inetPton4 = (str) => { + var b = str.split('.'); + for (var i = 0; i < 4; i++) { + var tmp = Number(b[i]); + if (isNaN(tmp)) return null; + b[i] = tmp; + } + return (b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24)) >>> 0; + }; + + var inetPton6 = (str) => { + var words; + var w, offset, z, i; + /* http://home.deds.nl/~aeron/regex/ */ + var valid6regx = /^((?=.*::)(?!.*::.+::)(::)?([\dA-F]{1,4}:(:|\b)|){5}|([\dA-F]{1,4}:){6})((([\dA-F]{1,4}((?!\3)::|:\b|$))|(?!\2\3)){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})$/i + var parts = []; + if (!valid6regx.test(str)) { + return null; + } + if (str === "::") { + return [0, 0, 0, 0, 0, 0, 0, 0]; + } + // Z placeholder to keep track of zeros when splitting the string on ":" + if (str.startsWith("::")) { + str = str.replace("::", "Z:"); // leading zeros case + } else { + str = str.replace("::", ":Z:"); + } + + if (str.indexOf(".") > 0) { + // parse IPv4 embedded stress + str = str.replace(new RegExp('[.]', 'g'), ":"); + words = str.split(":"); + words[words.length-4] = Number(words[words.length-4]) + Number(words[words.length-3])*256; + words[words.length-3] = Number(words[words.length-2]) + Number(words[words.length-1])*256; + words = words.slice(0, words.length-2); + } else { + words = str.split(":"); + } + + offset = 0; z = 0; + for (w=0; w < words.length; w++) { + if (typeof words[w] == 'string') { + if (words[w] === 'Z') { + // compressed zeros - write appropriate number of zero words + for (z = 0; z < (8 - words.length+1); z++) { + parts[w+z] = 0; + } + offset = z-1; + } else { + // parse hex to field to 16-bit value and write it in network byte-order + parts[w+offset] = _htons(parseInt(words[w],16)); + } + } else { + // parsed IPv4 words + parts[w+offset] = words[w]; + } + } + return [ + (parts[1] << 16) | parts[0], + (parts[3] << 16) | parts[2], + (parts[5] << 16) | parts[4], + (parts[7] << 16) | parts[6] + ]; + }; + var DNS = { + address_map:{ + id:1, + addrs:{ + }, + names:{ + }, + }, + lookup_name(name) { + // If the name is already a valid ipv4 / ipv6 address, don't generate a fake one. + var res = inetPton4(name); + if (res !== null) { + return name; + } + res = inetPton6(name); + if (res !== null) { + return name; + } + + // See if this name is already mapped. + var addr; + + if (DNS.address_map.addrs[name]) { + addr = DNS.address_map.addrs[name]; + } else { + var id = DNS.address_map.id++; + assert(id < 65535, 'exceeded max address mappings of 65535'); + + addr = '172.29.' + (id & 0xff) + '.' 
+ (id & 0xff00); + + DNS.address_map.names[addr] = name; + DNS.address_map.addrs[name] = addr; + } + + return addr; + }, + lookup_addr(addr) { + if (DNS.address_map.names[addr]) { + return DNS.address_map.names[addr]; + } + + return null; + }, + }; + var getSocketAddress = (addrp, addrlen) => { + var info = readSockaddr(addrp, addrlen); + if (info.errno) throw new FS.ErrnoError(info.errno); + info.addr = DNS.lookup_addr(info.addr) || info.addr; + return info; + }; + function ___syscall_connect(fd, addr, addrlen, d1, d2, d3) { + try { + + var sock = getSocketFromFD(fd); + var info = getSocketAddress(addr, addrlen); + sock.sock_ops.connect(sock, info.addr, info.port); + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + function ___syscall_dup(fd) { + try { + + var old = SYSCALLS.getStreamFromFD(fd); + return FS.dupStream(old).fd; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + function ___syscall_faccessat(dirfd, path, amode, flags) { + try { + + path = SYSCALLS.getStr(path); + assert(!flags || flags == 512); + path = SYSCALLS.calculateAt(dirfd, path); + if (amode & ~7) { + // need a valid mode + return -28; + } + var lookup = FS.lookupPath(path, { follow: true }); + var node = lookup.node; + if (!node) { + return -44; + } + var perms = ''; + if (amode & 4) perms += 'r'; + if (amode & 2) perms += 'w'; + if (amode & 1) perms += 'x'; + if (perms /* otherwise, they've just passed F_OK */ && FS.nodePermissions(node, perms)) { + return -2; + } + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + function ___syscall_fchmod(fd, mode) { + try { + + FS.fchmod(fd, mode); + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + /** @suppress {duplicate } */ + var syscallGetVarargI = () => { + assert(SYSCALLS.varargs != undefined); + // the `+` prepended here is necessary to convince the JSCompiler that varargs is indeed a number. + var ret = HEAP32[((+SYSCALLS.varargs)>>2)]; + SYSCALLS.varargs += 4; + return ret; + }; + var syscallGetVarargP = syscallGetVarargI; + + + function ___syscall_fcntl64(fd, cmd, varargs) { + SYSCALLS.varargs = varargs; + try { + + var stream = SYSCALLS.getStreamFromFD(fd); + switch (cmd) { + case 0: { + var arg = syscallGetVarargI(); + if (arg < 0) { + return -28; + } + while (FS.streams[arg]) { + arg++; + } + var newStream; + newStream = FS.dupStream(stream, arg); + return newStream.fd; + } + case 1: + case 2: + return 0; // FD_CLOEXEC makes no sense for a single process. + case 3: + return stream.flags; + case 4: { + var arg = syscallGetVarargI(); + stream.flags |= arg; + return 0; + } + case 12: { + var arg = syscallGetVarargP(); + var offset = 0; + // We're always unlocked. + HEAP16[(((arg)+(offset))>>1)] = 2; + return 0; + } + case 13: + case 14: + // Pretend that the locking is successful. These are process-level locks, + // and Emscripten programs are a single process. If we supported linking a + // filesystem between programs, we'd need to do more here. 
+ // See https://github.com/emscripten-core/emscripten/issues/23697 + return 0; + } + return -28; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + function ___syscall_fstat64(fd, buf) { + try { + + return SYSCALLS.writeStat(buf, FS.fstat(fd)); + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + var INT53_MAX = 9007199254740992; + + var INT53_MIN = -9007199254740992; + var bigintToI53Checked = (num) => (num < INT53_MIN || num > INT53_MAX) ? NaN : Number(num); + function ___syscall_ftruncate64(fd, length) { + length = bigintToI53Checked(length); + + + try { + + if (isNaN(length)) return -61; + FS.ftruncate(fd, length); + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + ; + } + + + var stringToUTF8 = (str, outPtr, maxBytesToWrite) => { + assert(typeof maxBytesToWrite == 'number', 'stringToUTF8(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!'); + return stringToUTF8Array(str, HEAPU8, outPtr, maxBytesToWrite); + }; + function ___syscall_getcwd(buf, size) { + try { + + if (size === 0) return -28; + var cwd = FS.cwd(); + var cwdLengthInBytes = lengthBytesUTF8(cwd) + 1; + if (size < cwdLengthInBytes) return -68; + stringToUTF8(cwd, buf, size); + return cwdLengthInBytes; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + + function ___syscall_ioctl(fd, op, varargs) { + SYSCALLS.varargs = varargs; + try { + + var stream = SYSCALLS.getStreamFromFD(fd); + switch (op) { + case 21509: { + if (!stream.tty) return -59; + return 0; + } + case 21505: { + if (!stream.tty) return -59; + if (stream.tty.ops.ioctl_tcgets) { + var termios = stream.tty.ops.ioctl_tcgets(stream); + var argp = syscallGetVarargP(); + HEAP32[((argp)>>2)] = termios.c_iflag || 0; + HEAP32[(((argp)+(4))>>2)] = termios.c_oflag || 0; + HEAP32[(((argp)+(8))>>2)] = termios.c_cflag || 0; + HEAP32[(((argp)+(12))>>2)] = termios.c_lflag || 0; + for (var i = 0; i < 32; i++) { + HEAP8[(argp + i)+(17)] = termios.c_cc[i] || 0; + } + return 0; + } + return 0; + } + case 21510: + case 21511: + case 21512: { + if (!stream.tty) return -59; + return 0; // no-op, not actually adjusting terminal settings + } + case 21506: + case 21507: + case 21508: { + if (!stream.tty) return -59; + if (stream.tty.ops.ioctl_tcsets) { + var argp = syscallGetVarargP(); + var c_iflag = HEAP32[((argp)>>2)]; + var c_oflag = HEAP32[(((argp)+(4))>>2)]; + var c_cflag = HEAP32[(((argp)+(8))>>2)]; + var c_lflag = HEAP32[(((argp)+(12))>>2)]; + var c_cc = [] + for (var i = 0; i < 32; i++) { + c_cc.push(HEAP8[(argp + i)+(17)]); + } + return stream.tty.ops.ioctl_tcsets(stream.tty, op, { c_iflag, c_oflag, c_cflag, c_lflag, c_cc }); + } + return 0; // no-op, not actually adjusting terminal settings + } + case 21519: { + if (!stream.tty) return -59; + var argp = syscallGetVarargP(); + HEAP32[((argp)>>2)] = 0; + return 0; + } + case 21520: { + if (!stream.tty) return -59; + return -28; // not supported + } + case 21537: + case 21531: { + var argp = syscallGetVarargP(); + return FS.ioctl(stream, op, argp); + } + case 21523: { + // TODO: in theory we should write to the winsize struct that gets + // passed in, but for now musl doesn't read anything on it + if (!stream.tty) return -59; + if (stream.tty.ops.ioctl_tiocgwinsz) { + var winsize = 
stream.tty.ops.ioctl_tiocgwinsz(stream.tty); + var argp = syscallGetVarargP(); + HEAP16[((argp)>>1)] = winsize[0]; + HEAP16[(((argp)+(2))>>1)] = winsize[1]; + } + return 0; + } + case 21524: { + // TODO: technically, this ioctl call should change the window size. + // but, since emscripten doesn't have any concept of a terminal window + // yet, we'll just silently throw it away as we do TIOCGWINSZ + if (!stream.tty) return -59; + return 0; + } + case 21515: { + if (!stream.tty) return -59; + return 0; + } + default: return -28; // not supported + } + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + function ___syscall_lstat64(path, buf) { + try { + + path = SYSCALLS.getStr(path); + return SYSCALLS.writeStat(buf, FS.lstat(path)); + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + function ___syscall_newfstatat(dirfd, path, buf, flags) { + try { + + path = SYSCALLS.getStr(path); + var nofollow = flags & 256; + var allowEmpty = flags & 4096; + flags = flags & (~6400); + assert(!flags, `unknown flags in __syscall_newfstatat: ${flags}`); + path = SYSCALLS.calculateAt(dirfd, path, allowEmpty); + return SYSCALLS.writeStat(buf, nofollow ? FS.lstat(path) : FS.stat(path)); + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + + function ___syscall_openat(dirfd, path, flags, varargs) { + SYSCALLS.varargs = varargs; + try { + + path = SYSCALLS.getStr(path); + path = SYSCALLS.calculateAt(dirfd, path); + var mode = varargs ? syscallGetVarargI() : 0; + return FS.open(path, flags, mode).fd; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + + + function ___syscall_readlinkat(dirfd, path, buf, bufsize) { + try { + + path = SYSCALLS.getStr(path); + path = SYSCALLS.calculateAt(dirfd, path); + if (bufsize <= 0) return -28; + var ret = FS.readlink(path); + + var len = Math.min(bufsize, lengthBytesUTF8(ret)); + var endChar = HEAP8[buf+len]; + stringToUTF8(ret, buf, bufsize+1); + // readlink is one of the rare functions that write out a C string, but does never append a null to the output buffer(!) + // stringToUTF8() always appends a null byte, so restore the character under the null byte after the write. 
+ HEAP8[buf+len] = endChar; + return len; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + + + + var zeroMemory = (ptr, size) => HEAPU8.fill(0, ptr, ptr + size); + + /** @param {number=} addrlen */ + var writeSockaddr = (sa, family, addr, port, addrlen) => { + switch (family) { + case 2: + addr = inetPton4(addr); + zeroMemory(sa, 16); + if (addrlen) { + HEAP32[((addrlen)>>2)] = 16; + } + HEAP16[((sa)>>1)] = family; + HEAP32[(((sa)+(4))>>2)] = addr; + HEAP16[(((sa)+(2))>>1)] = _htons(port); + break; + case 10: + addr = inetPton6(addr); + zeroMemory(sa, 28); + if (addrlen) { + HEAP32[((addrlen)>>2)] = 28; + } + HEAP32[((sa)>>2)] = family; + HEAP32[(((sa)+(8))>>2)] = addr[0]; + HEAP32[(((sa)+(12))>>2)] = addr[1]; + HEAP32[(((sa)+(16))>>2)] = addr[2]; + HEAP32[(((sa)+(20))>>2)] = addr[3]; + HEAP16[(((sa)+(2))>>1)] = _htons(port); + break; + default: + return 5; + } + return 0; + }; + + function ___syscall_recvfrom(fd, buf, len, flags, addr, addrlen) { + try { + + var sock = getSocketFromFD(fd); + var msg = sock.sock_ops.recvmsg(sock, len); + if (!msg) return 0; // socket is closed + if (addr) { + var errno = writeSockaddr(addr, sock.family, DNS.lookup_name(msg.addr), msg.port, addrlen); + assert(!errno); + } + HEAPU8.set(msg.buffer, buf); + return msg.buffer.byteLength; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + function ___syscall_rmdir(path) { + try { + + path = SYSCALLS.getStr(path); + FS.rmdir(path); + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + + function ___syscall_sendto(fd, message, length, flags, addr, addr_len) { + try { + + var sock = getSocketFromFD(fd); + if (!addr) { + // send, no address provided + return FS.write(sock.stream, HEAP8, message, length); + } + var dest = getSocketAddress(addr, addr_len); + // sendto an address + return sock.sock_ops.sendmsg(sock, HEAP8, message, length, dest.addr, dest.port); + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + function ___syscall_socket(domain, type, protocol) { + try { + + var sock = SOCKFS.createSocket(domain, type, protocol); + assert(sock.stream.fd < 64); // XXX ? 
select() assumes socket fd values are in 0..63 + return sock.stream.fd; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + function ___syscall_stat64(path, buf) { + try { + + path = SYSCALLS.getStr(path); + return SYSCALLS.writeStat(buf, FS.stat(path)); + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + function ___syscall_unlinkat(dirfd, path, flags) { + try { + + path = SYSCALLS.getStr(path); + path = SYSCALLS.calculateAt(dirfd, path); + if (!flags) { + FS.unlink(path); + } else if (flags === 512) { + FS.rmdir(path); + } else { + return -28; + } + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return -e.errno; + } + } + + var __abort_js = () => + abort('native code called abort()'); + + var AsciiToString = (ptr) => { + var str = ''; + while (1) { + var ch = HEAPU8[ptr++]; + if (!ch) return str; + str += String.fromCharCode(ch); + } + }; + + var awaitingDependencies = { + }; + + var registeredTypes = { + }; + + var typeDependencies = { + }; + + var BindingError = class BindingError extends Error { constructor(message) { super(message); this.name = 'BindingError'; }}; + var throwBindingError = (message) => { throw new BindingError(message); }; + /** @param {Object=} options */ + function sharedRegisterType(rawType, registeredInstance, options = {}) { + var name = registeredInstance.name; + if (!rawType) { + throwBindingError(`type "${name}" must have a positive integer typeid pointer`); + } + if (registeredTypes.hasOwnProperty(rawType)) { + if (options.ignoreDuplicateRegistrations) { + return; + } else { + throwBindingError(`Cannot register type '${name}' twice`); + } + } + + registeredTypes[rawType] = registeredInstance; + delete typeDependencies[rawType]; + + if (awaitingDependencies.hasOwnProperty(rawType)) { + var callbacks = awaitingDependencies[rawType]; + delete awaitingDependencies[rawType]; + callbacks.forEach((cb) => cb()); + } + } + /** @param {Object=} options */ + function registerType(rawType, registeredInstance, options = {}) { + return sharedRegisterType(rawType, registeredInstance, options); + } + + var integerReadValueFromPointer = (name, width, signed) => { + // integers are quite common, so generate very specialized functions + switch (width) { + case 1: return signed ? + (pointer) => HEAP8[pointer] : + (pointer) => HEAPU8[pointer]; + case 2: return signed ? + (pointer) => HEAP16[((pointer)>>1)] : + (pointer) => HEAPU16[((pointer)>>1)] + case 4: return signed ? + (pointer) => HEAP32[((pointer)>>2)] : + (pointer) => HEAPU32[((pointer)>>2)] + case 8: return signed ? 
+ (pointer) => HEAP64[((pointer)>>3)] : + (pointer) => HEAPU64[((pointer)>>3)] + default: + throw new TypeError(`invalid integer width (${width}): ${name}`); + } + }; + + var embindRepr = (v) => { + if (v === null) { + return 'null'; + } + var t = typeof v; + if (t === 'object' || t === 'array' || t === 'function') { + return v.toString(); + } else { + return '' + v; + } + }; + + var assertIntegerRange = (typeName, value, minRange, maxRange) => { + if (value < minRange || value > maxRange) { + throw new TypeError(`Passing a number "${embindRepr(value)}" from JS side to C/C++ side to an argument of type "${typeName}", which is outside the valid range [${minRange}, ${maxRange}]!`); + } + }; + /** @suppress {globalThis} */ + var __embind_register_bigint = (primitiveType, name, size, minRange, maxRange) => { + name = AsciiToString(name); + + const isUnsignedType = minRange === 0n; + + let fromWireType = (value) => value; + if (isUnsignedType) { + // uint64 get converted to int64 in ABI, fix them up like we do for 32-bit integers. + const bitSize = size * 8; + fromWireType = (value) => { + return BigInt.asUintN(bitSize, value); + } + maxRange = fromWireType(maxRange); + } + + registerType(primitiveType, { + name, + fromWireType: fromWireType, + toWireType: (destructors, value) => { + if (typeof value == "number") { + value = BigInt(value); + } + else if (typeof value != "bigint") { + throw new TypeError(`Cannot convert "${embindRepr(value)}" to ${this.name}`); + } + assertIntegerRange(name, value, minRange, maxRange); + return value; + }, + readValueFromPointer: integerReadValueFromPointer(name, size, !isUnsignedType), + destructorFunction: null, // This type does not need a destructor + }); + }; + + + /** @suppress {globalThis} */ + var __embind_register_bool = (rawType, name, trueValue, falseValue) => { + name = AsciiToString(name); + registerType(rawType, { + name, + fromWireType: function(wt) { + // ambiguous emscripten ABI: sometimes return values are + // true or false, and sometimes integers (0 or 1) + return !!wt; + }, + toWireType: function(destructors, o) { + return o ? 
trueValue : falseValue; + }, + readValueFromPointer: function(pointer) { + return this.fromWireType(HEAPU8[pointer]); + }, + destructorFunction: null, // This type does not need a destructor + }); + }; + + + + var shallowCopyInternalPointer = (o) => { + return { + count: o.count, + deleteScheduled: o.deleteScheduled, + preservePointerOnDelete: o.preservePointerOnDelete, + ptr: o.ptr, + ptrType: o.ptrType, + smartPtr: o.smartPtr, + smartPtrType: o.smartPtrType, + }; + }; + + var throwInstanceAlreadyDeleted = (obj) => { + function getInstanceTypeName(handle) { + return handle.$$.ptrType.registeredClass.name; + } + throwBindingError(getInstanceTypeName(obj) + ' instance already deleted'); + }; + + var finalizationRegistry = false; + + var detachFinalizer = (handle) => {}; + + var runDestructor = ($$) => { + if ($$.smartPtr) { + $$.smartPtrType.rawDestructor($$.smartPtr); + } else { + $$.ptrType.registeredClass.rawDestructor($$.ptr); + } + }; + var releaseClassHandle = ($$) => { + $$.count.value -= 1; + var toDelete = 0 === $$.count.value; + if (toDelete) { + runDestructor($$); + } + }; + + var downcastPointer = (ptr, ptrClass, desiredClass) => { + if (ptrClass === desiredClass) { + return ptr; + } + if (undefined === desiredClass.baseClass) { + return null; // no conversion + } + + var rv = downcastPointer(ptr, ptrClass, desiredClass.baseClass); + if (rv === null) { + return null; + } + return desiredClass.downcast(rv); + }; + + var registeredPointers = { + }; + + var registeredInstances = { + }; + + var getBasestPointer = (class_, ptr) => { + if (ptr === undefined) { + throwBindingError('ptr should not be undefined'); + } + while (class_.baseClass) { + ptr = class_.upcast(ptr); + class_ = class_.baseClass; + } + return ptr; + }; + var getInheritedInstance = (class_, ptr) => { + ptr = getBasestPointer(class_, ptr); + return registeredInstances[ptr]; + }; + + var InternalError = class InternalError extends Error { constructor(message) { super(message); this.name = 'InternalError'; }}; + var throwInternalError = (message) => { throw new InternalError(message); }; + + var makeClassHandle = (prototype, record) => { + if (!record.ptrType || !record.ptr) { + throwInternalError('makeClassHandle requires ptr and ptrType'); + } + var hasSmartPtrType = !!record.smartPtrType; + var hasSmartPtr = !!record.smartPtr; + if (hasSmartPtrType !== hasSmartPtr) { + throwInternalError('Both smartPtrType and smartPtr must be specified'); + } + record.count = { value: 1 }; + return attachFinalizer(Object.create(prototype, { + $$: { + value: record, + writable: true, + }, + })); + }; + /** @suppress {globalThis} */ + function RegisteredPointer_fromWireType(ptr) { + // ptr is a raw pointer (or a raw smartpointer) + + // rawPointer is a maybe-null raw pointer + var rawPointer = this.getPointee(ptr); + if (!rawPointer) { + this.destructor(ptr); + return null; + } + + var registeredInstance = getInheritedInstance(this.registeredClass, rawPointer); + if (undefined !== registeredInstance) { + // JS object has been neutered, time to repopulate it + if (0 === registeredInstance.$$.count.value) { + registeredInstance.$$.ptr = rawPointer; + registeredInstance.$$.smartPtr = ptr; + return registeredInstance['clone'](); + } else { + // else, just increment reference count on existing object + // it already has a reference to the smart pointer + var rv = registeredInstance['clone'](); + this.destructor(ptr); + return rv; + } + } + + function makeDefaultHandle() { + if (this.isSmartPointer) { + return 
makeClassHandle(this.registeredClass.instancePrototype, { + ptrType: this.pointeeType, + ptr: rawPointer, + smartPtrType: this, + smartPtr: ptr, + }); + } else { + return makeClassHandle(this.registeredClass.instancePrototype, { + ptrType: this, + ptr, + }); + } + } + + var actualType = this.registeredClass.getActualType(rawPointer); + var registeredPointerRecord = registeredPointers[actualType]; + if (!registeredPointerRecord) { + return makeDefaultHandle.call(this); + } + + var toType; + if (this.isConst) { + toType = registeredPointerRecord.constPointerType; + } else { + toType = registeredPointerRecord.pointerType; + } + var dp = downcastPointer( + rawPointer, + this.registeredClass, + toType.registeredClass); + if (dp === null) { + return makeDefaultHandle.call(this); + } + if (this.isSmartPointer) { + return makeClassHandle(toType.registeredClass.instancePrototype, { + ptrType: toType, + ptr: dp, + smartPtrType: this, + smartPtr: ptr, + }); + } else { + return makeClassHandle(toType.registeredClass.instancePrototype, { + ptrType: toType, + ptr: dp, + }); + } + } + var attachFinalizer = (handle) => { + if ('undefined' === typeof FinalizationRegistry) { + attachFinalizer = (handle) => handle; + return handle; + } + // If the running environment has a FinalizationRegistry (see + // https://github.com/tc39/proposal-weakrefs), then attach finalizers + // for class handles. We check for the presence of FinalizationRegistry + // at run-time, not build-time. + finalizationRegistry = new FinalizationRegistry((info) => { + console.warn(info.leakWarning); + releaseClassHandle(info.$$); + }); + attachFinalizer = (handle) => { + var $$ = handle.$$; + var hasSmartPtr = !!$$.smartPtr; + if (hasSmartPtr) { + // We should not call the destructor on raw pointers in case other code expects the pointee to live + var info = { $$: $$ }; + // Create a warning as an Error instance in advance so that we can store + // the current stacktrace and point to it when / if a leak is detected. + // This is more useful than the empty stacktrace of `FinalizationRegistry` + // callback. + var cls = $$.ptrType.registeredClass; + var err = new Error(`Embind found a leaked C++ instance ${cls.name} <${ptrToString($$.ptr)}>.\n` + + "We'll free it automatically in this case, but this functionality is not reliable across various environments.\n" + + "Make sure to invoke .delete() manually once you're done with the instance instead.\n" + + "Originally allocated"); // `.stack` will add "at ..." 
after this sentence + if ('captureStackTrace' in Error) { + Error.captureStackTrace(err, RegisteredPointer_fromWireType); + } + info.leakWarning = err.stack.replace(/^Error: /, ''); + finalizationRegistry.register(handle, info, handle); + } + return handle; + }; + detachFinalizer = (handle) => finalizationRegistry.unregister(handle); + return attachFinalizer(handle); + }; + + + + + var deletionQueue = []; + var flushPendingDeletes = () => { + while (deletionQueue.length) { + var obj = deletionQueue.pop(); + obj.$$.deleteScheduled = false; + obj['delete'](); + } + }; + + var delayFunction; + var init_ClassHandle = () => { + let proto = ClassHandle.prototype; + + Object.assign(proto, { + "isAliasOf"(other) { + if (!(this instanceof ClassHandle)) { + return false; + } + if (!(other instanceof ClassHandle)) { + return false; + } + + var leftClass = this.$$.ptrType.registeredClass; + var left = this.$$.ptr; + other.$$ = /** @type {Object} */ (other.$$); + var rightClass = other.$$.ptrType.registeredClass; + var right = other.$$.ptr; + + while (leftClass.baseClass) { + left = leftClass.upcast(left); + leftClass = leftClass.baseClass; + } + + while (rightClass.baseClass) { + right = rightClass.upcast(right); + rightClass = rightClass.baseClass; + } + + return leftClass === rightClass && left === right; + }, + + "clone"() { + if (!this.$$.ptr) { + throwInstanceAlreadyDeleted(this); + } + + if (this.$$.preservePointerOnDelete) { + this.$$.count.value += 1; + return this; + } else { + var clone = attachFinalizer(Object.create(Object.getPrototypeOf(this), { + $$: { + value: shallowCopyInternalPointer(this.$$), + } + })); + + clone.$$.count.value += 1; + clone.$$.deleteScheduled = false; + return clone; + } + }, + + "delete"() { + if (!this.$$.ptr) { + throwInstanceAlreadyDeleted(this); + } + + if (this.$$.deleteScheduled && !this.$$.preservePointerOnDelete) { + throwBindingError('Object already scheduled for deletion'); + } + + detachFinalizer(this); + releaseClassHandle(this.$$); + + if (!this.$$.preservePointerOnDelete) { + this.$$.smartPtr = undefined; + this.$$.ptr = undefined; + } + }, + + "isDeleted"() { + return !this.$$.ptr; + }, + + "deleteLater"() { + if (!this.$$.ptr) { + throwInstanceAlreadyDeleted(this); + } + if (this.$$.deleteScheduled && !this.$$.preservePointerOnDelete) { + throwBindingError('Object already scheduled for deletion'); + } + deletionQueue.push(this); + if (deletionQueue.length === 1 && delayFunction) { + delayFunction(flushPendingDeletes); + } + this.$$.deleteScheduled = true; + return this; + }, + }); + + // Support `using ...` from https://github.com/tc39/proposal-explicit-resource-management. + const symbolDispose = Symbol.dispose; + if (symbolDispose) { + proto[symbolDispose] = proto['delete']; + } + }; + /** @constructor */ + function ClassHandle() { + } + + var createNamedFunction = (name, func) => Object.defineProperty(func, 'name', { value: name }); + + + var ensureOverloadTable = (proto, methodName, humanName) => { + if (undefined === proto[methodName].overloadTable) { + var prevFunc = proto[methodName]; + // Inject an overload resolver function that routes to the appropriate overload based on the number of arguments. + proto[methodName] = function(...args) { + // TODO This check can be removed in -O3 level "unsafe" optimizations. 
+ if (!proto[methodName].overloadTable.hasOwnProperty(args.length)) { + throwBindingError(`Function '${humanName}' called with an invalid number of arguments (${args.length}) - expects one of (${proto[methodName].overloadTable})!`); + } + return proto[methodName].overloadTable[args.length].apply(this, args); + }; + // Move the previous function into the overload table. + proto[methodName].overloadTable = []; + proto[methodName].overloadTable[prevFunc.argCount] = prevFunc; + } + }; + + /** @param {number=} numArguments */ + var exposePublicSymbol = (name, value, numArguments) => { + if (Module.hasOwnProperty(name)) { + if (undefined === numArguments || (undefined !== Module[name].overloadTable && undefined !== Module[name].overloadTable[numArguments])) { + throwBindingError(`Cannot register public name '${name}' twice`); + } + + // We are exposing a function with the same name as an existing function. Create an overload table and a function selector + // that routes between the two. + ensureOverloadTable(Module, name, name); + if (Module[name].overloadTable.hasOwnProperty(numArguments)) { + throwBindingError(`Cannot register multiple overloads of a function with the same number of arguments (${numArguments})!`); + } + // Add the new function into the overload table. + Module[name].overloadTable[numArguments] = value; + } else { + Module[name] = value; + Module[name].argCount = numArguments; + } + }; + + var char_0 = 48; + + var char_9 = 57; + var makeLegalFunctionName = (name) => { + assert(typeof name === 'string'); + name = name.replace(/[^a-zA-Z0-9_]/g, '$'); + var f = name.charCodeAt(0); + if (f >= char_0 && f <= char_9) { + return `_${name}`; + } + return name; + }; + + + /** @constructor */ + function RegisteredClass(name, + constructor, + instancePrototype, + rawDestructor, + baseClass, + getActualType, + upcast, + downcast) { + this.name = name; + this.constructor = constructor; + this.instancePrototype = instancePrototype; + this.rawDestructor = rawDestructor; + this.baseClass = baseClass; + this.getActualType = getActualType; + this.upcast = upcast; + this.downcast = downcast; + this.pureVirtualFunctions = []; + } + + + var upcastPointer = (ptr, ptrClass, desiredClass) => { + while (ptrClass !== desiredClass) { + if (!ptrClass.upcast) { + throwBindingError(`Expected null or instance of ${desiredClass.name}, got an instance of ${ptrClass.name}`); + } + ptr = ptrClass.upcast(ptr); + ptrClass = ptrClass.baseClass; + } + return ptr; + }; + + /** @suppress {globalThis} */ + function constNoSmartPtrRawPointerToWireType(destructors, handle) { + if (handle === null) { + if (this.isReference) { + throwBindingError(`null is not a valid ${this.name}`); + } + return 0; + } + + if (!handle.$$) { + throwBindingError(`Cannot pass "${embindRepr(handle)}" as a ${this.name}`); + } + if (!handle.$$.ptr) { + throwBindingError(`Cannot pass deleted object as a pointer of type ${this.name}`); + } + var handleClass = handle.$$.ptrType.registeredClass; + var ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass); + return ptr; + } + + + /** @suppress {globalThis} */ + function genericPointerToWireType(destructors, handle) { + var ptr; + if (handle === null) { + if (this.isReference) { + throwBindingError(`null is not a valid ${this.name}`); + } + + if (this.isSmartPointer) { + ptr = this.rawConstructor(); + if (destructors !== null) { + destructors.push(this.rawDestructor, ptr); + } + return ptr; + } else { + return 0; + } + } + + if (!handle || !handle.$$) { + throwBindingError(`Cannot 
pass "${embindRepr(handle)}" as a ${this.name}`); + } + if (!handle.$$.ptr) { + throwBindingError(`Cannot pass deleted object as a pointer of type ${this.name}`); + } + if (!this.isConst && handle.$$.ptrType.isConst) { + throwBindingError(`Cannot convert argument of type ${(handle.$$.smartPtrType ? handle.$$.smartPtrType.name : handle.$$.ptrType.name)} to parameter type ${this.name}`); + } + var handleClass = handle.$$.ptrType.registeredClass; + ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass); + + if (this.isSmartPointer) { + // TODO: this is not strictly true + // We could support BY_EMVAL conversions from raw pointers to smart pointers + // because the smart pointer can hold a reference to the handle + if (undefined === handle.$$.smartPtr) { + throwBindingError('Passing raw pointer to smart pointer is illegal'); + } + + switch (this.sharingPolicy) { + case 0: // NONE + // no upcasting + if (handle.$$.smartPtrType === this) { + ptr = handle.$$.smartPtr; + } else { + throwBindingError(`Cannot convert argument of type ${(handle.$$.smartPtrType ? handle.$$.smartPtrType.name : handle.$$.ptrType.name)} to parameter type ${this.name}`); + } + break; + + case 1: // INTRUSIVE + ptr = handle.$$.smartPtr; + break; + + case 2: // BY_EMVAL + if (handle.$$.smartPtrType === this) { + ptr = handle.$$.smartPtr; + } else { + var clonedHandle = handle['clone'](); + ptr = this.rawShare( + ptr, + Emval.toHandle(() => clonedHandle['delete']()) + ); + if (destructors !== null) { + destructors.push(this.rawDestructor, ptr); + } + } + break; + + default: + throwBindingError('Unsupporting sharing policy'); + } + } + return ptr; + } + + + + /** @suppress {globalThis} */ + function nonConstNoSmartPtrRawPointerToWireType(destructors, handle) { + if (handle === null) { + if (this.isReference) { + throwBindingError(`null is not a valid ${this.name}`); + } + return 0; + } + + if (!handle.$$) { + throwBindingError(`Cannot pass "${embindRepr(handle)}" as a ${this.name}`); + } + if (!handle.$$.ptr) { + throwBindingError(`Cannot pass deleted object as a pointer of type ${this.name}`); + } + if (handle.$$.ptrType.isConst) { + throwBindingError(`Cannot convert argument of type ${handle.$$.ptrType.name} to parameter type ${this.name}`); + } + var handleClass = handle.$$.ptrType.registeredClass; + var ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass); + return ptr; + } + + + /** @suppress {globalThis} */ + function readPointer(pointer) { + return this.fromWireType(HEAPU32[((pointer)>>2)]); + } + + var init_RegisteredPointer = () => { + Object.assign(RegisteredPointer.prototype, { + getPointee(ptr) { + if (this.rawGetPointee) { + ptr = this.rawGetPointee(ptr); + } + return ptr; + }, + destructor(ptr) { + this.rawDestructor?.(ptr); + }, + readValueFromPointer: readPointer, + fromWireType: RegisteredPointer_fromWireType, + }); + }; + /** @constructor + @param {*=} pointeeType, + @param {*=} sharingPolicy, + @param {*=} rawGetPointee, + @param {*=} rawConstructor, + @param {*=} rawShare, + @param {*=} rawDestructor, + */ + function RegisteredPointer( + name, + registeredClass, + isReference, + isConst, + + // smart pointer properties + isSmartPointer, + pointeeType, + sharingPolicy, + rawGetPointee, + rawConstructor, + rawShare, + rawDestructor + ) { + this.name = name; + this.registeredClass = registeredClass; + this.isReference = isReference; + this.isConst = isConst; + + // smart pointer properties + this.isSmartPointer = isSmartPointer; + this.pointeeType = pointeeType; + 
this.sharingPolicy = sharingPolicy; + this.rawGetPointee = rawGetPointee; + this.rawConstructor = rawConstructor; + this.rawShare = rawShare; + this.rawDestructor = rawDestructor; + + if (!isSmartPointer && registeredClass.baseClass === undefined) { + if (isConst) { + this.toWireType = constNoSmartPtrRawPointerToWireType; + this.destructorFunction = null; + } else { + this.toWireType = nonConstNoSmartPtrRawPointerToWireType; + this.destructorFunction = null; + } + } else { + this.toWireType = genericPointerToWireType; + // Here we must leave this.destructorFunction undefined, since whether genericPointerToWireType returns + // a pointer that needs to be freed up is runtime-dependent, and cannot be evaluated at registration time. + // TODO: Create an alternative mechanism that allows removing the use of var destructors = []; array in + // craftInvokerFunction altogether. + } + } + + /** @param {number=} numArguments */ + var replacePublicSymbol = (name, value, numArguments) => { + if (!Module.hasOwnProperty(name)) { + throwInternalError('Replacing nonexistent public symbol'); + } + // If there's an overload table for this symbol, replace the symbol in the overload table instead. + if (undefined !== Module[name].overloadTable && undefined !== numArguments) { + Module[name].overloadTable[numArguments] = value; + } else { + Module[name] = value; + Module[name].argCount = numArguments; + } + }; + + + + var wasmTableMirror = []; + + /** @type {WebAssembly.Table} */ + var wasmTable; + var getWasmTableEntry = (funcPtr) => { + var func = wasmTableMirror[funcPtr]; + if (!func) { + /** @suppress {checkTypes} */ + wasmTableMirror[funcPtr] = func = wasmTable.get(funcPtr); + } + /** @suppress {checkTypes} */ + assert(wasmTable.get(funcPtr) == func, 'JavaScript-side Wasm function table mirror is out of date!'); + return func; + }; + var embind__requireFunction = (signature, rawFunction, isAsync = false) => { + assert(!isAsync, 'Async bindings are only supported with JSPI.'); + + signature = AsciiToString(signature); + + function makeDynCaller() { + var rtn = getWasmTableEntry(rawFunction); + return rtn; + } + + var fp = makeDynCaller(); + if (typeof fp != 'function') { + throwBindingError(`unknown function pointer with signature ${signature}: ${rawFunction}`); + } + return fp; + }; + + + + class UnboundTypeError extends Error {} + + + + var getTypeName = (type) => { + var ptr = ___getTypeName(type); + var rv = AsciiToString(ptr); + _free(ptr); + return rv; + }; + var throwUnboundTypeError = (message, types) => { + var unboundTypes = []; + var seen = {}; + function visit(type) { + if (seen[type]) { + return; + } + if (registeredTypes[type]) { + return; + } + if (typeDependencies[type]) { + typeDependencies[type].forEach(visit); + return; + } + unboundTypes.push(type); + seen[type] = true; + } + types.forEach(visit); + + throw new UnboundTypeError(`${message}: ` + unboundTypes.map(getTypeName).join([', '])); + }; + + + + + var whenDependentTypesAreResolved = (myTypes, dependentTypes, getTypeConverters) => { + myTypes.forEach((type) => typeDependencies[type] = dependentTypes); + + function onComplete(typeConverters) { + var myTypeConverters = getTypeConverters(typeConverters); + if (myTypeConverters.length !== myTypes.length) { + throwInternalError('Mismatched type converter count'); + } + for (var i = 0; i < myTypes.length; ++i) { + registerType(myTypes[i], myTypeConverters[i]); + } + } + + var typeConverters = new Array(dependentTypes.length); + var unregisteredTypes = []; + var registered = 0; + 
dependentTypes.forEach((dt, i) => { + if (registeredTypes.hasOwnProperty(dt)) { + typeConverters[i] = registeredTypes[dt]; + } else { + unregisteredTypes.push(dt); + if (!awaitingDependencies.hasOwnProperty(dt)) { + awaitingDependencies[dt] = []; + } + awaitingDependencies[dt].push(() => { + typeConverters[i] = registeredTypes[dt]; + ++registered; + if (registered === unregisteredTypes.length) { + onComplete(typeConverters); + } + }); + } + }); + if (0 === unregisteredTypes.length) { + onComplete(typeConverters); + } + }; + var __embind_register_class = (rawType, + rawPointerType, + rawConstPointerType, + baseClassRawType, + getActualTypeSignature, + getActualType, + upcastSignature, + upcast, + downcastSignature, + downcast, + name, + destructorSignature, + rawDestructor) => { + name = AsciiToString(name); + getActualType = embind__requireFunction(getActualTypeSignature, getActualType); + upcast &&= embind__requireFunction(upcastSignature, upcast); + downcast &&= embind__requireFunction(downcastSignature, downcast); + rawDestructor = embind__requireFunction(destructorSignature, rawDestructor); + var legalFunctionName = makeLegalFunctionName(name); + + exposePublicSymbol(legalFunctionName, function() { + // this code cannot run if baseClassRawType is zero + throwUnboundTypeError(`Cannot construct ${name} due to unbound types`, [baseClassRawType]); + }); + + whenDependentTypesAreResolved( + [rawType, rawPointerType, rawConstPointerType], + baseClassRawType ? [baseClassRawType] : [], + (base) => { + base = base[0]; + + var baseClass; + var basePrototype; + if (baseClassRawType) { + baseClass = base.registeredClass; + basePrototype = baseClass.instancePrototype; + } else { + basePrototype = ClassHandle.prototype; + } + + var constructor = createNamedFunction(name, function(...args) { + if (Object.getPrototypeOf(this) !== instancePrototype) { + throw new BindingError(`Use 'new' to construct ${name}`); + } + if (undefined === registeredClass.constructor_body) { + throw new BindingError(`${name} has no accessible constructor`); + } + var body = registeredClass.constructor_body[args.length]; + if (undefined === body) { + throw new BindingError(`Tried to invoke ctor of ${name} with invalid number of parameters (${args.length}) - expected (${Object.keys(registeredClass.constructor_body).toString()}) parameters instead!`); + } + return body.apply(this, args); + }); + + var instancePrototype = Object.create(basePrototype, { + constructor: { value: constructor }, + }); + + constructor.prototype = instancePrototype; + + var registeredClass = new RegisteredClass(name, + constructor, + instancePrototype, + rawDestructor, + baseClass, + getActualType, + upcast, + downcast); + + if (registeredClass.baseClass) { + // Keep track of class hierarchy. Used to allow sub-classes to inherit class functions. 
+ registeredClass.baseClass.__derivedClasses ??= []; + + registeredClass.baseClass.__derivedClasses.push(registeredClass); + } + + var referenceConverter = new RegisteredPointer(name, + registeredClass, + true, + false, + false); + + var pointerConverter = new RegisteredPointer(name + '*', + registeredClass, + false, + false, + false); + + var constPointerConverter = new RegisteredPointer(name + ' const*', + registeredClass, + false, + true, + false); + + registeredPointers[rawType] = { + pointerType: pointerConverter, + constPointerType: constPointerConverter + }; + + replacePublicSymbol(legalFunctionName, constructor); + + return [referenceConverter, pointerConverter, constPointerConverter]; + } + ); + }; + + var heap32VectorToArray = (count, firstElement) => { + var array = []; + for (var i = 0; i < count; i++) { + // TODO(https://github.com/emscripten-core/emscripten/issues/17310): + // Find a way to hoist the `>> 2` or `>> 3` out of this loop. + array.push(HEAPU32[(((firstElement)+(i * 4))>>2)]); + } + return array; + }; + + + + + var runDestructors = (destructors) => { + while (destructors.length) { + var ptr = destructors.pop(); + var del = destructors.pop(); + del(ptr); + } + }; + + + function usesDestructorStack(argTypes) { + // Skip return value at index 0 - it's not deleted here. + for (var i = 1; i < argTypes.length; ++i) { + // The type does not define a destructor function - must use dynamic stack + if (argTypes[i] !== null && argTypes[i].destructorFunction === undefined) { + return true; + } + } + return false; + } + + + function checkArgCount(numArgs, minArgs, maxArgs, humanName, throwBindingError) { + if (numArgs < minArgs || numArgs > maxArgs) { + var argCountMessage = minArgs == maxArgs ? minArgs : `${minArgs} to ${maxArgs}`; + throwBindingError(`function ${humanName} called with ${numArgs} arguments, expected ${argCountMessage}`); + } + } + function createJsInvoker(argTypes, isClassMethodFunc, returns, isAsync) { + var needsDestructorStack = usesDestructorStack(argTypes); + var argCount = argTypes.length - 2; + var argsList = []; + var argsListWired = ['fn']; + if (isClassMethodFunc) { + argsListWired.push('thisWired'); + } + for (var i = 0; i < argCount; ++i) { + argsList.push(`arg${i}`) + argsListWired.push(`arg${i}Wired`) + } + argsList = argsList.join(',') + argsListWired = argsListWired.join(',') + + var invokerFnBody = `return function (${argsList}) {\n`; + + invokerFnBody += "checkArgCount(arguments.length, minArgs, maxArgs, humanName, throwBindingError);\n"; + + if (needsDestructorStack) { + invokerFnBody += "var destructors = [];\n"; + } + + var dtorStack = needsDestructorStack ? "destructors" : "null"; + var args1 = ["humanName", "throwBindingError", "invoker", "fn", "runDestructors", "fromRetWire", "toClassParamWire"]; + + if (isClassMethodFunc) { + invokerFnBody += `var thisWired = toClassParamWire(${dtorStack}, this);\n`; + } + + for (var i = 0; i < argCount; ++i) { + var argName = `toArg${i}Wire`; + invokerFnBody += `var arg${i}Wired = ${argName}(${dtorStack}, arg${i});\n`; + args1.push(argName); + } + + invokerFnBody += (returns || isAsync ? "var rv = ":"") + `invoker(${argsListWired});\n`; + + var returnVal = returns ? "rv" : ""; + + if (needsDestructorStack) { + invokerFnBody += "runDestructors(destructors);\n"; + } else { + for (var i = isClassMethodFunc?1:2; i < argTypes.length; ++i) { // Skip return value at index 0 - it's not deleted here. Also skip class type if not a method. + var paramName = (i === 1 ? 
"thisWired" : ("arg"+(i - 2)+"Wired")); + if (argTypes[i].destructorFunction !== null) { + invokerFnBody += `${paramName}_dtor(${paramName});\n`; + args1.push(`${paramName}_dtor`); + } + } + } + + if (returns) { + invokerFnBody += "var ret = fromRetWire(rv);\n" + + "return ret;\n"; + } else { + } + + invokerFnBody += "}\n"; + + args1.push('checkArgCount', 'minArgs', 'maxArgs'); + invokerFnBody = `if (arguments.length !== ${args1.length}){ throw new Error(humanName + "Expected ${args1.length} closure arguments " + arguments.length + " given."); }\n${invokerFnBody}`; + return new Function(args1, invokerFnBody); + } + + function getRequiredArgCount(argTypes) { + var requiredArgCount = argTypes.length - 2; + for (var i = argTypes.length - 1; i >= 2; --i) { + if (!argTypes[i].optional) { + break; + } + requiredArgCount--; + } + return requiredArgCount; + } + + function craftInvokerFunction(humanName, argTypes, classType, cppInvokerFunc, cppTargetFunc, /** boolean= */ isAsync) { + // humanName: a human-readable string name for the function to be generated. + // argTypes: An array that contains the embind type objects for all types in the function signature. + // argTypes[0] is the type object for the function return value. + // argTypes[1] is the type object for function this object/class type, or null if not crafting an invoker for a class method. + // argTypes[2...] are the actual function parameters. + // classType: The embind type object for the class to be bound, or null if this is not a method of a class. + // cppInvokerFunc: JS Function object to the C++-side function that interops into C++ code. + // cppTargetFunc: Function pointer (an integer to FUNCTION_TABLE) to the target C++ function the cppInvokerFunc will end up calling. + // isAsync: Optional. If true, returns an async function. Async bindings are only supported with JSPI. + var argCount = argTypes.length; + + if (argCount < 2) { + throwBindingError("argTypes array size mismatch! Must at least get return value and 'this' types!"); + } + + assert(!isAsync, 'Async bindings are only supported with JSPI.'); + var isClassMethodFunc = (argTypes[1] !== null && classType !== null); + + // Free functions with signature "void function()" do not need an invoker that marshalls between wire types. + // TODO: This omits argument count check - enable only at -O3 or similar. + // if (ENABLE_UNSAFE_OPTS && argCount == 2 && argTypes[0].name == "void" && !isClassMethodFunc) { + // return FUNCTION_TABLE[fn]; + // } + + // Determine if we need to use a dynamic stack to store the destructors for the function parameters. + // TODO: Remove this completely once all function invokers are being dynamically generated. + var needsDestructorStack = usesDestructorStack(argTypes); + + var returns = !argTypes[0].isVoid; + + var expectedArgCount = argCount - 2; + var minArgs = getRequiredArgCount(argTypes); + // Builld the arguments that will be passed into the closure around the invoker + // function. + var retType = argTypes[0]; + var instType = argTypes[1]; + var closureArgs = [humanName, throwBindingError, cppInvokerFunc, cppTargetFunc, runDestructors, retType.fromWireType.bind(retType), instType?.toWireType.bind(instType)]; + for (var i = 2; i < argCount; ++i) { + var argType = argTypes[i]; + closureArgs.push(argType.toWireType.bind(argType)); + } + if (!needsDestructorStack) { + // Skip return value at index 0 - it's not deleted here. Also skip class type if not a method. 
+ for (var i = isClassMethodFunc?1:2; i < argTypes.length; ++i) { + if (argTypes[i].destructorFunction !== null) { + closureArgs.push(argTypes[i].destructorFunction); + } + } + } + closureArgs.push(checkArgCount, minArgs, expectedArgCount); + + let invokerFactory = createJsInvoker(argTypes, isClassMethodFunc, returns, isAsync); + var invokerFn = invokerFactory(...closureArgs); + return createNamedFunction(humanName, invokerFn); + } + var __embind_register_class_constructor = ( + rawClassType, + argCount, + rawArgTypesAddr, + invokerSignature, + invoker, + rawConstructor + ) => { + assert(argCount > 0); + var rawArgTypes = heap32VectorToArray(argCount, rawArgTypesAddr); + invoker = embind__requireFunction(invokerSignature, invoker); + var args = [rawConstructor]; + var destructors = []; + + whenDependentTypesAreResolved([], [rawClassType], (classType) => { + classType = classType[0]; + var humanName = `constructor ${classType.name}`; + + if (undefined === classType.registeredClass.constructor_body) { + classType.registeredClass.constructor_body = []; + } + if (undefined !== classType.registeredClass.constructor_body[argCount - 1]) { + throw new BindingError(`Cannot register multiple constructors with identical number of parameters (${argCount-1}) for class '${classType.name}'! Overload resolution is currently only performed using the parameter count, not actual type info!`); + } + classType.registeredClass.constructor_body[argCount - 1] = () => { + throwUnboundTypeError(`Cannot construct ${classType.name} due to unbound types`, rawArgTypes); + }; + + whenDependentTypesAreResolved([], rawArgTypes, (argTypes) => { + // Insert empty slot for context type (argTypes[1]). + argTypes.splice(1, 0, null); + classType.registeredClass.constructor_body[argCount - 1] = craftInvokerFunction(humanName, argTypes, null, invoker, rawConstructor); + return []; + }); + return []; + }); + }; + + + + + + + + var getFunctionName = (signature) => { + signature = signature.trim(); + const argsIndex = signature.indexOf("("); + if (argsIndex === -1) return signature; + assert(signature.endsWith(")"), "Parentheses for argument names should match."); + return signature.slice(0, argsIndex); + }; + var __embind_register_class_function = (rawClassType, + methodName, + argCount, + rawArgTypesAddr, // [ReturnType, ThisType, Args...] + invokerSignature, + rawInvoker, + context, + isPureVirtual, + isAsync, + isNonnullReturn) => { + var rawArgTypes = heap32VectorToArray(argCount, rawArgTypesAddr); + methodName = AsciiToString(methodName); + methodName = getFunctionName(methodName); + rawInvoker = embind__requireFunction(invokerSignature, rawInvoker, isAsync); + + whenDependentTypesAreResolved([], [rawClassType], (classType) => { + classType = classType[0]; + var humanName = `${classType.name}.${methodName}`; + + if (methodName.startsWith("@@")) { + methodName = Symbol[methodName.substring(2)]; + } + + if (isPureVirtual) { + classType.registeredClass.pureVirtualFunctions.push(methodName); + } + + function unboundTypesHandler() { + throwUnboundTypeError(`Cannot call ${humanName} due to unbound types`, rawArgTypes); + } + + var proto = classType.registeredClass.instancePrototype; + var method = proto[methodName]; + if (undefined === method || (undefined === method.overloadTable && method.className !== classType.name && method.argCount === argCount - 2)) { + // This is the first overload to be registered, OR we are replacing a + // function in the base class with a function in the derived class. 
+ unboundTypesHandler.argCount = argCount - 2; + unboundTypesHandler.className = classType.name; + proto[methodName] = unboundTypesHandler; + } else { + // There was an existing function with the same name registered. Set up + // a function overload routing table. + ensureOverloadTable(proto, methodName, humanName); + proto[methodName].overloadTable[argCount - 2] = unboundTypesHandler; + } + + whenDependentTypesAreResolved([], rawArgTypes, (argTypes) => { + var memberFunction = craftInvokerFunction(humanName, argTypes, classType, rawInvoker, context, isAsync); + + // Replace the initial unbound-handler-stub function with the + // appropriate member function, now that all types are resolved. If + // multiple overloads are registered for this function, the function + // goes into an overload table. + if (undefined === proto[methodName].overloadTable) { + // Set argCount in case an overload is registered later + memberFunction.argCount = argCount - 2; + proto[methodName] = memberFunction; + } else { + proto[methodName].overloadTable[argCount - 2] = memberFunction; + } + + return []; + }); + return []; + }); + }; + + + var emval_freelist = []; + + var emval_handles = [0,1,,1,null,1,true,1,false,1]; + var __emval_decref = (handle) => { + if (handle > 9 && 0 === --emval_handles[handle + 1]) { + assert(emval_handles[handle] !== undefined, `Decref for unallocated handle.`); + emval_handles[handle] = undefined; + emval_freelist.push(handle); + } + }; + + + + var Emval = { + toValue:(handle) => { + if (!handle) { + throwBindingError(`Cannot use deleted val. handle = ${handle}`); + } + // handle 2 is supposed to be `undefined`. + assert(handle === 2 || emval_handles[handle] !== undefined && handle % 2 === 0, `invalid handle: ${handle}`); + return emval_handles[handle]; + }, + toHandle:(value) => { + switch (value) { + case undefined: return 2; + case null: return 4; + case true: return 6; + case false: return 8; + default:{ + const handle = emval_freelist.pop() || emval_handles.length; + emval_handles[handle] = value; + emval_handles[handle + 1] = 1; + return handle; + } + } + }, + }; + + var EmValType = { + name: 'emscripten::val', + fromWireType: (handle) => { + var rv = Emval.toValue(handle); + __emval_decref(handle); + return rv; + }, + toWireType: (destructors, value) => Emval.toHandle(value), + readValueFromPointer: readPointer, + destructorFunction: null, // This type does not need a destructor + + // TODO: do we need a deleteObject here? 
write a test where + // emval is passed into JS via an interface + }; + var __embind_register_emval = (rawType) => registerType(rawType, EmValType); + + var floatReadValueFromPointer = (name, width) => { + switch (width) { + case 4: return function(pointer) { + return this.fromWireType(HEAPF32[((pointer)>>2)]); + }; + case 8: return function(pointer) { + return this.fromWireType(HEAPF64[((pointer)>>3)]); + }; + default: + throw new TypeError(`invalid float width (${width}): ${name}`); + } + }; + + + + var __embind_register_float = (rawType, name, size) => { + name = AsciiToString(name); + registerType(rawType, { + name, + fromWireType: (value) => value, + toWireType: (destructors, value) => { + if (typeof value != "number" && typeof value != "boolean") { + throw new TypeError(`Cannot convert ${embindRepr(value)} to ${this.name}`); + } + // The VM will perform JS to Wasm value conversion, according to the spec: + // https://www.w3.org/TR/wasm-js-api-1/#towebassemblyvalue + return value; + }, + readValueFromPointer: floatReadValueFromPointer(name, size), + destructorFunction: null, // This type does not need a destructor + }); + }; + + + + + + + + + + var __embind_register_function = (name, argCount, rawArgTypesAddr, signature, rawInvoker, fn, isAsync, isNonnullReturn) => { + var argTypes = heap32VectorToArray(argCount, rawArgTypesAddr); + name = AsciiToString(name); + name = getFunctionName(name); + + rawInvoker = embind__requireFunction(signature, rawInvoker, isAsync); + + exposePublicSymbol(name, function() { + throwUnboundTypeError(`Cannot call ${name} due to unbound types`, argTypes); + }, argCount - 1); + + whenDependentTypesAreResolved([], argTypes, (argTypes) => { + var invokerArgsArray = [argTypes[0] /* return value */, null /* no class 'this'*/].concat(argTypes.slice(1) /* actual params */); + replacePublicSymbol(name, craftInvokerFunction(name, invokerArgsArray, null /* no class 'this'*/, rawInvoker, fn, isAsync), argCount - 1); + return []; + }); + }; + + + + + + /** @suppress {globalThis} */ + var __embind_register_integer = (primitiveType, name, size, minRange, maxRange) => { + name = AsciiToString(name); + + const isUnsignedType = minRange === 0; + + let fromWireType = (value) => value; + if (isUnsignedType) { + var bitshift = 32 - 8*size; + fromWireType = (value) => (value << bitshift) >>> bitshift; + maxRange = fromWireType(maxRange); + } + + registerType(primitiveType, { + name, + fromWireType: fromWireType, + toWireType: (destructors, value) => { + if (typeof value != "number" && typeof value != "boolean") { + throw new TypeError(`Cannot convert "${embindRepr(value)}" to ${name}`); + } + assertIntegerRange(name, value, minRange, maxRange); + // The VM will perform JS to Wasm value conversion, according to the spec: + // https://www.w3.org/TR/wasm-js-api-1/#towebassemblyvalue + return value; + }, + readValueFromPointer: integerReadValueFromPointer(name, size, minRange !== 0), + destructorFunction: null, // This type does not need a destructor + }); + }; + + + var __embind_register_memory_view = (rawType, dataTypeIndex, name) => { + var typeMapping = [ + Int8Array, + Uint8Array, + Int16Array, + Uint16Array, + Int32Array, + Uint32Array, + Float32Array, + Float64Array, + BigInt64Array, + BigUint64Array, + ]; + + var TA = typeMapping[dataTypeIndex]; + + function decodeMemoryView(handle) { + var size = HEAPU32[((handle)>>2)]; + var data = HEAPU32[(((handle)+(4))>>2)]; + return new TA(HEAP8.buffer, data, size); + } + + name = AsciiToString(name); + registerType(rawType, { + name, 
+ fromWireType: decodeMemoryView, + readValueFromPointer: decodeMemoryView, + }, { + ignoreDuplicateRegistrations: true, + }); + }; + + + var EmValOptionalType = Object.assign({optional: true}, EmValType);; + var __embind_register_optional = (rawOptionalType, rawType) => { + registerType(rawOptionalType, EmValOptionalType); + }; + + + + var __embind_register_smart_ptr = (rawType, + rawPointeeType, + name, + sharingPolicy, + getPointeeSignature, + rawGetPointee, + constructorSignature, + rawConstructor, + shareSignature, + rawShare, + destructorSignature, + rawDestructor) => { + name = AsciiToString(name); + rawGetPointee = embind__requireFunction(getPointeeSignature, rawGetPointee); + rawConstructor = embind__requireFunction(constructorSignature, rawConstructor); + rawShare = embind__requireFunction(shareSignature, rawShare); + rawDestructor = embind__requireFunction(destructorSignature, rawDestructor); + + whenDependentTypesAreResolved([rawType], [rawPointeeType], (pointeeType) => { + pointeeType = pointeeType[0]; + + var registeredPointer = new RegisteredPointer(name, + pointeeType.registeredClass, + false, + false, + // smart pointer properties + true, + pointeeType, + sharingPolicy, + rawGetPointee, + rawConstructor, + rawShare, + rawDestructor); + return [registeredPointer]; + }); + }; + + + + + + + + + + var __embind_register_std_string = (rawType, name) => { + name = AsciiToString(name); + var stdStringIsUTF8 = true; + + registerType(rawType, { + name, + // For some method names we use string keys here since they are part of + // the public/external API and/or used by the runtime-generated code. + fromWireType(value) { + var length = HEAPU32[((value)>>2)]; + var payload = value + 4; + + var str; + if (stdStringIsUTF8) { + str = UTF8ToString(payload, length, true); + } else { + str = ''; + for (var i = 0; i < length; ++i) { + str += String.fromCharCode(HEAPU8[payload + i]); + } + } + + _free(value); + + return str; + }, + toWireType(destructors, value) { + if (value instanceof ArrayBuffer) { + value = new Uint8Array(value); + } + + var length; + var valueIsOfTypeString = (typeof value == 'string'); + + // We accept `string` or array views with single byte elements + if (!(valueIsOfTypeString || (ArrayBuffer.isView(value) && value.BYTES_PER_ELEMENT == 1))) { + throwBindingError('Cannot pass non-string to std::string'); + } + if (stdStringIsUTF8 && valueIsOfTypeString) { + length = lengthBytesUTF8(value); + } else { + length = value.length; + } + + // assumes POINTER_SIZE alignment + var base = _malloc(4 + length + 1); + var ptr = base + 4; + HEAPU32[((base)>>2)] = length; + if (valueIsOfTypeString) { + if (stdStringIsUTF8) { + stringToUTF8(value, ptr, length + 1); + } else { + for (var i = 0; i < length; ++i) { + var charCode = value.charCodeAt(i); + if (charCode > 255) { + _free(base); + throwBindingError('String has UTF-16 code units that do not fit in 8 bits'); + } + HEAPU8[ptr + i] = charCode; + } + } + } else { + HEAPU8.set(value, ptr); + } + + if (destructors !== null) { + destructors.push(_free, base); + } + return base; + }, + readValueFromPointer: readPointer, + destructorFunction(ptr) { + _free(ptr); + }, + }); + }; + + + + + var UTF16Decoder = typeof TextDecoder != 'undefined' ? 
new TextDecoder('utf-16le') : undefined;; + + var UTF16ToString = (ptr, maxBytesToRead, ignoreNul) => { + assert(ptr % 2 == 0, 'Pointer passed to UTF16ToString must be aligned to two bytes!'); + var idx = ((ptr)>>1); + var endIdx = findStringEnd(HEAPU16, idx, maxBytesToRead / 2, ignoreNul); + + // When using conditional TextDecoder, skip it for short strings as the overhead of the native call is not worth it. + if (endIdx - idx > 16 && UTF16Decoder) + return UTF16Decoder.decode(HEAPU16.subarray(idx, endIdx)); + + // Fallback: decode without UTF16Decoder + var str = ''; + + // If maxBytesToRead is not passed explicitly, it will be undefined, and the + // for-loop's condition will always evaluate to true. The loop is then + // terminated on the first null char. + for (var i = idx; i < endIdx; ++i) { + var codeUnit = HEAPU16[i]; + // fromCharCode constructs a character from a UTF-16 code unit, so we can + // pass the UTF16 string right through. + str += String.fromCharCode(codeUnit); + } + + return str; + }; + + var stringToUTF16 = (str, outPtr, maxBytesToWrite) => { + assert(outPtr % 2 == 0, 'Pointer passed to stringToUTF16 must be aligned to two bytes!'); + assert(typeof maxBytesToWrite == 'number', 'stringToUTF16(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!'); + // Backwards compatibility: if max bytes is not specified, assume unsafe unbounded write is allowed. + maxBytesToWrite ??= 0x7FFFFFFF; + if (maxBytesToWrite < 2) return 0; + maxBytesToWrite -= 2; // Null terminator. + var startPtr = outPtr; + var numCharsToWrite = (maxBytesToWrite < str.length*2) ? (maxBytesToWrite / 2) : str.length; + for (var i = 0; i < numCharsToWrite; ++i) { + // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP. + var codeUnit = str.charCodeAt(i); // possibly a lead surrogate + HEAP16[((outPtr)>>1)] = codeUnit; + outPtr += 2; + } + // Null-terminate the pointer to the HEAP. + HEAP16[((outPtr)>>1)] = 0; + return outPtr - startPtr; + }; + + var lengthBytesUTF16 = (str) => str.length*2; + + var UTF32ToString = (ptr, maxBytesToRead, ignoreNul) => { + assert(ptr % 4 == 0, 'Pointer passed to UTF32ToString must be aligned to four bytes!'); + var str = ''; + var startIdx = ((ptr)>>2); + // If maxBytesToRead is not passed explicitly, it will be undefined, and this + // will always evaluate to true. This saves on code size. + for (var i = 0; !(i >= maxBytesToRead / 4); i++) { + var utf32 = HEAPU32[startIdx + i]; + if (!utf32 && !ignoreNul) break; + str += String.fromCodePoint(utf32); + } + return str; + }; + + var stringToUTF32 = (str, outPtr, maxBytesToWrite) => { + assert(outPtr % 4 == 0, 'Pointer passed to stringToUTF32 must be aligned to four bytes!'); + assert(typeof maxBytesToWrite == 'number', 'stringToUTF32(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!'); + // Backwards compatibility: if max bytes is not specified, assume unsafe unbounded write is allowed. + maxBytesToWrite ??= 0x7FFFFFFF; + if (maxBytesToWrite < 4) return 0; + var startPtr = outPtr; + var endPtr = startPtr + maxBytesToWrite - 4; + for (var i = 0; i < str.length; ++i) { + var codePoint = str.codePointAt(i); + // Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16. + // We need to manually skip over the second code unit for correct iteration. 
+ if (codePoint > 0xFFFF) { + i++; + } + HEAP32[((outPtr)>>2)] = codePoint; + outPtr += 4; + if (outPtr + 4 > endPtr) break; + } + // Null-terminate the pointer to the HEAP. + HEAP32[((outPtr)>>2)] = 0; + return outPtr - startPtr; + }; + + var lengthBytesUTF32 = (str) => { + var len = 0; + for (var i = 0; i < str.length; ++i) { + var codePoint = str.codePointAt(i); + // Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16. + // We need to manually skip over the second code unit for correct iteration. + if (codePoint > 0xFFFF) { + i++; + } + len += 4; + } + + return len; + }; + var __embind_register_std_wstring = (rawType, charSize, name) => { + name = AsciiToString(name); + var decodeString, encodeString, lengthBytesUTF; + if (charSize === 2) { + decodeString = UTF16ToString; + encodeString = stringToUTF16; + lengthBytesUTF = lengthBytesUTF16; + } else { + assert(charSize === 4, 'only 2-byte and 4-byte strings are currently supported'); + decodeString = UTF32ToString; + encodeString = stringToUTF32; + lengthBytesUTF = lengthBytesUTF32; + } + registerType(rawType, { + name, + fromWireType: (value) => { + // Code mostly taken from _embind_register_std_string fromWireType + var length = HEAPU32[((value)>>2)]; + var str = decodeString(value + 4, length * charSize, true); + + _free(value); + + return str; + }, + toWireType: (destructors, value) => { + if (!(typeof value == 'string')) { + throwBindingError(`Cannot pass non-string to C++ string type ${name}`); + } + + // assumes POINTER_SIZE alignment + var length = lengthBytesUTF(value); + var ptr = _malloc(4 + length + charSize); + HEAPU32[((ptr)>>2)] = length / charSize; + + encodeString(value, ptr + 4, length + charSize); + + if (destructors !== null) { + destructors.push(_free, ptr); + } + return ptr; + }, + readValueFromPointer: readPointer, + destructorFunction(ptr) { + _free(ptr); + } + }); + }; + + + var __embind_register_void = (rawType, name) => { + name = AsciiToString(name); + registerType(rawType, { + isVoid: true, // void return values can be optimized out sometimes + name, + fromWireType: () => undefined, + // TODO: assert if anything else is given? + toWireType: (destructors, o) => undefined, + }); + }; + + var __emscripten_system = (command) => { + // int system(const char *command); + // http://pubs.opengroup.org/onlinepubs/000095399/functions/system.html + // Can't call external programs. 
+ if (!command) return 0; // no shell available + return -52; + }; + + var __emscripten_throw_longjmp = () => { + throw Infinity; + }; + + var emval_methodCallers = []; + var emval_addMethodCaller = (caller) => { + var id = emval_methodCallers.length; + emval_methodCallers.push(caller); + return id; + }; + + + + var requireRegisteredType = (rawType, humanName) => { + var impl = registeredTypes[rawType]; + if (undefined === impl) { + throwBindingError(`${humanName} has unknown type ${getTypeName(rawType)}`); + } + return impl; + }; + var emval_lookupTypes = (argCount, argTypes) => { + var a = new Array(argCount); + for (var i = 0; i < argCount; ++i) { + a[i] = requireRegisteredType(HEAPU32[(((argTypes)+(i*4))>>2)], + `parameter ${i}`); + } + return a; + }; + + + var emval_returnValue = (toReturnWire, destructorsRef, handle) => { + var destructors = []; + var result = toReturnWire(destructors, handle); + if (destructors.length) { + // void, primitives and any other types w/o destructors don't need to allocate a handle + HEAPU32[((destructorsRef)>>2)] = Emval.toHandle(destructors); + } + return result; + }; + + + var emval_symbols = { + }; + + var getStringOrSymbol = (address) => { + var symbol = emval_symbols[address]; + if (symbol === undefined) { + return AsciiToString(address); + } + return symbol; + }; + var __emval_create_invoker = (argCount, argTypesPtr, kind) => { + var GenericWireTypeSize = 8; + + var [retType, ...argTypes] = emval_lookupTypes(argCount, argTypesPtr); + var toReturnWire = retType.toWireType.bind(retType); + var argFromPtr = argTypes.map(type => type.readValueFromPointer.bind(type)); + argCount--; // remove the extracted return type + + var captures = {'toValue': Emval.toValue}; + var args = argFromPtr.map((argFromPtr, i) => { + var captureName = `argFromPtr${i}`; + captures[captureName] = argFromPtr; + return `${captureName}(args${i ? 
'+' + i * GenericWireTypeSize : ''})`; + }); + var functionBody; + switch (kind){ + case 0: + functionBody = 'toValue(handle)'; + break; + case 2: + functionBody = 'new (toValue(handle))'; + break; + case 3: + functionBody = ''; + break; + case 1: + captures['getStringOrSymbol'] = getStringOrSymbol; + functionBody = 'toValue(handle)[getStringOrSymbol(methodName)]'; + break; + } + functionBody += `(${args})`; + if (!retType.isVoid) { + captures['toReturnWire'] = toReturnWire; + captures['emval_returnValue'] = emval_returnValue; + functionBody = `return emval_returnValue(toReturnWire, destructorsRef, ${functionBody})`; + } + functionBody = `return function (handle, methodName, destructorsRef, args) { + ${functionBody} + }`; + + var invokerFunction = new Function(Object.keys(captures), functionBody)(...Object.values(captures)); + var functionName = `methodCaller<(${argTypes.map(t => t.name)}) => ${retType.name}>`; + return emval_addMethodCaller(createNamedFunction(functionName, invokerFunction)); + }; + + + var __emval_incref = (handle) => { + if (handle > 9) { + emval_handles[handle + 1] += 1; + } + }; + + + + var __emval_invoke = (caller, handle, methodName, destructorsRef, args) => { + return emval_methodCallers[caller](handle, methodName, destructorsRef, args); + }; + + + + var __emval_run_destructors = (handle) => { + var destructors = Emval.toValue(handle); + runDestructors(destructors); + __emval_decref(handle); + }; + + function __gmtime_js(time, tmPtr) { + time = bigintToI53Checked(time); + + + var date = new Date(time * 1000); + HEAP32[((tmPtr)>>2)] = date.getUTCSeconds(); + HEAP32[(((tmPtr)+(4))>>2)] = date.getUTCMinutes(); + HEAP32[(((tmPtr)+(8))>>2)] = date.getUTCHours(); + HEAP32[(((tmPtr)+(12))>>2)] = date.getUTCDate(); + HEAP32[(((tmPtr)+(16))>>2)] = date.getUTCMonth(); + HEAP32[(((tmPtr)+(20))>>2)] = date.getUTCFullYear()-1900; + HEAP32[(((tmPtr)+(24))>>2)] = date.getUTCDay(); + var start = Date.UTC(date.getUTCFullYear(), 0, 1, 0, 0, 0, 0); + var yday = ((date.getTime() - start) / (1000 * 60 * 60 * 24))|0; + HEAP32[(((tmPtr)+(28))>>2)] = yday; + ; + } + + var isLeapYear = (year) => year%4 === 0 && (year%100 !== 0 || year%400 === 0); + + var MONTH_DAYS_LEAP_CUMULATIVE = [0,31,60,91,121,152,182,213,244,274,305,335]; + + var MONTH_DAYS_REGULAR_CUMULATIVE = [0,31,59,90,120,151,181,212,243,273,304,334]; + var ydayFromDate = (date) => { + var leap = isLeapYear(date.getFullYear()); + var monthDaysCumulative = (leap ? MONTH_DAYS_LEAP_CUMULATIVE : MONTH_DAYS_REGULAR_CUMULATIVE); + var yday = monthDaysCumulative[date.getMonth()] + date.getDate() - 1; // -1 since it's days since Jan 1 + + return yday; + }; + + function __localtime_js(time, tmPtr) { + time = bigintToI53Checked(time); + + + var date = new Date(time*1000); + HEAP32[((tmPtr)>>2)] = date.getSeconds(); + HEAP32[(((tmPtr)+(4))>>2)] = date.getMinutes(); + HEAP32[(((tmPtr)+(8))>>2)] = date.getHours(); + HEAP32[(((tmPtr)+(12))>>2)] = date.getDate(); + HEAP32[(((tmPtr)+(16))>>2)] = date.getMonth(); + HEAP32[(((tmPtr)+(20))>>2)] = date.getFullYear()-1900; + HEAP32[(((tmPtr)+(24))>>2)] = date.getDay(); + + var yday = ydayFromDate(date)|0; + HEAP32[(((tmPtr)+(28))>>2)] = yday; + HEAP32[(((tmPtr)+(36))>>2)] = -(date.getTimezoneOffset() * 60); + + // Attention: DST is in December in South, and some regions don't have DST at all. 
+ var start = new Date(date.getFullYear(), 0, 1); + var summerOffset = new Date(date.getFullYear(), 6, 1).getTimezoneOffset(); + var winterOffset = start.getTimezoneOffset(); + var dst = (summerOffset != winterOffset && date.getTimezoneOffset() == Math.min(winterOffset, summerOffset))|0; + HEAP32[(((tmPtr)+(32))>>2)] = dst; + ; + } + + + var __mktime_js = function(tmPtr) { + + var ret = (() => { + var date = new Date(HEAP32[(((tmPtr)+(20))>>2)] + 1900, + HEAP32[(((tmPtr)+(16))>>2)], + HEAP32[(((tmPtr)+(12))>>2)], + HEAP32[(((tmPtr)+(8))>>2)], + HEAP32[(((tmPtr)+(4))>>2)], + HEAP32[((tmPtr)>>2)], + 0); + + // There's an ambiguous hour when the time goes back; the tm_isdst field is + // used to disambiguate it. Date() basically guesses, so we fix it up if it + // guessed wrong, or fill in tm_isdst with the guess if it's -1. + var dst = HEAP32[(((tmPtr)+(32))>>2)]; + var guessedOffset = date.getTimezoneOffset(); + var start = new Date(date.getFullYear(), 0, 1); + var summerOffset = new Date(date.getFullYear(), 6, 1).getTimezoneOffset(); + var winterOffset = start.getTimezoneOffset(); + var dstOffset = Math.min(winterOffset, summerOffset); // DST is in December in South + if (dst < 0) { + // Attention: some regions don't have DST at all. + HEAP32[(((tmPtr)+(32))>>2)] = Number(summerOffset != winterOffset && dstOffset == guessedOffset); + } else if ((dst > 0) != (dstOffset == guessedOffset)) { + var nonDstOffset = Math.max(winterOffset, summerOffset); + var trueOffset = dst > 0 ? dstOffset : nonDstOffset; + // Don't try setMinutes(date.getMinutes() + ...) -- it's messed up. + date.setTime(date.getTime() + (trueOffset - guessedOffset)*60000); + } + + HEAP32[(((tmPtr)+(24))>>2)] = date.getDay(); + var yday = ydayFromDate(date)|0; + HEAP32[(((tmPtr)+(28))>>2)] = yday; + // To match expected behavior, update fields from date + HEAP32[((tmPtr)>>2)] = date.getSeconds(); + HEAP32[(((tmPtr)+(4))>>2)] = date.getMinutes(); + HEAP32[(((tmPtr)+(8))>>2)] = date.getHours(); + HEAP32[(((tmPtr)+(12))>>2)] = date.getDate(); + HEAP32[(((tmPtr)+(16))>>2)] = date.getMonth(); + HEAP32[(((tmPtr)+(20))>>2)] = date.getYear(); + + var timeMs = date.getTime(); + if (isNaN(timeMs)) { + return -1; + } + // Return time in microseconds + return timeMs / 1000; + })(); + return BigInt(ret); + }; + + + var __tzset_js = (timezone, daylight, std_name, dst_name) => { + // TODO: Use (malleable) environment variables instead of system settings. + var currentYear = new Date().getFullYear(); + var winter = new Date(currentYear, 0, 1); + var summer = new Date(currentYear, 6, 1); + var winterOffset = winter.getTimezoneOffset(); + var summerOffset = summer.getTimezoneOffset(); + + // Local standard timezone offset. Local standard time is not adjusted for + // daylight savings. This code uses the fact that getTimezoneOffset returns + // a greater value during Standard Time versus Daylight Saving Time (DST). + // Thus it determines the expected output during Standard Time, and it + // compares whether the output of the given date the same (Standard) or less + // (DST). + var stdTimezoneOffset = Math.max(winterOffset, summerOffset); + + // timezone is specified as seconds west of UTC ("The external variable + // `timezone` shall be set to the difference, in seconds, between + // Coordinated Universal Time (UTC) and local standard time."), the same + // as returned by stdTimezoneOffset. 
+ // See http://pubs.opengroup.org/onlinepubs/009695399/functions/tzset.html + HEAPU32[((timezone)>>2)] = stdTimezoneOffset * 60; + + HEAP32[((daylight)>>2)] = Number(winterOffset != summerOffset); + + var extractZone = (timezoneOffset) => { + // Why inverse sign? + // Read here https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/getTimezoneOffset + var sign = timezoneOffset >= 0 ? "-" : "+"; + + var absOffset = Math.abs(timezoneOffset) + var hours = String(Math.floor(absOffset / 60)).padStart(2, "0"); + var minutes = String(absOffset % 60).padStart(2, "0"); + + return `UTC${sign}${hours}${minutes}`; + } + + var winterName = extractZone(winterOffset); + var summerName = extractZone(summerOffset); + assert(winterName); + assert(summerName); + assert(lengthBytesUTF8(winterName) <= 16, `timezone name truncated to fit in TZNAME_MAX (${winterName})`); + assert(lengthBytesUTF8(summerName) <= 16, `timezone name truncated to fit in TZNAME_MAX (${summerName})`); + if (summerOffset < winterOffset) { + // Northern hemisphere + stringToUTF8(winterName, std_name, 17); + stringToUTF8(summerName, dst_name, 17); + } else { + stringToUTF8(winterName, dst_name, 17); + stringToUTF8(summerName, std_name, 17); + } + }; + + var _emscripten_get_now = () => performance.now(); + + var _emscripten_date_now = () => Date.now(); + + var nowIsMonotonic = 1; + + var checkWasiClock = (clock_id) => clock_id >= 0 && clock_id <= 3; + + function _clock_time_get(clk_id, ignored_precision, ptime) { + ignored_precision = bigintToI53Checked(ignored_precision); + + + if (!checkWasiClock(clk_id)) { + return 28; + } + var now; + // all wasi clocks but realtime are monotonic + if (clk_id === 0) { + now = _emscripten_date_now(); + } else if (nowIsMonotonic) { + now = _emscripten_get_now(); + } else { + return 52; + } + // "now" is in ms, and wasi times are in ns. + var nsec = Math.round(now * 1000 * 1000); + HEAP64[((ptime)>>3)] = BigInt(nsec); + return 0; + ; + } + + + var _emscripten_err = (str) => err(UTF8ToString(str)); + + var getHeapMax = () => + HEAPU8.length; + var _emscripten_get_heap_max = () => getHeapMax(); + + + var abortOnCannotGrowMemory = (requestedSize) => { + abort(`Cannot enlarge memory arrays to size ${requestedSize} bytes (OOM). Either (1) compile with -sINITIAL_MEMORY=X with X higher than the current value ${HEAP8.length}, (2) compile with -sALLOW_MEMORY_GROWTH which allows increasing the size at runtime, or (3) if you want malloc to return NULL (0) instead of this abort, compile with -sABORTING_MALLOC=0`); + }; + var _emscripten_resize_heap = (requestedSize) => { + var oldSize = HEAPU8.length; + // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned. + requestedSize >>>= 0; + abortOnCannotGrowMemory(requestedSize); + }; + + var ENV = { + }; + + var getExecutableName = () => thisProgram || './this.program'; + var getEnvStrings = () => { + if (!getEnvStrings.strings) { + // Default values. + // Browser language detection #8751 + var lang = ((typeof navigator == 'object' && navigator.language) || 'C').replace('-', '_') + '.UTF-8'; + var env = { + 'USER': 'web_user', + 'LOGNAME': 'web_user', + 'PATH': '/', + 'PWD': '/', + 'HOME': '/home/web_user', + 'LANG': lang, + '_': getExecutableName() + }; + // Apply the user-provided values, if any. + for (var x in ENV) { + // x is a key in ENV; if ENV[x] is undefined, that means it was + // explicitly set to be so. We allow user code to do that to + // force variables with default values to remain unset. 
+ if (ENV[x] === undefined) delete env[x]; + else env[x] = ENV[x]; + } + var strings = []; + for (var x in env) { + strings.push(`${x}=${env[x]}`); + } + getEnvStrings.strings = strings; + } + return getEnvStrings.strings; + }; + + var _environ_get = (__environ, environ_buf) => { + var bufSize = 0; + var envp = 0; + for (var string of getEnvStrings()) { + var ptr = environ_buf + bufSize; + HEAPU32[(((__environ)+(envp))>>2)] = ptr; + bufSize += stringToUTF8(string, ptr, Infinity) + 1; + envp += 4; + } + return 0; + }; + + + var _environ_sizes_get = (penviron_count, penviron_buf_size) => { + var strings = getEnvStrings(); + HEAPU32[((penviron_count)>>2)] = strings.length; + var bufSize = 0; + for (var string of strings) { + bufSize += lengthBytesUTF8(string) + 1; + } + HEAPU32[((penviron_buf_size)>>2)] = bufSize; + return 0; + }; + + + var runtimeKeepaliveCounter = 0; + var keepRuntimeAlive = () => noExitRuntime || runtimeKeepaliveCounter > 0; + var _proc_exit = (code) => { + EXITSTATUS = code; + if (!keepRuntimeAlive()) { + Module['onExit']?.(code); + ABORT = true; + } + quit_(code, new ExitStatus(code)); + }; + + + /** @suppress {duplicate } */ + /** @param {boolean|number=} implicit */ + var exitJS = (status, implicit) => { + EXITSTATUS = status; + + checkUnflushedContent(); + + // if exit() was called explicitly, warn the user if the runtime isn't actually being shut down + if (keepRuntimeAlive() && !implicit) { + var msg = `program exited (with status: ${status}), but keepRuntimeAlive() is set (counter=${runtimeKeepaliveCounter}) due to an async operation, so halting execution but not exiting the runtime or preventing further async execution (you can use emscripten_force_exit, if you want to force a true shutdown)`; + readyPromiseReject?.(msg); + err(msg); + } + + _proc_exit(status); + }; + var _exit = exitJS; + + function _fd_close(fd) { + try { + + var stream = SYSCALLS.getStreamFromFD(fd); + FS.close(stream); + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return e.errno; + } + } + + function _fd_fdstat_get(fd, pbuf) { + try { + + var rightsBase = 0; + var rightsInheriting = 0; + var flags = 0; + { + var stream = SYSCALLS.getStreamFromFD(fd); + // All character devices are terminals (other things a Linux system would + // assume is a character device, like the mouse, we have special APIs for). + var type = stream.tty ? 2 : + FS.isDir(stream.mode) ? 3 : + FS.isLink(stream.mode) ? 
7 : + 4; + } + HEAP8[pbuf] = type; + HEAP16[(((pbuf)+(2))>>1)] = flags; + HEAP64[(((pbuf)+(8))>>3)] = BigInt(rightsBase); + HEAP64[(((pbuf)+(16))>>3)] = BigInt(rightsInheriting); + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return e.errno; + } + } + + /** @param {number=} offset */ + var doReadv = (stream, iov, iovcnt, offset) => { + var ret = 0; + for (var i = 0; i < iovcnt; i++) { + var ptr = HEAPU32[((iov)>>2)]; + var len = HEAPU32[(((iov)+(4))>>2)]; + iov += 8; + var curr = FS.read(stream, HEAP8, ptr, len, offset); + if (curr < 0) return -1; + ret += curr; + if (curr < len) break; // nothing more to read + if (typeof offset != 'undefined') { + offset += curr; + } + } + return ret; + }; + + function _fd_read(fd, iov, iovcnt, pnum) { + try { + + var stream = SYSCALLS.getStreamFromFD(fd); + var num = doReadv(stream, iov, iovcnt); + HEAPU32[((pnum)>>2)] = num; + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return e.errno; + } + } + + + function _fd_seek(fd, offset, whence, newOffset) { + offset = bigintToI53Checked(offset); + + + try { + + if (isNaN(offset)) return 61; + var stream = SYSCALLS.getStreamFromFD(fd); + FS.llseek(stream, offset, whence); + HEAP64[((newOffset)>>3)] = BigInt(stream.position); + if (stream.getdents && offset === 0 && whence === 0) stream.getdents = null; // reset readdir state + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return e.errno; + } + ; + } + + function _fd_sync(fd) { + try { + + var stream = SYSCALLS.getStreamFromFD(fd); + if (stream.stream_ops?.fsync) { + return stream.stream_ops.fsync(stream); + } + return 0; // we can't do anything synchronously; the in-memory FS is already synced to + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return e.errno; + } + } + + /** @param {number=} offset */ + var doWritev = (stream, iov, iovcnt, offset) => { + var ret = 0; + for (var i = 0; i < iovcnt; i++) { + var ptr = HEAPU32[((iov)>>2)]; + var len = HEAPU32[(((iov)+(4))>>2)]; + iov += 8; + var curr = FS.write(stream, HEAP8, ptr, len, offset); + if (curr < 0) return -1; + ret += curr; + if (curr < len) { + // No more space to write. + break; + } + if (typeof offset != 'undefined') { + offset += curr; + } + } + return ret; + }; + + function _fd_write(fd, iov, iovcnt, pnum) { + try { + + var stream = SYSCALLS.getStreamFromFD(fd); + var num = doWritev(stream, iov, iovcnt); + HEAPU32[((pnum)>>2)] = num; + return 0; + } catch (e) { + if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; + return e.errno; + } + } + + + + + + + + + + var _getaddrinfo = (node, service, hint, out) => { + // Note getaddrinfo currently only returns a single addrinfo with ai_next defaulting to NULL. When NULL + // hints are specified or ai_family set to AF_UNSPEC or ai_socktype or ai_protocol set to 0 then we + // really should provide a linked list of suitable addrinfo values. + var addrs = []; + var canon = null; + var addr = 0; + var port = 0; + var flags = 0; + var family = 0; + var type = 0; + var proto = 0; + var ai, last; + + function allocaddrinfo(family, type, proto, canon, addr, port) { + var sa, salen, ai; + var errno; + + salen = family === 10 ? + 28 : + 16; + addr = family === 10 ? 
+ inetNtop6(addr) : + inetNtop4(addr); + sa = _malloc(salen); + errno = writeSockaddr(sa, family, addr, port); + assert(!errno); + + ai = _malloc(32); + HEAP32[(((ai)+(4))>>2)] = family; + HEAP32[(((ai)+(8))>>2)] = type; + HEAP32[(((ai)+(12))>>2)] = proto; + HEAPU32[(((ai)+(24))>>2)] = canon; + HEAPU32[(((ai)+(20))>>2)] = sa; + if (family === 10) { + HEAP32[(((ai)+(16))>>2)] = 28; + } else { + HEAP32[(((ai)+(16))>>2)] = 16; + } + HEAP32[(((ai)+(28))>>2)] = 0; + + return ai; + } + + if (hint) { + flags = HEAP32[((hint)>>2)]; + family = HEAP32[(((hint)+(4))>>2)]; + type = HEAP32[(((hint)+(8))>>2)]; + proto = HEAP32[(((hint)+(12))>>2)]; + } + if (type && !proto) { + proto = type === 2 ? 17 : 6; + } + if (!type && proto) { + type = proto === 17 ? 2 : 1; + } + + // If type or proto are set to zero in hints we should really be returning multiple addrinfo values, but for + // now default to a TCP STREAM socket so we can at least return a sensible addrinfo given NULL hints. + if (proto === 0) { + proto = 6; + } + if (type === 0) { + type = 1; + } + + if (!node && !service) { + return -2; + } + if (flags & ~(1|2|4| + 1024|8|16|32)) { + return -1; + } + if (hint !== 0 && (HEAP32[((hint)>>2)] & 2) && !node) { + return -1; + } + if (flags & 32) { + // TODO + return -2; + } + if (type !== 0 && type !== 1 && type !== 2) { + return -7; + } + if (family !== 0 && family !== 2 && family !== 10) { + return -6; + } + + if (service) { + service = UTF8ToString(service); + port = parseInt(service, 10); + + if (isNaN(port)) { + if (flags & 1024) { + return -2; + } + // TODO support resolving well-known service names from: + // http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt + return -8; + } + } + + if (!node) { + if (family === 0) { + family = 2; + } + if ((flags & 1) === 0) { + if (family === 2) { + addr = _htonl(2130706433); + } else { + addr = [0, 0, 0, _htonl(1)]; + } + } + ai = allocaddrinfo(family, type, proto, null, addr, port); + HEAPU32[((out)>>2)] = ai; + return 0; + } + + // + // try as a numeric address + // + node = UTF8ToString(node); + addr = inetPton4(node); + if (addr !== null) { + // incoming node is a valid ipv4 address + if (family === 0 || family === 2) { + family = 2; + } + else if (family === 10 && (flags & 8)) { + addr = [0, 0, _htonl(0xffff), addr]; + family = 10; + } else { + return -2; + } + } else { + addr = inetPton6(node); + if (addr !== null) { + // incoming node is a valid ipv6 address + if (family === 0 || family === 10) { + family = 10; + } else { + return -2; + } + } + } + if (addr != null) { + ai = allocaddrinfo(family, type, proto, node, addr, port); + HEAPU32[((out)>>2)] = ai; + return 0; + } + if (flags & 4) { + return -2; + } + + // + // try as a hostname + // + // resolve the hostname to a temporary fake address + node = DNS.lookup_name(node); + addr = inetPton4(node); + if (family === 0) { + family = 2; + } else if (family === 10) { + addr = [0, 0, _htonl(0xffff), addr]; + } + ai = allocaddrinfo(family, type, proto, null, addr, port); + HEAPU32[((out)>>2)] = ai; + return 0; + }; + + + + + var FS_createPath = (...args) => FS.createPath(...args); + + + + var FS_unlink = (...args) => FS.unlink(...args); + + var FS_createLazyFile = (...args) => FS.createLazyFile(...args); + + var FS_createDevice = (...args) => FS.createDevice(...args); + + FS.createPreloadedFile = FS_createPreloadedFile; + FS.preloadFile = FS_preloadFile; + FS.staticInit();; +init_ClassHandle(); +init_RegisteredPointer(); +assert(emval_handles.length === 5 * 2); +// 
End JS library code + +// include: postlibrary.js +// This file is included after the automatically-generated JS library code +// but before the wasm module is created. + +{ + + // Begin ATMODULES hooks + if (Module['noExitRuntime']) noExitRuntime = Module['noExitRuntime']; +if (Module['preloadPlugins']) preloadPlugins = Module['preloadPlugins']; +if (Module['print']) out = Module['print']; +if (Module['printErr']) err = Module['printErr']; +if (Module['wasmBinary']) wasmBinary = Module['wasmBinary']; + // End ATMODULES hooks + + checkIncomingModuleAPI(); + + if (Module['arguments']) arguments_ = Module['arguments']; + if (Module['thisProgram']) thisProgram = Module['thisProgram']; + + // Assertions on removed incoming Module JS APIs. + assert(typeof Module['memoryInitializerPrefixURL'] == 'undefined', 'Module.memoryInitializerPrefixURL option was removed, use Module.locateFile instead'); + assert(typeof Module['pthreadMainPrefixURL'] == 'undefined', 'Module.pthreadMainPrefixURL option was removed, use Module.locateFile instead'); + assert(typeof Module['cdInitializerPrefixURL'] == 'undefined', 'Module.cdInitializerPrefixURL option was removed, use Module.locateFile instead'); + assert(typeof Module['filePackagePrefixURL'] == 'undefined', 'Module.filePackagePrefixURL option was removed, use Module.locateFile instead'); + assert(typeof Module['read'] == 'undefined', 'Module.read option was removed'); + assert(typeof Module['readAsync'] == 'undefined', 'Module.readAsync option was removed (modify readAsync in JS)'); + assert(typeof Module['readBinary'] == 'undefined', 'Module.readBinary option was removed (modify readBinary in JS)'); + assert(typeof Module['setWindowTitle'] == 'undefined', 'Module.setWindowTitle option was removed (modify emscripten_set_window_title in JS)'); + assert(typeof Module['TOTAL_MEMORY'] == 'undefined', 'Module.TOTAL_MEMORY has been renamed Module.INITIAL_MEMORY'); + assert(typeof Module['ENVIRONMENT'] == 'undefined', 'Module.ENVIRONMENT has been deprecated. To force the environment, use the ENVIRONMENT compile-time option (for example, -sENVIRONMENT=web or -sENVIRONMENT=node)'); + assert(typeof Module['STACK_SIZE'] == 'undefined', 'STACK_SIZE can no longer be set at runtime. Use -sSTACK_SIZE at link time') + // If memory is defined in wasm, the user can't provide it, or set INITIAL_MEMORY + assert(typeof Module['wasmMemory'] == 'undefined', 'Use of `wasmMemory` detected. Use -sIMPORTED_MEMORY to define wasmMemory externally'); + assert(typeof Module['INITIAL_MEMORY'] == 'undefined', 'Detected runtime INITIAL_MEMORY setting. 
Use -sIMPORTED_MEMORY to define wasmMemory dynamically'); + +} + +// Begin runtime exports + Module['addRunDependency'] = addRunDependency; + Module['removeRunDependency'] = removeRunDependency; + Module['FS_preloadFile'] = FS_preloadFile; + Module['FS_unlink'] = FS_unlink; + Module['FS_createPath'] = FS_createPath; + Module['FS_createDevice'] = FS_createDevice; + Module['FS'] = FS; + Module['FS_createDataFile'] = FS_createDataFile; + Module['FS_createLazyFile'] = FS_createLazyFile; + var missingLibrarySymbols = [ + 'writeI53ToI64', + 'writeI53ToI64Clamped', + 'writeI53ToI64Signaling', + 'writeI53ToU64Clamped', + 'writeI53ToU64Signaling', + 'readI53FromI64', + 'readI53FromU64', + 'convertI32PairToI53', + 'convertI32PairToI53Checked', + 'convertU32PairToI53', + 'stackAlloc', + 'getTempRet0', + 'setTempRet0', + 'growMemory', + 'withStackSave', + 'readEmAsmArgs', + 'jstoi_q', + 'autoResumeAudioContext', + 'getDynCaller', + 'dynCall', + 'handleException', + 'runtimeKeepalivePush', + 'runtimeKeepalivePop', + 'callUserCallback', + 'maybeExit', + 'asmjsMangle', + 'alignMemory', + 'HandleAllocator', + 'getNativeTypeSize', + 'addOnInit', + 'addOnPostCtor', + 'addOnPreMain', + 'addOnExit', + 'STACK_SIZE', + 'STACK_ALIGN', + 'POINTER_SIZE', + 'ASSERTIONS', + 'ccall', + 'cwrap', + 'convertJsFunctionToWasm', + 'getEmptyTableSlot', + 'updateTableMap', + 'getFunctionAddress', + 'addFunction', + 'removeFunction', + 'intArrayToString', + 'stringToAscii', + 'stringToNewUTF8', + 'stringToUTF8OnStack', + 'writeArrayToMemory', + 'registerKeyEventCallback', + 'maybeCStringToJsString', + 'findEventTarget', + 'getBoundingClientRect', + 'fillMouseEventData', + 'registerMouseEventCallback', + 'registerWheelEventCallback', + 'registerUiEventCallback', + 'registerFocusEventCallback', + 'fillDeviceOrientationEventData', + 'registerDeviceOrientationEventCallback', + 'fillDeviceMotionEventData', + 'registerDeviceMotionEventCallback', + 'screenOrientation', + 'fillOrientationChangeEventData', + 'registerOrientationChangeEventCallback', + 'fillFullscreenChangeEventData', + 'registerFullscreenChangeEventCallback', + 'JSEvents_requestFullscreen', + 'JSEvents_resizeCanvasForFullscreen', + 'registerRestoreOldStyle', + 'hideEverythingExceptGivenElement', + 'restoreHiddenElements', + 'setLetterbox', + 'softFullscreenResizeWebGLRenderTarget', + 'doRequestFullscreen', + 'fillPointerlockChangeEventData', + 'registerPointerlockChangeEventCallback', + 'registerPointerlockErrorEventCallback', + 'requestPointerLock', + 'fillVisibilityChangeEventData', + 'registerVisibilityChangeEventCallback', + 'registerTouchEventCallback', + 'fillGamepadEventData', + 'registerGamepadEventCallback', + 'registerBeforeUnloadEventCallback', + 'fillBatteryEventData', + 'registerBatteryEventCallback', + 'setCanvasElementSize', + 'getCanvasElementSize', + 'jsStackTrace', + 'getCallstack', + 'convertPCtoSourceLocation', + 'wasiRightsToMuslOFlags', + 'wasiOFlagsToMuslOFlags', + 'safeSetTimeout', + 'setImmediateWrapped', + 'safeRequestAnimationFrame', + 'clearImmediateWrapped', + 'registerPostMainLoop', + 'registerPreMainLoop', + 'getPromise', + 'makePromise', + 'idsToPromises', + 'makePromiseCallback', + 'findMatchingCatch', + 'Browser_asyncPrepareDataCounter', + 'arraySum', + 'addDays', + 'FS_mkdirTree', + '_setNetworkCallback', + 'heapObjectForWebGLType', + 'toTypedArrayIndex', + 'webgl_enable_ANGLE_instanced_arrays', + 'webgl_enable_OES_vertex_array_object', + 'webgl_enable_WEBGL_draw_buffers', + 'webgl_enable_WEBGL_multi_draw', + 
'webgl_enable_EXT_polygon_offset_clamp', + 'webgl_enable_EXT_clip_control', + 'webgl_enable_WEBGL_polygon_mode', + 'emscriptenWebGLGet', + 'computeUnpackAlignedImageSize', + 'colorChannelsInGlTextureFormat', + 'emscriptenWebGLGetTexPixelData', + 'emscriptenWebGLGetUniform', + 'webglGetUniformLocation', + 'webglPrepareUniformLocationsBeforeFirstUse', + 'webglGetLeftBracePos', + 'emscriptenWebGLGetVertexAttrib', + '__glGetActiveAttribOrUniform', + 'writeGLArray', + 'registerWebGlEventCallback', + 'runAndAbortIfError', + 'ALLOC_NORMAL', + 'ALLOC_STACK', + 'allocate', + 'writeStringToMemory', + 'writeAsciiToMemory', + 'demangle', + 'stackTrace', + 'getFunctionArgsName', + 'createJsInvokerSignature', + 'PureVirtualError', + 'registerInheritedInstance', + 'unregisterInheritedInstance', + 'getInheritedInstanceCount', + 'getLiveInheritedInstances', + 'enumReadValueFromPointer', + 'setDelayFunction', + 'validateThis', + 'count_emval_handles', + 'emval_get_global', +]; +missingLibrarySymbols.forEach(missingLibrarySymbol) + + var unexportedSymbols = [ + 'run', + 'out', + 'err', + 'callMain', + 'abort', + 'wasmMemory', + 'wasmExports', + 'HEAPF32', + 'HEAPF64', + 'HEAP8', + 'HEAPU8', + 'HEAP16', + 'HEAPU16', + 'HEAP32', + 'HEAPU32', + 'HEAP64', + 'HEAPU64', + 'writeStackCookie', + 'checkStackCookie', + 'INT53_MAX', + 'INT53_MIN', + 'bigintToI53Checked', + 'stackSave', + 'stackRestore', + 'ptrToString', + 'zeroMemory', + 'exitJS', + 'getHeapMax', + 'abortOnCannotGrowMemory', + 'ENV', + 'ERRNO_CODES', + 'strError', + 'inetPton4', + 'inetNtop4', + 'inetPton6', + 'inetNtop6', + 'readSockaddr', + 'writeSockaddr', + 'DNS', + 'Protocols', + 'Sockets', + 'timers', + 'warnOnce', + 'readEmAsmArgsArray', + 'getExecutableName', + 'keepRuntimeAlive', + 'asyncLoad', + 'mmapAlloc', + 'wasmTable', + 'getUniqueRunDependency', + 'noExitRuntime', + 'addOnPreRun', + 'addOnPostRun', + 'freeTableIndexes', + 'functionsInTableMap', + 'setValue', + 'getValue', + 'PATH', + 'PATH_FS', + 'UTF8Decoder', + 'UTF8ArrayToString', + 'UTF8ToString', + 'stringToUTF8Array', + 'stringToUTF8', + 'lengthBytesUTF8', + 'intArrayFromString', + 'AsciiToString', + 'UTF16Decoder', + 'UTF16ToString', + 'stringToUTF16', + 'lengthBytesUTF16', + 'UTF32ToString', + 'stringToUTF32', + 'lengthBytesUTF32', + 'JSEvents', + 'specialHTMLTargets', + 'findCanvasEventTarget', + 'currentFullscreenStrategy', + 'restoreOldWindowedStyle', + 'UNWIND_CACHE', + 'ExitStatus', + 'getEnvStrings', + 'checkWasiClock', + 'doReadv', + 'doWritev', + 'initRandomFill', + 'randomFill', + 'emSetImmediate', + 'emClearImmediate_deps', + 'emClearImmediate', + 'promiseMap', + 'uncaughtExceptionCount', + 'exceptionLast', + 'exceptionCaught', + 'ExceptionInfo', + 'Browser', + 'requestFullscreen', + 'requestFullScreen', + 'setCanvasSize', + 'getUserMedia', + 'createContext', + 'getPreloadedImageData__data', + 'wget', + 'MONTH_DAYS_REGULAR', + 'MONTH_DAYS_LEAP', + 'MONTH_DAYS_REGULAR_CUMULATIVE', + 'MONTH_DAYS_LEAP_CUMULATIVE', + 'isLeapYear', + 'ydayFromDate', + 'SYSCALLS', + 'getSocketFromFD', + 'getSocketAddress', + 'preloadPlugins', + 'FS_createPreloadedFile', + 'FS_modeStringToFlags', + 'FS_getMode', + 'FS_stdin_getChar_buffer', + 'FS_stdin_getChar', + 'FS_readFile', + 'FS_root', + 'FS_mounts', + 'FS_devices', + 'FS_streams', + 'FS_nextInode', + 'FS_nameTable', + 'FS_currentPath', + 'FS_initialized', + 'FS_ignorePermissions', + 'FS_filesystems', + 'FS_syncFSRequests', + 'FS_readFiles', + 'FS_lookupPath', + 'FS_getPath', + 'FS_hashName', + 'FS_hashAddNode', + 'FS_hashRemoveNode', + 
'FS_lookupNode', + 'FS_createNode', + 'FS_destroyNode', + 'FS_isRoot', + 'FS_isMountpoint', + 'FS_isFile', + 'FS_isDir', + 'FS_isLink', + 'FS_isChrdev', + 'FS_isBlkdev', + 'FS_isFIFO', + 'FS_isSocket', + 'FS_flagsToPermissionString', + 'FS_nodePermissions', + 'FS_mayLookup', + 'FS_mayCreate', + 'FS_mayDelete', + 'FS_mayOpen', + 'FS_checkOpExists', + 'FS_nextfd', + 'FS_getStreamChecked', + 'FS_getStream', + 'FS_createStream', + 'FS_closeStream', + 'FS_dupStream', + 'FS_doSetAttr', + 'FS_chrdev_stream_ops', + 'FS_major', + 'FS_minor', + 'FS_makedev', + 'FS_registerDevice', + 'FS_getDevice', + 'FS_getMounts', + 'FS_syncfs', + 'FS_mount', + 'FS_unmount', + 'FS_lookup', + 'FS_mknod', + 'FS_statfs', + 'FS_statfsStream', + 'FS_statfsNode', + 'FS_create', + 'FS_mkdir', + 'FS_mkdev', + 'FS_symlink', + 'FS_rename', + 'FS_rmdir', + 'FS_readdir', + 'FS_readlink', + 'FS_stat', + 'FS_fstat', + 'FS_lstat', + 'FS_doChmod', + 'FS_chmod', + 'FS_lchmod', + 'FS_fchmod', + 'FS_doChown', + 'FS_chown', + 'FS_lchown', + 'FS_fchown', + 'FS_doTruncate', + 'FS_truncate', + 'FS_ftruncate', + 'FS_utime', + 'FS_open', + 'FS_close', + 'FS_isClosed', + 'FS_llseek', + 'FS_read', + 'FS_write', + 'FS_mmap', + 'FS_msync', + 'FS_ioctl', + 'FS_writeFile', + 'FS_cwd', + 'FS_chdir', + 'FS_createDefaultDirectories', + 'FS_createDefaultDevices', + 'FS_createSpecialDirectories', + 'FS_createStandardStreams', + 'FS_staticInit', + 'FS_init', + 'FS_quit', + 'FS_findObject', + 'FS_analyzePath', + 'FS_createFile', + 'FS_forceLoadFile', + 'FS_absolutePath', + 'FS_createFolder', + 'FS_createLink', + 'FS_joinPath', + 'FS_mmapAlloc', + 'FS_standardizePath', + 'MEMFS', + 'TTY', + 'PIPEFS', + 'SOCKFS', + 'tempFixedLengthArray', + 'miniTempWebGLFloatBuffers', + 'miniTempWebGLIntBuffers', + 'GL', + 'AL', + 'GLUT', + 'EGL', + 'GLEW', + 'IDBStore', + 'SDL', + 'SDL_gfx', + 'allocateUTF8', + 'allocateUTF8OnStack', + 'print', + 'printErr', + 'jstoi_s', + 'InternalError', + 'BindingError', + 'throwInternalError', + 'throwBindingError', + 'registeredTypes', + 'awaitingDependencies', + 'typeDependencies', + 'tupleRegistrations', + 'structRegistrations', + 'sharedRegisterType', + 'whenDependentTypesAreResolved', + 'getTypeName', + 'getFunctionName', + 'heap32VectorToArray', + 'requireRegisteredType', + 'usesDestructorStack', + 'checkArgCount', + 'getRequiredArgCount', + 'createJsInvoker', + 'UnboundTypeError', + 'EmValType', + 'EmValOptionalType', + 'throwUnboundTypeError', + 'ensureOverloadTable', + 'exposePublicSymbol', + 'replacePublicSymbol', + 'createNamedFunction', + 'embindRepr', + 'registeredInstances', + 'getBasestPointer', + 'getInheritedInstance', + 'registeredPointers', + 'registerType', + 'integerReadValueFromPointer', + 'floatReadValueFromPointer', + 'assertIntegerRange', + 'readPointer', + 'runDestructors', + 'craftInvokerFunction', + 'embind__requireFunction', + 'genericPointerToWireType', + 'constNoSmartPtrRawPointerToWireType', + 'nonConstNoSmartPtrRawPointerToWireType', + 'init_RegisteredPointer', + 'RegisteredPointer', + 'RegisteredPointer_fromWireType', + 'runDestructor', + 'releaseClassHandle', + 'finalizationRegistry', + 'detachFinalizer_deps', + 'detachFinalizer', + 'attachFinalizer', + 'makeClassHandle', + 'init_ClassHandle', + 'ClassHandle', + 'throwInstanceAlreadyDeleted', + 'deletionQueue', + 'flushPendingDeletes', + 'delayFunction', + 'RegisteredClass', + 'shallowCopyInternalPointer', + 'downcastPointer', + 'upcastPointer', + 'char_0', + 'char_9', + 'makeLegalFunctionName', + 'emval_freelist', + 'emval_handles', + 
'emval_symbols', + 'getStringOrSymbol', + 'Emval', + 'emval_returnValue', + 'emval_lookupTypes', + 'emval_methodCallers', + 'emval_addMethodCaller', +]; +unexportedSymbols.forEach(unexportedRuntimeSymbol); + + // End runtime exports + // Begin JS library exports + // End JS library exports + +// end include: postlibrary.js + +function checkIncomingModuleAPI() { + ignoredModuleProp('fetchSettings'); +} + +// Imports from the Wasm binary. +var ___getTypeName = makeInvalidEarlyAccess('___getTypeName'); +var _free = makeInvalidEarlyAccess('_free'); +var _malloc = makeInvalidEarlyAccess('_malloc'); +var _fflush = makeInvalidEarlyAccess('_fflush'); +var _emscripten_stack_get_end = makeInvalidEarlyAccess('_emscripten_stack_get_end'); +var _emscripten_stack_get_base = makeInvalidEarlyAccess('_emscripten_stack_get_base'); +var _htonl = makeInvalidEarlyAccess('_htonl'); +var _htons = makeInvalidEarlyAccess('_htons'); +var _ntohs = makeInvalidEarlyAccess('_ntohs'); +var _strerror = makeInvalidEarlyAccess('_strerror'); +var _setThrew = makeInvalidEarlyAccess('_setThrew'); +var _emscripten_stack_init = makeInvalidEarlyAccess('_emscripten_stack_init'); +var _emscripten_stack_get_free = makeInvalidEarlyAccess('_emscripten_stack_get_free'); +var __emscripten_stack_restore = makeInvalidEarlyAccess('__emscripten_stack_restore'); +var __emscripten_stack_alloc = makeInvalidEarlyAccess('__emscripten_stack_alloc'); +var _emscripten_stack_get_current = makeInvalidEarlyAccess('_emscripten_stack_get_current'); + +function assignWasmExports(wasmExports) { + ___getTypeName = createExportWrapper('__getTypeName', 1); + _free = createExportWrapper('free', 1); + _malloc = createExportWrapper('malloc', 1); + _fflush = createExportWrapper('fflush', 1); + _emscripten_stack_get_end = wasmExports['emscripten_stack_get_end']; + _emscripten_stack_get_base = wasmExports['emscripten_stack_get_base']; + _htonl = createExportWrapper('htonl', 1); + _htons = createExportWrapper('htons', 1); + _ntohs = createExportWrapper('ntohs', 1); + _strerror = createExportWrapper('strerror', 1); + _setThrew = createExportWrapper('setThrew', 2); + _emscripten_stack_init = wasmExports['emscripten_stack_init']; + _emscripten_stack_get_free = wasmExports['emscripten_stack_get_free']; + __emscripten_stack_restore = wasmExports['_emscripten_stack_restore']; + __emscripten_stack_alloc = wasmExports['_emscripten_stack_alloc']; + _emscripten_stack_get_current = wasmExports['emscripten_stack_get_current']; +} +var wasmImports = { + /** @export */ + __assert_fail: ___assert_fail, + /** @export */ + __cxa_throw: ___cxa_throw, + /** @export */ + __syscall_chmod: ___syscall_chmod, + /** @export */ + __syscall_connect: ___syscall_connect, + /** @export */ + __syscall_dup: ___syscall_dup, + /** @export */ + __syscall_faccessat: ___syscall_faccessat, + /** @export */ + __syscall_fchmod: ___syscall_fchmod, + /** @export */ + __syscall_fcntl64: ___syscall_fcntl64, + /** @export */ + __syscall_fstat64: ___syscall_fstat64, + /** @export */ + __syscall_ftruncate64: ___syscall_ftruncate64, + /** @export */ + __syscall_getcwd: ___syscall_getcwd, + /** @export */ + __syscall_ioctl: ___syscall_ioctl, + /** @export */ + __syscall_lstat64: ___syscall_lstat64, + /** @export */ + __syscall_newfstatat: ___syscall_newfstatat, + /** @export */ + __syscall_openat: ___syscall_openat, + /** @export */ + __syscall_readlinkat: ___syscall_readlinkat, + /** @export */ + __syscall_recvfrom: ___syscall_recvfrom, + /** @export */ + __syscall_rmdir: ___syscall_rmdir, + /** @export */ + 
__syscall_sendto: ___syscall_sendto, + /** @export */ + __syscall_socket: ___syscall_socket, + /** @export */ + __syscall_stat64: ___syscall_stat64, + /** @export */ + __syscall_unlinkat: ___syscall_unlinkat, + /** @export */ + _abort_js: __abort_js, + /** @export */ + _embind_register_bigint: __embind_register_bigint, + /** @export */ + _embind_register_bool: __embind_register_bool, + /** @export */ + _embind_register_class: __embind_register_class, + /** @export */ + _embind_register_class_constructor: __embind_register_class_constructor, + /** @export */ + _embind_register_class_function: __embind_register_class_function, + /** @export */ + _embind_register_emval: __embind_register_emval, + /** @export */ + _embind_register_float: __embind_register_float, + /** @export */ + _embind_register_function: __embind_register_function, + /** @export */ + _embind_register_integer: __embind_register_integer, + /** @export */ + _embind_register_memory_view: __embind_register_memory_view, + /** @export */ + _embind_register_optional: __embind_register_optional, + /** @export */ + _embind_register_smart_ptr: __embind_register_smart_ptr, + /** @export */ + _embind_register_std_string: __embind_register_std_string, + /** @export */ + _embind_register_std_wstring: __embind_register_std_wstring, + /** @export */ + _embind_register_void: __embind_register_void, + /** @export */ + _emscripten_system: __emscripten_system, + /** @export */ + _emscripten_throw_longjmp: __emscripten_throw_longjmp, + /** @export */ + _emval_create_invoker: __emval_create_invoker, + /** @export */ + _emval_decref: __emval_decref, + /** @export */ + _emval_incref: __emval_incref, + /** @export */ + _emval_invoke: __emval_invoke, + /** @export */ + _emval_run_destructors: __emval_run_destructors, + /** @export */ + _gmtime_js: __gmtime_js, + /** @export */ + _localtime_js: __localtime_js, + /** @export */ + _mktime_js: __mktime_js, + /** @export */ + _tzset_js: __tzset_js, + /** @export */ + clock_time_get: _clock_time_get, + /** @export */ + emscripten_date_now: _emscripten_date_now, + /** @export */ + emscripten_err: _emscripten_err, + /** @export */ + emscripten_get_heap_max: _emscripten_get_heap_max, + /** @export */ + emscripten_get_now: _emscripten_get_now, + /** @export */ + emscripten_resize_heap: _emscripten_resize_heap, + /** @export */ + environ_get: _environ_get, + /** @export */ + environ_sizes_get: _environ_sizes_get, + /** @export */ + exit: _exit, + /** @export */ + fd_close: _fd_close, + /** @export */ + fd_fdstat_get: _fd_fdstat_get, + /** @export */ + fd_read: _fd_read, + /** @export */ + fd_seek: _fd_seek, + /** @export */ + fd_sync: _fd_sync, + /** @export */ + fd_write: _fd_write, + /** @export */ + getaddrinfo: _getaddrinfo, + /** @export */ + invoke_ii, + /** @export */ + invoke_v, + /** @export */ + invoke_vi, + /** @export */ + invoke_vii +}; +var wasmExports = await createWasm(); + +function invoke_vii(index,a1,a2) { + var sp = stackSave(); + try { + getWasmTableEntry(index)(a1,a2); + } catch(e) { + stackRestore(sp); + if (e !== e+0) throw e; + _setThrew(1, 0); + } +} + +function invoke_vi(index,a1) { + var sp = stackSave(); + try { + getWasmTableEntry(index)(a1); + } catch(e) { + stackRestore(sp); + if (e !== e+0) throw e; + _setThrew(1, 0); + } +} + +function invoke_ii(index,a1) { + var sp = stackSave(); + try { + return getWasmTableEntry(index)(a1); + } catch(e) { + stackRestore(sp); + if (e !== e+0) throw e; + _setThrew(1, 0); + } +} + +function invoke_v(index) { + var sp = stackSave(); + try { + 
getWasmTableEntry(index)(); + } catch(e) { + stackRestore(sp); + if (e !== e+0) throw e; + _setThrew(1, 0); + } +} + + +// include: postamble.js +// === Auto-generated postamble setup entry stuff === + +var calledRun; + +function stackCheckInit() { + // This is normally called automatically during __wasm_call_ctors but need to + // get these values before even running any of the ctors so we call it redundantly + // here. + _emscripten_stack_init(); + // TODO(sbc): Move writeStackCookie to native to to avoid this. + writeStackCookie(); +} + +function run() { + + if (runDependencies > 0) { + dependenciesFulfilled = run; + return; + } + + stackCheckInit(); + + preRun(); + + // a preRun added a dependency, run will be called later + if (runDependencies > 0) { + dependenciesFulfilled = run; + return; + } + + function doRun() { + // run may have just been called through dependencies being fulfilled just in this very frame, + // or while the async setStatus time below was happening + assert(!calledRun); + calledRun = true; + Module['calledRun'] = true; + + if (ABORT) return; + + initRuntime(); + + readyPromiseResolve?.(Module); + Module['onRuntimeInitialized']?.(); + consumedModuleProp('onRuntimeInitialized'); + + assert(!Module['_main'], 'compiled without a main, but one is present. if you added it from JS, use Module["onRuntimeInitialized"]'); + + postRun(); + } + + if (Module['setStatus']) { + Module['setStatus']('Running...'); + setTimeout(() => { + setTimeout(() => Module['setStatus'](''), 1); + doRun(); + }, 1); + } else + { + doRun(); + } + checkStackCookie(); +} + +function checkUnflushedContent() { + // Compiler settings do not allow exiting the runtime, so flushing + // the streams is not possible. but in ASSERTIONS mode we check + // if there was something to flush, and if so tell the user they + // should request that the runtime be exitable. + // Normally we would not even include flush() at all, but in ASSERTIONS + // builds we do so just for this check, and here we see if there is any + // content to flush, that is, we check if there would have been + // something a non-ASSERTIONS build would have not seen. + // How we flush the streams depends on whether we are in SYSCALLS_REQUIRE_FILESYSTEM=0 + // mode (which has its own special function for this; otherwise, all + // the code is inside libc) + var oldOut = out; + var oldErr = err; + var has = false; + out = err = (x) => { + has = true; + } + try { // it doesn't matter if it fails + _fflush(0); + // also flush in the JS FS layer + ['stdout', 'stderr'].forEach((name) => { + var info = FS.analyzePath('/dev/' + name); + if (!info) return; + var stream = info.object; + var rdev = stream.rdev; + var tty = TTY.ttys[rdev]; + if (tty?.output?.length) { + has = true; + } + }); + } catch(e) {} + out = oldOut; + err = oldErr; + if (has) { + warnOnce('stdio streams had content in them that was not flushed. you should set EXIT_RUNTIME to 1 (see the Emscripten FAQ), or make sure to emit a newline when you printf etc.'); + } +} + +function preInit() { + if (Module['preInit']) { + if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']]; + while (Module['preInit'].length > 0) { + Module['preInit'].shift()(); + } + } + consumedModuleProp('preInit'); +} + +preInit(); +run(); + +// end include: postamble.js + +// include: postamble_modularize.js +// In MODULARIZE mode we wrap the generated code in a factory function +// and return either the Module itself, or a promise of the module. 
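/*
 * Illustrative sketch (not part of the generated output): because this build is
 * MODULARIZE-d and default-exports the factory as `createWasmModule`, callers obtain
 * the runtime by awaiting that factory. The `locateFile` prefix and `setStatus`
 * handler below are assumed placeholders, not values defined in this patch:
 *
 *   import createWasmModule from './geant4_wasm/geant4_wasm';
 *
 *   const Module = await createWasmModule({
 *     locateFile: (file) => `/assets/geant4/${file}`, // hypothetical URL prefix for .wasm/.data assets
 *     setStatus: (msg) => console.log(msg),           // receives progress text from the preload code
 *   });
 */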
+// +// We assign to the `moduleRtn` global here and configure closure to see +// this as and extern so it won't get minified. + +if (runtimeInitialized) { + moduleRtn = Module; +} else { + // Set up the promise that indicates the Module is initialized + moduleRtn = new Promise((resolve, reject) => { + readyPromiseResolve = resolve; + readyPromiseReject = reject; + }); +} + +// Assertion for attempting to access module properties on the incoming +// moduleArg. In the past we used this object as the prototype of the module +// and assigned properties to it, but now we return a distinct object. This +// keeps the instance private until it is ready (i.e the promise has been +// resolved). +for (const prop of Object.keys(Module)) { + if (!(prop in moduleArg)) { + Object.defineProperty(moduleArg, prop, { + configurable: true, + get() { + abort(`Access to module property ('${prop}') is no longer possible via the module constructor argument; Instead, use the result of the module constructor.`) + } + }); + } +} +// end include: postamble_modularize.js + + + + return moduleRtn; +} + +// Export using a UMD style export, or ES6 exports if selected +export default createWasmModule; + diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js new file mode 100644 index 000000000..fec0c48bf --- /dev/null +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js @@ -0,0 +1,419 @@ +export default async function loadDataFile(Module) { + + Module['expectedDataFileDownloads'] ??= 0; + Module['expectedDataFileDownloads']++; + // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. + var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; + var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; + if (isPthread || isWasmWorker) return; +return new Promise((loadDataResolve, loadDataReject) => { + async function loadPackage(metadata) { + + var PACKAGE_PATH = ''; + if (typeof window === 'object') { + PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); + } else if (typeof process === 'undefined' && typeof location !== 'undefined') { + // web worker + PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); + } + var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/G4EMLOW8.6.1.data'; + var REMOTE_PACKAGE_BASE = 'G4EMLOW8.6.1.data'; + var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; + var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; + + async function fetchRemotePackage(packageName, packageSize) { + + Module['dataFileDownloads'] ??= {}; + try { + var response = await fetch(packageName); + } catch (e) { + throw new Error(`Network Error: ${packageName}`, {e}); + } + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + + const chunks = []; + const headers = response.headers; + const total = Number(headers.get('Content-Length') ?? 
packageSize); + let loaded = 0; + + Module['setStatus']?.('Downloading data...'); + const reader = response.body.getReader(); + + while (1) { + var {done, value} = await reader.read(); + if (done) break; + chunks.push(value); + loaded += value.length; + Module['dataFileDownloads'][packageName] = {loaded, total}; + + let totalLoaded = 0; + let totalSize = 0; + + for (const download of Object.values(Module['dataFileDownloads'])) { + totalLoaded += download.loaded; + totalSize += download.total; + } + + Module['setStatus']?.(`Downloading data... (${totalLoaded}/${totalSize})`); + } + + const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); + let offset = 0; + for (const chunk of chunks) { + packageData.set(chunk, offset); + offset += chunk.length; + } + return packageData.buffer; + } + + async function runWithFS(Module) { + + function assert(check, msg) { + if (!check) throw new Error(msg); + } +Module['FS_createPath']("/", "data", true, true); +Module['FS_createPath']("/data", "G4EMLOW8.6.1", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "JAEAESData", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "XRayReflection_data", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "auger", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "brem", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "brem_SB", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/brem_SB", "SBTables", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "charge_transf", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "comp", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "dna", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "doppler", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "dpwa", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa", "dcss", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa/dcss", "el", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa/dcss", "pos", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa", "stables", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa/stables", "el", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa/stables", "pos", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "epics2017", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/epics2017", "comp", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/epics2017", "pair", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/epics2017", "phot", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/epics2017", "rayl", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "estar", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/estar", "estar_basic", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/estar/estar_basic", "elems", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/estar/estar_basic", "mater", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/estar", "estar_long", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/estar/estar_long", "elems", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/estar/estar_long", "mater", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "fluor", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "fluor_ANSTO", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "fluor_Bearden", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", 
"fluor_XDB_EADL", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "ion_stopping_data", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/ion_stopping_data", "icru73", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/ion_stopping_data", "icru90", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "ioni", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "livermore", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "brem", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "comp", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "pair", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "pairdata", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "phot_epics2014", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "rayl", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "tripdata", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "microelec", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/microelec", "Elastic", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/microelec", "Inelastic", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/microelec", "Structure", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "msc_GS", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS", "GSGrid_1", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS", "GSGrid_2", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS", "MottCor", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS/MottCor", "el", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS/MottCor", "pos", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS", "PWACor", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS/PWACor", "el", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS/PWACor", "pos", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "mupair", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "penelope", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/penelope", "bremsstrahlung", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/penelope", "pairproduction", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/penelope", "photoelectric", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/penelope", "rayleigh", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/penelope/rayleigh", "MIFF", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "photoelectric_angular", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "pixe", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe", "ecpssr", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe/ecpssr", "alpha", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe/ecpssr", "proton", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe", "kacsPaul", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe", "kpcsPaul", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe", "uf", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1", "pixe_ANSTO", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe_ANSTO", "alpha", true, true); +Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe_ANSTO", "proton", true, true); + + /** @constructor */ + function DataRequest(start, end, audio) { + this.start = start; + this.end = end; + 
this.audio = audio; + } + DataRequest.prototype = { + requests: {}, + open: function(mode, name) { + this.name = name; + this.requests[name] = this; + Module['addRunDependency'](`fp ${this.name}`); + }, + send: function() {}, + onload: function() { + var byteArray = this.byteArray.subarray(this.start, this.end); + this.finish(byteArray); + }, + finish: async function(byteArray) { + var that = this; + // canOwn this data in the filesystem, it is a slice into the heap that will never change + Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); + Module['removeRunDependency'](`fp ${that.name}`); +loadDataResolve(); + this.requests[this.name] = null; + } + }; + + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', files[i]['filename']); + } + + var PACKAGE_UUID = metadata['package_uuid']; + var IDB_RO = "readonly"; + var IDB_RW = "readwrite"; + var DB_NAME = "EM_PRELOAD_CACHE"; + var DB_VERSION = 1; + var METADATA_STORE_NAME = 'METADATA'; + var PACKAGE_STORE_NAME = 'PACKAGES'; + + async function openDatabase() { + if (typeof indexedDB == 'undefined') { + throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); + } + return new Promise((resolve, reject) => { + var openRequest = indexedDB.open(DB_NAME, DB_VERSION); + openRequest.onupgradeneeded = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + + if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { + db.deleteObjectStore(PACKAGE_STORE_NAME); + } + var packages = db.createObjectStore(PACKAGE_STORE_NAME); + + if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { + db.deleteObjectStore(METADATA_STORE_NAME); + } + var metadata = db.createObjectStore(METADATA_STORE_NAME); + }; + openRequest.onsuccess = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + resolve(db); + }; + openRequest.onerror = reject; + }); + } + + // This is needed as chromium has a limit on per-entry files in IndexedDB + // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 + // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 + // We set the chunk size to 64MB to stay well-below the limit + var CHUNK_SIZE = 64 * 1024 * 1024; + + async function cacheRemotePackage(db, packageName, packageData, packageMeta) { + var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); + var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); + var chunkSliceStart = 0; + var nextChunkSliceStart = 0; + var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); + var finishedChunks = 0; + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId < chunkCount; chunkId++) { + nextChunkSliceStart += CHUNK_SIZE; + var putPackageRequest = packages.put( + packageData.slice(chunkSliceStart, nextChunkSliceStart), + `package/${packageName}/${chunkId}` + ); + chunkSliceStart = nextChunkSliceStart; + putPackageRequest.onsuccess = (event) => { + finishedChunks++; + if (finishedChunks == chunkCount) { + var transaction_metadata = db.transaction( + [METADATA_STORE_NAME], + IDB_RW + ); + var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); + var putMetadataRequest = metadata.put( + { + 'uuid': packageMeta.uuid, + 'chunkCount': chunkCount + }, + 
`metadata/${packageName}` + ); + putMetadataRequest.onsuccess = (event) => resolve(packageData); + putMetadataRequest.onerror = reject; + } + }; + putPackageRequest.onerror = reject; + } + }); + } + + /* + * Check if there's a cached package, and if so whether it's the latest available. + * Resolves to the cached metadata, or `null` if it is missing or out-of-date. + */ + async function checkCachedPackage(db, packageName) { + var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); + var metadata = transaction.objectStore(METADATA_STORE_NAME); + var getRequest = metadata.get(`metadata/${packageName}`); + return new Promise((resolve, reject) => { + getRequest.onsuccess = (event) => { + var result = event.target.result; + if (result && PACKAGE_UUID === result['uuid']) { + resolve(result); + } else { + resolve(null); + } + } + getRequest.onerror = reject; + }); + } + + async function fetchCachedPackage(db, packageName, metadata) { + var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); + var packages = transaction.objectStore(PACKAGE_STORE_NAME); + + var chunksDone = 0; + var totalSize = 0; + var chunkCount = metadata['chunkCount']; + var chunks = new Array(chunkCount); + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId < chunkCount; chunkId++) { + var getRequest = packages.get(`package/${packageName}/${chunkId}`); + getRequest.onsuccess = (event) => { + if (!event.target.result) { + reject(`CachedPackageNotFound for: ${packageName}`); + return; + } + // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now + if (chunkCount == 1) { + resolve(event.target.result); + } else { + chunksDone++; + totalSize += event.target.result.byteLength; + chunks.push(event.target.result); + if (chunksDone == chunkCount) { + if (chunksDone == 1) { + resolve(event.target.result); + } else { + var tempTyped = new Uint8Array(totalSize); + var byteOffset = 0; + for (var chunkId in chunks) { + var buffer = chunks[chunkId]; + tempTyped.set(new Uint8Array(buffer), byteOffset); + byteOffset += buffer.byteLength; + buffer = undefined; + } + chunks = undefined; + resolve(tempTyped.buffer); + tempTyped = undefined; + } + } + } + }; + getRequest.onerror = reject; + } + }); + } + + function processPackageData(arrayBuffer) { + assert(arrayBuffer, 'Loading data file failed.'); + assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); + var byteArray = new Uint8Array(arrayBuffer); + var curr; + // Reuse the bytearray from the XHR as the source for file reads. 
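// Each DataRequest registered above covers one file's [start, end) byte range inside the
// single downloaded package buffer: onload() slices that range out of
// DataRequest.prototype.byteArray, and finish() writes it into the in-memory filesystem via
// FS_createDataFile before clearing the matching run dependency.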
+ DataRequest.prototype.byteArray = byteArray; + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + DataRequest.prototype.requests[files[i].filename].onload(); + } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4EMLOW8.6.1.data'); + + } + Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4EMLOW8.6.1.data'); + + Module['preloadResults'] ??= {}; + + async function preloadFallback(error) { + console.error(error); + console.error('falling back to default preload behavior'); + processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); + } + + try { + var db = await openDatabase(); + var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); + var useCached = !!pkgMetadata; + Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; + if (useCached) { + processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + } else { + var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); + try { + processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) + } catch (error) { + console.error(error); + processPackageData(packageData); + } + } + } catch(e) { + await preloadFallback(e) + .catch((error) => { + loadDataReject(error); + }); + } + + Module['setStatus']?.('Downloading...'); + + } + if (Module['calledRun']) { + runWithFS(Module) + .catch((error) => { + loadDataReject(error); + }); + } else { + (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it + } + + Module['removeRunDependency']('preload_G4EMLOW8.6.1.js.metadata'); + } + + async function runMetaWithFS() { + Module['addRunDependency']('preload_G4EMLOW8.6.1.js.metadata'); + var metadataUrl = Module['locateFile']?.('preload_G4EMLOW8.6.1.js.metadata', '') ?? 'preload_G4EMLOW8.6.1.js.metadata'; + + var response = await fetch(metadataUrl); + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + var json = await response.json(); + return loadPackage(json); + } + + if (Module['calledRun']) { + runMetaWithFS(); + } else { + (Module['preRun'] ??= []).push(runMetaWithFS); + } + + }); +} +// END the loadDataFile function diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js new file mode 100644 index 000000000..1e4a44698 --- /dev/null +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js @@ -0,0 +1,343 @@ +export default async function loadDataFile(Module) { + + Module['expectedDataFileDownloads'] ??= 0; + Module['expectedDataFileDownloads']++; + // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. 
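/*
 * All preload_*.js modules in this patch share the same shape: a default-exported async
 * loadDataFile(Module) that registers Emscripten run dependencies, downloads (or restores
 * from IndexedDB) one dataset package, and resolves from DataRequest.finish() once the
 * dataset content has been written into the virtual filesystem. A minimal, assumed call
 * pattern — the import path and URL prefix are illustrative placeholders only:
 *
 *   import loadG4ENSDFSTATE from './preload/preload_G4ENSDFSTATE3.0';
 *
 *   Module['locateFile'] = (file) => `datasets/${file}`; // hypothetical location of the *.data packages
 *   await loadG4ENSDFSTATE(Module);                      // resolves after FS_createDataFile completes
 */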
+ var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; + var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; + if (isPthread || isWasmWorker) return; +return new Promise((loadDataResolve, loadDataReject) => { + async function loadPackage(metadata) { + + var PACKAGE_PATH = ''; + if (typeof window === 'object') { + PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); + } else if (typeof process === 'undefined' && typeof location !== 'undefined') { + // web worker + PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); + } + var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/G4ENSDFSTATE3.0.data'; + var REMOTE_PACKAGE_BASE = 'G4ENSDFSTATE3.0.data'; + var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; + var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; + + async function fetchRemotePackage(packageName, packageSize) { + + Module['dataFileDownloads'] ??= {}; + try { + var response = await fetch(packageName); + } catch (e) { + throw new Error(`Network Error: ${packageName}`, {e}); + } + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + + const chunks = []; + const headers = response.headers; + const total = Number(headers.get('Content-Length') ?? packageSize); + let loaded = 0; + + Module['setStatus']?.('Downloading data...'); + const reader = response.body.getReader(); + + while (1) { + var {done, value} = await reader.read(); + if (done) break; + chunks.push(value); + loaded += value.length; + Module['dataFileDownloads'][packageName] = {loaded, total}; + + let totalLoaded = 0; + let totalSize = 0; + + for (const download of Object.values(Module['dataFileDownloads'])) { + totalLoaded += download.loaded; + totalSize += download.total; + } + + Module['setStatus']?.(`Downloading data... 
(${totalLoaded}/${totalSize})`); + } + + const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); + let offset = 0; + for (const chunk of chunks) { + packageData.set(chunk, offset); + offset += chunk.length; + } + return packageData.buffer; + } + + async function runWithFS(Module) { + + function assert(check, msg) { + if (!check) throw new Error(msg); + } +Module['FS_createPath']("/", "data", true, true); +Module['FS_createPath']("/data", "G4ENSDFSTATE3.0", true, true); + + /** @constructor */ + function DataRequest(start, end, audio) { + this.start = start; + this.end = end; + this.audio = audio; + } + DataRequest.prototype = { + requests: {}, + open: function(mode, name) { + this.name = name; + this.requests[name] = this; + Module['addRunDependency'](`fp ${this.name}`); + }, + send: function() {}, + onload: function() { + var byteArray = this.byteArray.subarray(this.start, this.end); + this.finish(byteArray); + }, + finish: async function(byteArray) { + var that = this; + // canOwn this data in the filesystem, it is a slice into the heap that will never change + Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); + Module['removeRunDependency'](`fp ${that.name}`); +loadDataResolve(); + this.requests[this.name] = null; + } + }; + + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', files[i]['filename']); + } + + var PACKAGE_UUID = metadata['package_uuid']; + var IDB_RO = "readonly"; + var IDB_RW = "readwrite"; + var DB_NAME = "EM_PRELOAD_CACHE"; + var DB_VERSION = 1; + var METADATA_STORE_NAME = 'METADATA'; + var PACKAGE_STORE_NAME = 'PACKAGES'; + + async function openDatabase() { + if (typeof indexedDB == 'undefined') { + throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); + } + return new Promise((resolve, reject) => { + var openRequest = indexedDB.open(DB_NAME, DB_VERSION); + openRequest.onupgradeneeded = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + + if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { + db.deleteObjectStore(PACKAGE_STORE_NAME); + } + var packages = db.createObjectStore(PACKAGE_STORE_NAME); + + if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { + db.deleteObjectStore(METADATA_STORE_NAME); + } + var metadata = db.createObjectStore(METADATA_STORE_NAME); + }; + openRequest.onsuccess = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + resolve(db); + }; + openRequest.onerror = reject; + }); + } + + // This is needed as chromium has a limit on per-entry files in IndexedDB + // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 + // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 + // We set the chunk size to 64MB to stay well-below the limit + var CHUNK_SIZE = 64 * 1024 * 1024; + + async function cacheRemotePackage(db, packageName, packageData, packageMeta) { + var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); + var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); + var chunkSliceStart = 0; + var nextChunkSliceStart = 0; + var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); + var finishedChunks = 0; + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId 
< chunkCount; chunkId++) { + nextChunkSliceStart += CHUNK_SIZE; + var putPackageRequest = packages.put( + packageData.slice(chunkSliceStart, nextChunkSliceStart), + `package/${packageName}/${chunkId}` + ); + chunkSliceStart = nextChunkSliceStart; + putPackageRequest.onsuccess = (event) => { + finishedChunks++; + if (finishedChunks == chunkCount) { + var transaction_metadata = db.transaction( + [METADATA_STORE_NAME], + IDB_RW + ); + var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); + var putMetadataRequest = metadata.put( + { + 'uuid': packageMeta.uuid, + 'chunkCount': chunkCount + }, + `metadata/${packageName}` + ); + putMetadataRequest.onsuccess = (event) => resolve(packageData); + putMetadataRequest.onerror = reject; + } + }; + putPackageRequest.onerror = reject; + } + }); + } + + /* + * Check if there's a cached package, and if so whether it's the latest available. + * Resolves to the cached metadata, or `null` if it is missing or out-of-date. + */ + async function checkCachedPackage(db, packageName) { + var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); + var metadata = transaction.objectStore(METADATA_STORE_NAME); + var getRequest = metadata.get(`metadata/${packageName}`); + return new Promise((resolve, reject) => { + getRequest.onsuccess = (event) => { + var result = event.target.result; + if (result && PACKAGE_UUID === result['uuid']) { + resolve(result); + } else { + resolve(null); + } + } + getRequest.onerror = reject; + }); + } + + async function fetchCachedPackage(db, packageName, metadata) { + var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); + var packages = transaction.objectStore(PACKAGE_STORE_NAME); + + var chunksDone = 0; + var totalSize = 0; + var chunkCount = metadata['chunkCount']; + var chunks = new Array(chunkCount); + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId < chunkCount; chunkId++) { + var getRequest = packages.get(`package/${packageName}/${chunkId}`); + getRequest.onsuccess = (event) => { + if (!event.target.result) { + reject(`CachedPackageNotFound for: ${packageName}`); + return; + } + // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now + if (chunkCount == 1) { + resolve(event.target.result); + } else { + chunksDone++; + totalSize += event.target.result.byteLength; + chunks.push(event.target.result); + if (chunksDone == chunkCount) { + if (chunksDone == 1) { + resolve(event.target.result); + } else { + var tempTyped = new Uint8Array(totalSize); + var byteOffset = 0; + for (var chunkId in chunks) { + var buffer = chunks[chunkId]; + tempTyped.set(new Uint8Array(buffer), byteOffset); + byteOffset += buffer.byteLength; + buffer = undefined; + } + chunks = undefined; + resolve(tempTyped.buffer); + tempTyped = undefined; + } + } + } + }; + getRequest.onerror = reject; + } + }); + } + + function processPackageData(arrayBuffer) { + assert(arrayBuffer, 'Loading data file failed.'); + assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); + var byteArray = new Uint8Array(arrayBuffer); + var curr; + // Reuse the bytearray from the XHR as the source for file reads. 
+ DataRequest.prototype.byteArray = byteArray; + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + DataRequest.prototype.requests[files[i].filename].onload(); + } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4ENSDFSTATE3.0.data'); + + } + Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4ENSDFSTATE3.0.data'); + + Module['preloadResults'] ??= {}; + + async function preloadFallback(error) { + console.error(error); + console.error('falling back to default preload behavior'); + processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); + } + + try { + var db = await openDatabase(); + var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); + var useCached = !!pkgMetadata; + Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; + if (useCached) { + processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + } else { + var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); + try { + processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) + } catch (error) { + console.error(error); + processPackageData(packageData); + } + } + } catch(e) { + await preloadFallback(e) + .catch((error) => { + loadDataReject(error); + }); + } + + Module['setStatus']?.('Downloading...'); + + } + if (Module['calledRun']) { + runWithFS(Module) + .catch((error) => { + loadDataReject(error); + }); + } else { + (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it + } + + Module['removeRunDependency']('preload_G4ENSDFSTATE3.0.js.metadata'); + } + + async function runMetaWithFS() { + Module['addRunDependency']('preload_G4ENSDFSTATE3.0.js.metadata'); + var metadataUrl = Module['locateFile']?.('preload_G4ENSDFSTATE3.0.js.metadata', '') ?? 'preload_G4ENSDFSTATE3.0.js.metadata'; + + var response = await fetch(metadataUrl); + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + var json = await response.json(); + return loadPackage(json); + } + + if (Module['calledRun']) { + runMetaWithFS(); + } else { + (Module['preRun'] ??= []).push(runMetaWithFS); + } + + }); +} +// END the loadDataFile function diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js new file mode 100644 index 000000000..5fa1ba19a --- /dev/null +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js @@ -0,0 +1,414 @@ +export default async function loadDataFile(Module) { + + Module['expectedDataFileDownloads'] ??= 0; + Module['expectedDataFileDownloads']++; + // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. 
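/*
 * Note on caching (applies to every preload_*.js module here): downloaded packages are
 * persisted in the 'EM_PRELOAD_CACHE' IndexedDB database as 64 MiB chunks keyed by package
 * path, so repeat visits skip the network fetch while the stored package UUID still matches.
 * During development the cache can be reset with the standard browser API (not something
 * these files call themselves):
 *
 *   indexedDB.deleteDatabase('EM_PRELOAD_CACHE');
 */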
+ var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; + var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; + if (isPthread || isWasmWorker) return; +return new Promise((loadDataResolve, loadDataReject) => { + async function loadPackage(metadata) { + + var PACKAGE_PATH = ''; + if (typeof window === 'object') { + PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); + } else if (typeof process === 'undefined' && typeof location !== 'undefined') { + // web worker + PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); + } + var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/G4NDL4.7.1.data'; + var REMOTE_PACKAGE_BASE = 'G4NDL4.7.1.data'; + var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; + var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; + + async function fetchRemotePackage(packageName, packageSize) { + + Module['dataFileDownloads'] ??= {}; + try { + var response = await fetch(packageName); + } catch (e) { + throw new Error(`Network Error: ${packageName}`, {e}); + } + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + + const chunks = []; + const headers = response.headers; + const total = Number(headers.get('Content-Length') ?? packageSize); + let loaded = 0; + + Module['setStatus']?.('Downloading data...'); + const reader = response.body.getReader(); + + while (1) { + var {done, value} = await reader.read(); + if (done) break; + chunks.push(value); + loaded += value.length; + Module['dataFileDownloads'][packageName] = {loaded, total}; + + let totalLoaded = 0; + let totalSize = 0; + + for (const download of Object.values(Module['dataFileDownloads'])) { + totalLoaded += download.loaded; + totalSize += download.total; + } + + Module['setStatus']?.(`Downloading data... 
(${totalLoaded}/${totalSize})`); + } + + const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); + let offset = 0; + for (const chunk of chunks) { + packageData.set(chunk, offset); + offset += chunk.length; + } + return packageData.buffer; + } + + async function runWithFS(Module) { + + function assert(check, msg) { + if (!check) throw new Error(msg); + } +Module['FS_createPath']("/", "data", true, true); +Module['FS_createPath']("/data", "G4NDL4.7.1", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1", "Capture", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Capture", "CrossSection", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Capture", "FS", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Capture", "FSMF6", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1", "Elastic", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Elastic", "CrossSection", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Elastic", "FS", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1", "Fission", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "CrossSection", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "FC", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "FF", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "FS", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "LC", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "SC", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "TC", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1", "Inelastic", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "CrossSection", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F01", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F02", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F03", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F04", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F05", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F06", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F07", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F08", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F09", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F10", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F11", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F12", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F13", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F14", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F15", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F17", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F18", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F19", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F20", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F21", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F22", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F23", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F24", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F25", 
true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F26", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F27", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F28", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F29", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F30", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F31", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F32", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F33", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F34", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F35", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F36", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "Gammas", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1", "IsotopeProduction", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/IsotopeProduction", "CrossSection", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1", "JENDL_HE", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/JENDL_HE", "neutron", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/JENDL_HE/neutron", "Elastic", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/JENDL_HE/neutron/Elastic", "CrossSection", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/JENDL_HE/neutron", "Inelastic", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/JENDL_HE/neutron/Inelastic", "CrossSection", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1", "ThermalScattering", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering", "Coherent", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Coherent", "CrossSection", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Coherent", "FS", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering", "Incoherent", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Incoherent", "CrossSection", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Incoherent", "FS", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering", "Inelastic", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Inelastic", "CrossSection", true, true); +Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Inelastic", "FS", true, true); + + /** @constructor */ + function DataRequest(start, end, audio) { + this.start = start; + this.end = end; + this.audio = audio; + } + DataRequest.prototype = { + requests: {}, + open: function(mode, name) { + this.name = name; + this.requests[name] = this; + Module['addRunDependency'](`fp ${this.name}`); + }, + send: function() {}, + onload: function() { + var byteArray = this.byteArray.subarray(this.start, this.end); + this.finish(byteArray); + }, + finish: async function(byteArray) { + var that = this; + // canOwn this data in the filesystem, it is a slice into the heap that will never change + Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); + Module['removeRunDependency'](`fp ${that.name}`); +loadDataResolve(); + this.requests[this.name] = null; + } + }; + + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', files[i]['filename']); + } + + var PACKAGE_UUID = 
metadata['package_uuid']; + var IDB_RO = "readonly"; + var IDB_RW = "readwrite"; + var DB_NAME = "EM_PRELOAD_CACHE"; + var DB_VERSION = 1; + var METADATA_STORE_NAME = 'METADATA'; + var PACKAGE_STORE_NAME = 'PACKAGES'; + + async function openDatabase() { + if (typeof indexedDB == 'undefined') { + throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); + } + return new Promise((resolve, reject) => { + var openRequest = indexedDB.open(DB_NAME, DB_VERSION); + openRequest.onupgradeneeded = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + + if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { + db.deleteObjectStore(PACKAGE_STORE_NAME); + } + var packages = db.createObjectStore(PACKAGE_STORE_NAME); + + if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { + db.deleteObjectStore(METADATA_STORE_NAME); + } + var metadata = db.createObjectStore(METADATA_STORE_NAME); + }; + openRequest.onsuccess = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + resolve(db); + }; + openRequest.onerror = reject; + }); + } + + // This is needed as chromium has a limit on per-entry files in IndexedDB + // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 + // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 + // We set the chunk size to 64MB to stay well-below the limit + var CHUNK_SIZE = 64 * 1024 * 1024; + + async function cacheRemotePackage(db, packageName, packageData, packageMeta) { + var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); + var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); + var chunkSliceStart = 0; + var nextChunkSliceStart = 0; + var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); + var finishedChunks = 0; + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId < chunkCount; chunkId++) { + nextChunkSliceStart += CHUNK_SIZE; + var putPackageRequest = packages.put( + packageData.slice(chunkSliceStart, nextChunkSliceStart), + `package/${packageName}/${chunkId}` + ); + chunkSliceStart = nextChunkSliceStart; + putPackageRequest.onsuccess = (event) => { + finishedChunks++; + if (finishedChunks == chunkCount) { + var transaction_metadata = db.transaction( + [METADATA_STORE_NAME], + IDB_RW + ); + var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); + var putMetadataRequest = metadata.put( + { + 'uuid': packageMeta.uuid, + 'chunkCount': chunkCount + }, + `metadata/${packageName}` + ); + putMetadataRequest.onsuccess = (event) => resolve(packageData); + putMetadataRequest.onerror = reject; + } + }; + putPackageRequest.onerror = reject; + } + }); + } + + /* + * Check if there's a cached package, and if so whether it's the latest available. + * Resolves to the cached metadata, or `null` if it is missing or out-of-date. 
+ */ + async function checkCachedPackage(db, packageName) { + var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); + var metadata = transaction.objectStore(METADATA_STORE_NAME); + var getRequest = metadata.get(`metadata/${packageName}`); + return new Promise((resolve, reject) => { + getRequest.onsuccess = (event) => { + var result = event.target.result; + if (result && PACKAGE_UUID === result['uuid']) { + resolve(result); + } else { + resolve(null); + } + } + getRequest.onerror = reject; + }); + } + + async function fetchCachedPackage(db, packageName, metadata) { + var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); + var packages = transaction.objectStore(PACKAGE_STORE_NAME); + + var chunksDone = 0; + var totalSize = 0; + var chunkCount = metadata['chunkCount']; + var chunks = new Array(chunkCount); + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId < chunkCount; chunkId++) { + var getRequest = packages.get(`package/${packageName}/${chunkId}`); + getRequest.onsuccess = (event) => { + if (!event.target.result) { + reject(`CachedPackageNotFound for: ${packageName}`); + return; + } + // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now + if (chunkCount == 1) { + resolve(event.target.result); + } else { + chunksDone++; + totalSize += event.target.result.byteLength; + chunks.push(event.target.result); + if (chunksDone == chunkCount) { + if (chunksDone == 1) { + resolve(event.target.result); + } else { + var tempTyped = new Uint8Array(totalSize); + var byteOffset = 0; + for (var chunkId in chunks) { + var buffer = chunks[chunkId]; + tempTyped.set(new Uint8Array(buffer), byteOffset); + byteOffset += buffer.byteLength; + buffer = undefined; + } + chunks = undefined; + resolve(tempTyped.buffer); + tempTyped = undefined; + } + } + } + }; + getRequest.onerror = reject; + } + }); + } + + function processPackageData(arrayBuffer) { + assert(arrayBuffer, 'Loading data file failed.'); + assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); + var byteArray = new Uint8Array(arrayBuffer); + var curr; + // Reuse the bytearray from the XHR as the source for file reads. 
+ DataRequest.prototype.byteArray = byteArray; + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + DataRequest.prototype.requests[files[i].filename].onload(); + } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4NDL4.7.1.data'); + + } + Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4NDL4.7.1.data'); + + Module['preloadResults'] ??= {}; + + async function preloadFallback(error) { + console.error(error); + console.error('falling back to default preload behavior'); + processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); + } + + try { + var db = await openDatabase(); + var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); + var useCached = !!pkgMetadata; + Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; + if (useCached) { + processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + } else { + var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); + try { + processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) + } catch (error) { + console.error(error); + processPackageData(packageData); + } + } + } catch(e) { + await preloadFallback(e) + .catch((error) => { + loadDataReject(error); + }); + } + + Module['setStatus']?.('Downloading...'); + + } + if (Module['calledRun']) { + runWithFS(Module) + .catch((error) => { + loadDataReject(error); + }); + } else { + (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it + } + + Module['removeRunDependency']('preload_G4NDL4.7.1.js.metadata'); + } + + async function runMetaWithFS() { + Module['addRunDependency']('preload_G4NDL4.7.1.js.metadata'); + var metadataUrl = Module['locateFile']?.('preload_G4NDL4.7.1.js.metadata', '') ?? 'preload_G4NDL4.7.1.js.metadata'; + + var response = await fetch(metadataUrl); + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + var json = await response.json(); + return loadPackage(json); + } + + if (Module['calledRun']) { + runMetaWithFS(); + } else { + (Module['preRun'] ??= []).push(runMetaWithFS); + } + + }); +} +// END the loadDataFile function diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js new file mode 100644 index 000000000..51fbf030e --- /dev/null +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js @@ -0,0 +1,352 @@ +export default async function loadDataFile(Module) { + + Module['expectedDataFileDownloads'] ??= 0; + Module['expectedDataFileDownloads']++; + // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. 
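+        // Note: ENVIRONMENT_IS_PTHREAD and ENVIRONMENT_IS_WASM_WORKER are globals that the Emscripten
+        // runtime defines only in pthread / Wasm Worker builds. The typeof guards below also keep this
+        // module safe when it is imported from a plain dedicated worker (e.g. geantWorker.worker.ts),
+        // where both are undefined and the download proceeds normally.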
+ var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; + var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; + if (isPthread || isWasmWorker) return; +return new Promise((loadDataResolve, loadDataReject) => { + async function loadPackage(metadata) { + + var PACKAGE_PATH = ''; + if (typeof window === 'object') { + PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); + } else if (typeof process === 'undefined' && typeof location !== 'undefined') { + // web worker + PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); + } + var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/G4PARTICLEXS4.1.data'; + var REMOTE_PACKAGE_BASE = 'G4PARTICLEXS4.1.data'; + var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; + var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; + + async function fetchRemotePackage(packageName, packageSize) { + + Module['dataFileDownloads'] ??= {}; + try { + var response = await fetch(packageName); + } catch (e) { + throw new Error(`Network Error: ${packageName}`, {e}); + } + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + + const chunks = []; + const headers = response.headers; + const total = Number(headers.get('Content-Length') ?? packageSize); + let loaded = 0; + + Module['setStatus']?.('Downloading data...'); + const reader = response.body.getReader(); + + while (1) { + var {done, value} = await reader.read(); + if (done) break; + chunks.push(value); + loaded += value.length; + Module['dataFileDownloads'][packageName] = {loaded, total}; + + let totalLoaded = 0; + let totalSize = 0; + + for (const download of Object.values(Module['dataFileDownloads'])) { + totalLoaded += download.loaded; + totalSize += download.total; + } + + Module['setStatus']?.(`Downloading data... 
(${totalLoaded}/${totalSize})`); + } + + const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); + let offset = 0; + for (const chunk of chunks) { + packageData.set(chunk, offset); + offset += chunk.length; + } + return packageData.buffer; + } + + async function runWithFS(Module) { + + function assert(check, msg) { + if (!check) throw new Error(msg); + } +Module['FS_createPath']("/", "data", true, true); +Module['FS_createPath']("/data", "G4PARTICLEXS4.1", true, true); +Module['FS_createPath']("/data/G4PARTICLEXS4.1", "He3", true, true); +Module['FS_createPath']("/data/G4PARTICLEXS4.1", "alpha", true, true); +Module['FS_createPath']("/data/G4PARTICLEXS4.1", "deuteron", true, true); +Module['FS_createPath']("/data/G4PARTICLEXS4.1", "gamma", true, true); +Module['FS_createPath']("/data/G4PARTICLEXS4.1", "neutrino", true, true); +Module['FS_createPath']("/data/G4PARTICLEXS4.1/neutrino", "nu_mu", true, true); +Module['FS_createPath']("/data/G4PARTICLEXS4.1", "neutron", true, true); +Module['FS_createPath']("/data/G4PARTICLEXS4.1", "proton", true, true); +Module['FS_createPath']("/data/G4PARTICLEXS4.1", "triton", true, true); + + /** @constructor */ + function DataRequest(start, end, audio) { + this.start = start; + this.end = end; + this.audio = audio; + } + DataRequest.prototype = { + requests: {}, + open: function(mode, name) { + this.name = name; + this.requests[name] = this; + Module['addRunDependency'](`fp ${this.name}`); + }, + send: function() {}, + onload: function() { + var byteArray = this.byteArray.subarray(this.start, this.end); + this.finish(byteArray); + }, + finish: async function(byteArray) { + var that = this; + // canOwn this data in the filesystem, it is a slice into the heap that will never change + Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); + Module['removeRunDependency'](`fp ${that.name}`); +loadDataResolve(); + this.requests[this.name] = null; + } + }; + + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', files[i]['filename']); + } + + var PACKAGE_UUID = metadata['package_uuid']; + var IDB_RO = "readonly"; + var IDB_RW = "readwrite"; + var DB_NAME = "EM_PRELOAD_CACHE"; + var DB_VERSION = 1; + var METADATA_STORE_NAME = 'METADATA'; + var PACKAGE_STORE_NAME = 'PACKAGES'; + + async function openDatabase() { + if (typeof indexedDB == 'undefined') { + throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); + } + return new Promise((resolve, reject) => { + var openRequest = indexedDB.open(DB_NAME, DB_VERSION); + openRequest.onupgradeneeded = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + + if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { + db.deleteObjectStore(PACKAGE_STORE_NAME); + } + var packages = db.createObjectStore(PACKAGE_STORE_NAME); + + if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { + db.deleteObjectStore(METADATA_STORE_NAME); + } + var metadata = db.createObjectStore(METADATA_STORE_NAME); + }; + openRequest.onsuccess = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + resolve(db); + }; + openRequest.onerror = reject; + }); + } + + // This is needed as chromium has a limit on per-entry files in IndexedDB + // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 + // 
https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 + // We set the chunk size to 64MB to stay well-below the limit + var CHUNK_SIZE = 64 * 1024 * 1024; + + async function cacheRemotePackage(db, packageName, packageData, packageMeta) { + var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); + var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); + var chunkSliceStart = 0; + var nextChunkSliceStart = 0; + var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); + var finishedChunks = 0; + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId < chunkCount; chunkId++) { + nextChunkSliceStart += CHUNK_SIZE; + var putPackageRequest = packages.put( + packageData.slice(chunkSliceStart, nextChunkSliceStart), + `package/${packageName}/${chunkId}` + ); + chunkSliceStart = nextChunkSliceStart; + putPackageRequest.onsuccess = (event) => { + finishedChunks++; + if (finishedChunks == chunkCount) { + var transaction_metadata = db.transaction( + [METADATA_STORE_NAME], + IDB_RW + ); + var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); + var putMetadataRequest = metadata.put( + { + 'uuid': packageMeta.uuid, + 'chunkCount': chunkCount + }, + `metadata/${packageName}` + ); + putMetadataRequest.onsuccess = (event) => resolve(packageData); + putMetadataRequest.onerror = reject; + } + }; + putPackageRequest.onerror = reject; + } + }); + } + + /* + * Check if there's a cached package, and if so whether it's the latest available. + * Resolves to the cached metadata, or `null` if it is missing or out-of-date. + */ + async function checkCachedPackage(db, packageName) { + var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); + var metadata = transaction.objectStore(METADATA_STORE_NAME); + var getRequest = metadata.get(`metadata/${packageName}`); + return new Promise((resolve, reject) => { + getRequest.onsuccess = (event) => { + var result = event.target.result; + if (result && PACKAGE_UUID === result['uuid']) { + resolve(result); + } else { + resolve(null); + } + } + getRequest.onerror = reject; + }); + } + + async function fetchCachedPackage(db, packageName, metadata) { + var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); + var packages = transaction.objectStore(PACKAGE_STORE_NAME); + + var chunksDone = 0; + var totalSize = 0; + var chunkCount = metadata['chunkCount']; + var chunks = new Array(chunkCount); + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId < chunkCount; chunkId++) { + var getRequest = packages.get(`package/${packageName}/${chunkId}`); + getRequest.onsuccess = (event) => { + if (!event.target.result) { + reject(`CachedPackageNotFound for: ${packageName}`); + return; + } + // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now + if (chunkCount == 1) { + resolve(event.target.result); + } else { + chunksDone++; + totalSize += event.target.result.byteLength; + chunks.push(event.target.result); + if (chunksDone == chunkCount) { + if (chunksDone == 1) { + resolve(event.target.result); + } else { + var tempTyped = new Uint8Array(totalSize); + var byteOffset = 0; + for (var chunkId in chunks) { + var buffer = chunks[chunkId]; + tempTyped.set(new Uint8Array(buffer), byteOffset); + byteOffset += buffer.byteLength; + buffer = undefined; + } + chunks = undefined; + resolve(tempTyped.buffer); + tempTyped = undefined; + } + } + } + }; + 
getRequest.onerror = reject; + } + }); + } + + function processPackageData(arrayBuffer) { + assert(arrayBuffer, 'Loading data file failed.'); + assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); + var byteArray = new Uint8Array(arrayBuffer); + var curr; + // Reuse the bytearray from the XHR as the source for file reads. + DataRequest.prototype.byteArray = byteArray; + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + DataRequest.prototype.requests[files[i].filename].onload(); + } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4PARTICLEXS4.1.data'); + + } + Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4PARTICLEXS4.1.data'); + + Module['preloadResults'] ??= {}; + + async function preloadFallback(error) { + console.error(error); + console.error('falling back to default preload behavior'); + processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); + } + + try { + var db = await openDatabase(); + var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); + var useCached = !!pkgMetadata; + Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; + if (useCached) { + processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + } else { + var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); + try { + processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) + } catch (error) { + console.error(error); + processPackageData(packageData); + } + } + } catch(e) { + await preloadFallback(e) + .catch((error) => { + loadDataReject(error); + }); + } + + Module['setStatus']?.('Downloading...'); + + } + if (Module['calledRun']) { + runWithFS(Module) + .catch((error) => { + loadDataReject(error); + }); + } else { + (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it + } + + Module['removeRunDependency']('preload_G4PARTICLEXS4.1.js.metadata'); + } + + async function runMetaWithFS() { + Module['addRunDependency']('preload_G4PARTICLEXS4.1.js.metadata'); + var metadataUrl = Module['locateFile']?.('preload_G4PARTICLEXS4.1.js.metadata', '') ?? 'preload_G4PARTICLEXS4.1.js.metadata'; + + var response = await fetch(metadataUrl); + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + var json = await response.json(); + return loadPackage(json); + } + + if (Module['calledRun']) { + runMetaWithFS(); + } else { + (Module['preRun'] ??= []).push(runMetaWithFS); + } + + }); +} +// END the loadDataFile function diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js new file mode 100644 index 000000000..1e6d139a7 --- /dev/null +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js @@ -0,0 +1,343 @@ +export default async function loadDataFile(Module) { + + Module['expectedDataFileDownloads'] ??= 0; + Module['expectedDataFileDownloads']++; + // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. 
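+        // Caching overview: the downloaded package is stored in IndexedDB (database EM_PRELOAD_CACHE),
+        // split into 64 MB chunks under `package/<name>/<chunkId>`, with a `metadata/<name>` record
+        // holding the package UUID and chunk count. On later loads, a missing record or a UUID mismatch
+        // falls back to a fresh download via fetchRemotePackage().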
+ var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; + var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; + if (isPthread || isWasmWorker) return; +return new Promise((loadDataResolve, loadDataReject) => { + async function loadPackage(metadata) { + + var PACKAGE_PATH = ''; + if (typeof window === 'object') { + PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); + } else if (typeof process === 'undefined' && typeof location !== 'undefined') { + // web worker + PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); + } + var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/G4SAIDDATA2.0.data'; + var REMOTE_PACKAGE_BASE = 'G4SAIDDATA2.0.data'; + var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; + var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; + + async function fetchRemotePackage(packageName, packageSize) { + + Module['dataFileDownloads'] ??= {}; + try { + var response = await fetch(packageName); + } catch (e) { + throw new Error(`Network Error: ${packageName}`, {e}); + } + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + + const chunks = []; + const headers = response.headers; + const total = Number(headers.get('Content-Length') ?? packageSize); + let loaded = 0; + + Module['setStatus']?.('Downloading data...'); + const reader = response.body.getReader(); + + while (1) { + var {done, value} = await reader.read(); + if (done) break; + chunks.push(value); + loaded += value.length; + Module['dataFileDownloads'][packageName] = {loaded, total}; + + let totalLoaded = 0; + let totalSize = 0; + + for (const download of Object.values(Module['dataFileDownloads'])) { + totalLoaded += download.loaded; + totalSize += download.total; + } + + Module['setStatus']?.(`Downloading data... 
(${totalLoaded}/${totalSize})`); + } + + const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); + let offset = 0; + for (const chunk of chunks) { + packageData.set(chunk, offset); + offset += chunk.length; + } + return packageData.buffer; + } + + async function runWithFS(Module) { + + function assert(check, msg) { + if (!check) throw new Error(msg); + } +Module['FS_createPath']("/", "data", true, true); +Module['FS_createPath']("/data", "G4SAIDDATA2.0", true, true); + + /** @constructor */ + function DataRequest(start, end, audio) { + this.start = start; + this.end = end; + this.audio = audio; + } + DataRequest.prototype = { + requests: {}, + open: function(mode, name) { + this.name = name; + this.requests[name] = this; + Module['addRunDependency'](`fp ${this.name}`); + }, + send: function() {}, + onload: function() { + var byteArray = this.byteArray.subarray(this.start, this.end); + this.finish(byteArray); + }, + finish: async function(byteArray) { + var that = this; + // canOwn this data in the filesystem, it is a slice into the heap that will never change + Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); + Module['removeRunDependency'](`fp ${that.name}`); +loadDataResolve(); + this.requests[this.name] = null; + } + }; + + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', files[i]['filename']); + } + + var PACKAGE_UUID = metadata['package_uuid']; + var IDB_RO = "readonly"; + var IDB_RW = "readwrite"; + var DB_NAME = "EM_PRELOAD_CACHE"; + var DB_VERSION = 1; + var METADATA_STORE_NAME = 'METADATA'; + var PACKAGE_STORE_NAME = 'PACKAGES'; + + async function openDatabase() { + if (typeof indexedDB == 'undefined') { + throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); + } + return new Promise((resolve, reject) => { + var openRequest = indexedDB.open(DB_NAME, DB_VERSION); + openRequest.onupgradeneeded = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + + if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { + db.deleteObjectStore(PACKAGE_STORE_NAME); + } + var packages = db.createObjectStore(PACKAGE_STORE_NAME); + + if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { + db.deleteObjectStore(METADATA_STORE_NAME); + } + var metadata = db.createObjectStore(METADATA_STORE_NAME); + }; + openRequest.onsuccess = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + resolve(db); + }; + openRequest.onerror = reject; + }); + } + + // This is needed as chromium has a limit on per-entry files in IndexedDB + // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 + // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 + // We set the chunk size to 64MB to stay well-below the limit + var CHUNK_SIZE = 64 * 1024 * 1024; + + async function cacheRemotePackage(db, packageName, packageData, packageMeta) { + var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); + var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); + var chunkSliceStart = 0; + var nextChunkSliceStart = 0; + var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); + var finishedChunks = 0; + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId < 
chunkCount; chunkId++) { + nextChunkSliceStart += CHUNK_SIZE; + var putPackageRequest = packages.put( + packageData.slice(chunkSliceStart, nextChunkSliceStart), + `package/${packageName}/${chunkId}` + ); + chunkSliceStart = nextChunkSliceStart; + putPackageRequest.onsuccess = (event) => { + finishedChunks++; + if (finishedChunks == chunkCount) { + var transaction_metadata = db.transaction( + [METADATA_STORE_NAME], + IDB_RW + ); + var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); + var putMetadataRequest = metadata.put( + { + 'uuid': packageMeta.uuid, + 'chunkCount': chunkCount + }, + `metadata/${packageName}` + ); + putMetadataRequest.onsuccess = (event) => resolve(packageData); + putMetadataRequest.onerror = reject; + } + }; + putPackageRequest.onerror = reject; + } + }); + } + + /* + * Check if there's a cached package, and if so whether it's the latest available. + * Resolves to the cached metadata, or `null` if it is missing or out-of-date. + */ + async function checkCachedPackage(db, packageName) { + var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); + var metadata = transaction.objectStore(METADATA_STORE_NAME); + var getRequest = metadata.get(`metadata/${packageName}`); + return new Promise((resolve, reject) => { + getRequest.onsuccess = (event) => { + var result = event.target.result; + if (result && PACKAGE_UUID === result['uuid']) { + resolve(result); + } else { + resolve(null); + } + } + getRequest.onerror = reject; + }); + } + + async function fetchCachedPackage(db, packageName, metadata) { + var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); + var packages = transaction.objectStore(PACKAGE_STORE_NAME); + + var chunksDone = 0; + var totalSize = 0; + var chunkCount = metadata['chunkCount']; + var chunks = new Array(chunkCount); + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId < chunkCount; chunkId++) { + var getRequest = packages.get(`package/${packageName}/${chunkId}`); + getRequest.onsuccess = (event) => { + if (!event.target.result) { + reject(`CachedPackageNotFound for: ${packageName}`); + return; + } + // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now + if (chunkCount == 1) { + resolve(event.target.result); + } else { + chunksDone++; + totalSize += event.target.result.byteLength; + chunks.push(event.target.result); + if (chunksDone == chunkCount) { + if (chunksDone == 1) { + resolve(event.target.result); + } else { + var tempTyped = new Uint8Array(totalSize); + var byteOffset = 0; + for (var chunkId in chunks) { + var buffer = chunks[chunkId]; + tempTyped.set(new Uint8Array(buffer), byteOffset); + byteOffset += buffer.byteLength; + buffer = undefined; + } + chunks = undefined; + resolve(tempTyped.buffer); + tempTyped = undefined; + } + } + } + }; + getRequest.onerror = reject; + } + }); + } + + function processPackageData(arrayBuffer) { + assert(arrayBuffer, 'Loading data file failed.'); + assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); + var byteArray = new Uint8Array(arrayBuffer); + var curr; + // Reuse the bytearray from the XHR as the source for file reads. 
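+      // Despite the comment above, the buffer actually arrives via fetchRemotePackage() (fetch) or the
+      // IndexedDB cache rather than an XHR; each DataRequest takes a subarray view of it and writes that
+      // slice into the Emscripten virtual filesystem with FS_createDataFile().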
+ DataRequest.prototype.byteArray = byteArray; + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + DataRequest.prototype.requests[files[i].filename].onload(); + } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4SAIDDATA2.0.data'); + + } + Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4SAIDDATA2.0.data'); + + Module['preloadResults'] ??= {}; + + async function preloadFallback(error) { + console.error(error); + console.error('falling back to default preload behavior'); + processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); + } + + try { + var db = await openDatabase(); + var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); + var useCached = !!pkgMetadata; + Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; + if (useCached) { + processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + } else { + var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); + try { + processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) + } catch (error) { + console.error(error); + processPackageData(packageData); + } + } + } catch(e) { + await preloadFallback(e) + .catch((error) => { + loadDataReject(error); + }); + } + + Module['setStatus']?.('Downloading...'); + + } + if (Module['calledRun']) { + runWithFS(Module) + .catch((error) => { + loadDataReject(error); + }); + } else { + (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it + } + + Module['removeRunDependency']('preload_G4SAIDDATA2.0.js.metadata'); + } + + async function runMetaWithFS() { + Module['addRunDependency']('preload_G4SAIDDATA2.0.js.metadata'); + var metadataUrl = Module['locateFile']?.('preload_G4SAIDDATA2.0.js.metadata', '') ?? 'preload_G4SAIDDATA2.0.js.metadata'; + + var response = await fetch(metadataUrl); + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + var json = await response.json(); + return loadPackage(json); + } + + if (Module['calledRun']) { + runMetaWithFS(); + } else { + (Module['preRun'] ??= []).push(runMetaWithFS); + } + + }); +} +// END the loadDataFile function diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js new file mode 100644 index 000000000..aec733217 --- /dev/null +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js @@ -0,0 +1,343 @@ +export default async function loadDataFile(Module) { + + Module['expectedDataFileDownloads'] ??= 0; + Module['expectedDataFileDownloads']++; + // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. 
+ var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; + var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; + if (isPthread || isWasmWorker) return; +return new Promise((loadDataResolve, loadDataReject) => { + async function loadPackage(metadata) { + + var PACKAGE_PATH = ''; + if (typeof window === 'object') { + PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); + } else if (typeof process === 'undefined' && typeof location !== 'undefined') { + // web worker + PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); + } + var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/PhotonEvaporation6.1.data'; + var REMOTE_PACKAGE_BASE = 'PhotonEvaporation6.1.data'; + var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; + var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; + + async function fetchRemotePackage(packageName, packageSize) { + + Module['dataFileDownloads'] ??= {}; + try { + var response = await fetch(packageName); + } catch (e) { + throw new Error(`Network Error: ${packageName}`, {e}); + } + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + + const chunks = []; + const headers = response.headers; + const total = Number(headers.get('Content-Length') ?? packageSize); + let loaded = 0; + + Module['setStatus']?.('Downloading data...'); + const reader = response.body.getReader(); + + while (1) { + var {done, value} = await reader.read(); + if (done) break; + chunks.push(value); + loaded += value.length; + Module['dataFileDownloads'][packageName] = {loaded, total}; + + let totalLoaded = 0; + let totalSize = 0; + + for (const download of Object.values(Module['dataFileDownloads'])) { + totalLoaded += download.loaded; + totalSize += download.total; + } + + Module['setStatus']?.(`Downloading data... 
(${totalLoaded}/${totalSize})`); + } + + const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); + let offset = 0; + for (const chunk of chunks) { + packageData.set(chunk, offset); + offset += chunk.length; + } + return packageData.buffer; + } + + async function runWithFS(Module) { + + function assert(check, msg) { + if (!check) throw new Error(msg); + } +Module['FS_createPath']("/", "data", true, true); +Module['FS_createPath']("/data", "PhotonEvaporation6.1", true, true); + + /** @constructor */ + function DataRequest(start, end, audio) { + this.start = start; + this.end = end; + this.audio = audio; + } + DataRequest.prototype = { + requests: {}, + open: function(mode, name) { + this.name = name; + this.requests[name] = this; + Module['addRunDependency'](`fp ${this.name}`); + }, + send: function() {}, + onload: function() { + var byteArray = this.byteArray.subarray(this.start, this.end); + this.finish(byteArray); + }, + finish: async function(byteArray) { + var that = this; + // canOwn this data in the filesystem, it is a slice into the heap that will never change + Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); + Module['removeRunDependency'](`fp ${that.name}`); +loadDataResolve(); + this.requests[this.name] = null; + } + }; + + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', files[i]['filename']); + } + + var PACKAGE_UUID = metadata['package_uuid']; + var IDB_RO = "readonly"; + var IDB_RW = "readwrite"; + var DB_NAME = "EM_PRELOAD_CACHE"; + var DB_VERSION = 1; + var METADATA_STORE_NAME = 'METADATA'; + var PACKAGE_STORE_NAME = 'PACKAGES'; + + async function openDatabase() { + if (typeof indexedDB == 'undefined') { + throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); + } + return new Promise((resolve, reject) => { + var openRequest = indexedDB.open(DB_NAME, DB_VERSION); + openRequest.onupgradeneeded = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + + if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { + db.deleteObjectStore(PACKAGE_STORE_NAME); + } + var packages = db.createObjectStore(PACKAGE_STORE_NAME); + + if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { + db.deleteObjectStore(METADATA_STORE_NAME); + } + var metadata = db.createObjectStore(METADATA_STORE_NAME); + }; + openRequest.onsuccess = (event) => { + var db = /** @type {IDBDatabase} */ (event.target.result); + resolve(db); + }; + openRequest.onerror = reject; + }); + } + + // This is needed as chromium has a limit on per-entry files in IndexedDB + // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 + // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 + // We set the chunk size to 64MB to stay well-below the limit + var CHUNK_SIZE = 64 * 1024 * 1024; + + async function cacheRemotePackage(db, packageName, packageData, packageMeta) { + var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); + var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); + var chunkSliceStart = 0; + var nextChunkSliceStart = 0; + var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); + var finishedChunks = 0; + + return new Promise((resolve, reject) => { + for (var chunkId = 0; 
chunkId < chunkCount; chunkId++) { + nextChunkSliceStart += CHUNK_SIZE; + var putPackageRequest = packages.put( + packageData.slice(chunkSliceStart, nextChunkSliceStart), + `package/${packageName}/${chunkId}` + ); + chunkSliceStart = nextChunkSliceStart; + putPackageRequest.onsuccess = (event) => { + finishedChunks++; + if (finishedChunks == chunkCount) { + var transaction_metadata = db.transaction( + [METADATA_STORE_NAME], + IDB_RW + ); + var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); + var putMetadataRequest = metadata.put( + { + 'uuid': packageMeta.uuid, + 'chunkCount': chunkCount + }, + `metadata/${packageName}` + ); + putMetadataRequest.onsuccess = (event) => resolve(packageData); + putMetadataRequest.onerror = reject; + } + }; + putPackageRequest.onerror = reject; + } + }); + } + + /* + * Check if there's a cached package, and if so whether it's the latest available. + * Resolves to the cached metadata, or `null` if it is missing or out-of-date. + */ + async function checkCachedPackage(db, packageName) { + var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); + var metadata = transaction.objectStore(METADATA_STORE_NAME); + var getRequest = metadata.get(`metadata/${packageName}`); + return new Promise((resolve, reject) => { + getRequest.onsuccess = (event) => { + var result = event.target.result; + if (result && PACKAGE_UUID === result['uuid']) { + resolve(result); + } else { + resolve(null); + } + } + getRequest.onerror = reject; + }); + } + + async function fetchCachedPackage(db, packageName, metadata) { + var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); + var packages = transaction.objectStore(PACKAGE_STORE_NAME); + + var chunksDone = 0; + var totalSize = 0; + var chunkCount = metadata['chunkCount']; + var chunks = new Array(chunkCount); + + return new Promise((resolve, reject) => { + for (var chunkId = 0; chunkId < chunkCount; chunkId++) { + var getRequest = packages.get(`package/${packageName}/${chunkId}`); + getRequest.onsuccess = (event) => { + if (!event.target.result) { + reject(`CachedPackageNotFound for: ${packageName}`); + return; + } + // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now + if (chunkCount == 1) { + resolve(event.target.result); + } else { + chunksDone++; + totalSize += event.target.result.byteLength; + chunks.push(event.target.result); + if (chunksDone == chunkCount) { + if (chunksDone == 1) { + resolve(event.target.result); + } else { + var tempTyped = new Uint8Array(totalSize); + var byteOffset = 0; + for (var chunkId in chunks) { + var buffer = chunks[chunkId]; + tempTyped.set(new Uint8Array(buffer), byteOffset); + byteOffset += buffer.byteLength; + buffer = undefined; + } + chunks = undefined; + resolve(tempTyped.buffer); + tempTyped = undefined; + } + } + } + }; + getRequest.onerror = reject; + } + }); + } + + function processPackageData(arrayBuffer) { + assert(arrayBuffer, 'Loading data file failed.'); + assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); + var byteArray = new Uint8Array(arrayBuffer); + var curr; + // Reuse the bytearray from the XHR as the source for file reads. 
+ DataRequest.prototype.byteArray = byteArray; + var files = metadata['files']; + for (var i = 0; i < files.length; ++i) { + DataRequest.prototype.requests[files[i].filename].onload(); + } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/PhotonEvaporation6.1.data'); + + } + Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/PhotonEvaporation6.1.data'); + + Module['preloadResults'] ??= {}; + + async function preloadFallback(error) { + console.error(error); + console.error('falling back to default preload behavior'); + processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); + } + + try { + var db = await openDatabase(); + var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); + var useCached = !!pkgMetadata; + Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; + if (useCached) { + processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + } else { + var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); + try { + processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) + } catch (error) { + console.error(error); + processPackageData(packageData); + } + } + } catch(e) { + await preloadFallback(e) + .catch((error) => { + loadDataReject(error); + }); + } + + Module['setStatus']?.('Downloading...'); + + } + if (Module['calledRun']) { + runWithFS(Module) + .catch((error) => { + loadDataReject(error); + }); + } else { + (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it + } + + Module['removeRunDependency']('preload_PhotonEvaporation6.1.js.metadata'); + } + + async function runMetaWithFS() { + Module['addRunDependency']('preload_PhotonEvaporation6.1.js.metadata'); + var metadataUrl = Module['locateFile']?.('preload_PhotonEvaporation6.1.js.metadata', '') ?? 
'preload_PhotonEvaporation6.1.js.metadata'; + + var response = await fetch(metadataUrl); + if (!response.ok) { + throw new Error(`${response.status}: ${response.url}`); + } + var json = await response.json(); + return loadPackage(json); + } + + if (Module['calledRun']) { + runMetaWithFS(); + } else { + (Module['preRun'] ??= []).push(runMetaWithFS); + } + + }); +} +// END the loadDataFile function diff --git a/src/libs/geant4_web/geantWorker.worker.ts b/src/libs/geant4_web/geantWorker.worker.ts new file mode 100644 index 000000000..1920afb85 --- /dev/null +++ b/src/libs/geant4_web/geantWorker.worker.ts @@ -0,0 +1,330 @@ +import createMainModule from './geant4_wasm/geant4_wasm' + +import { default as initG4EMLOW } from './geant4_wasm/preload/preload_G4EMLOW8.6.1'; +import { default as initG4ENSDFSTATE } from './geant4_wasm/preload/preload_G4ENSDFSTATE3.0'; +import { default as initG4NDL } from './geant4_wasm/preload/preload_G4NDL4.7.1'; +import { default as initG4PARTICLEXS } from './geant4_wasm/preload/preload_G4PARTICLEXS4.1'; +import { default as initG4SAIDDATA } from './geant4_wasm/preload/preload_G4SAIDDATA2.0'; +import { default as initPhotoEvaporation } from './geant4_wasm/preload/preload_PhotonEvaporation6.1'; + +const s3_prefix_map: Record = { + ".wasm": "https://s3p.cloud.cyfronet.pl/geant4-wasm/", + ".data": "https://s3p.cloud.cyfronet.pl/geant4-wasm/datafiles/", + ".metadata": "https://s3p.cloud.cyfronet.pl/geant4-wasm/datafiles/", + ".json": "https://s3p.cloud.cyfronet.pl/geant4-wasm/lazy_files_metadata/", +}; + +/* eslint-disable-next-line no-restricted-globals */ +var ctx: Worker = self as any; // TypeScript type assertion to treat self as a Worker + +var preModule = { + preRun: [ ], + postRun: [], + onRuntimeInitialized: function () { + console.log("onRuntimeInitialized"); + postMessage({ type: 'init', data: "onRuntimeInitialized" }); + }, + printErr: (function () { + return function (text: any) { + if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' '); + + if (!text.includes('dependency')) { + console.error(text); + } + }; + })(), + print: (function () { + + return function (text: any) { + if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' '); + + // console.log(text); + postMessage({ type: 'print', data: text }); + + }; + })(), + last: { + time: Date.now(), + text: '' + }, + setStatus: function (text: string) { + if (!preModule.last) preModule.last = { time: Date.now(), text: '' }; + if (text === preModule.last.text) return; + var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/); + var now = Date.now(); + if (m && now - preModule.last.time < 30) return; // if this is a progress update, skip it if too soon + preModule.last.time = now; + preModule.last.text = text; + postMessage({ type: 'status', data: text }); + }, + totalDependencies: 0, + monitorRunDependencies: function (left: any) { + this.totalDependencies = Math.max(this.totalDependencies, left); + preModule.setStatus(left ? 'Preparing... 
(' + (this.totalDependencies - left) + '/' + this.totalDependencies + ')' : 'All downloads complete.'); + }, + locateFile: function (path: any, prefix: any) { + // if it's a mem init file, use a custom dir + const ext = path.slice(path.lastIndexOf('.')); + if (ext in s3_prefix_map) { + return s3_prefix_map[ext] + path; + } + + // otherwise, use the default, the prefix (JS file's dir) + the path + return prefix + path; + }, +}; + +var mod = createMainModule(preModule); +mod.then((module) => { + const tClass = new module.TestClass(1, 2); + + console.log(tClass.testMethod()); + const vec = new module.vector_int(); + vec.push_back(1); + vec.push_back(2); + vec.push_back(3); + + console.log(tClass.complicatedFunction(vec)); +}); + +ctx.onmessage = async (event: MessageEvent) => { + switch (event.data.type) { + case "loadDepsData": { + const res = await mod.then(async (module) => { + + console.log("Initializing lazy files..."); + postMessage({ type: 'init', data: "afsdafgdfghadsffadf" }); + try { + postMessage({ type: 'status', data: 'Name: G4ENSDFSTATE' }); + await initG4ENSDFSTATE(module); + postMessage({ type: 'status', data: 'Name: G4EMLOW' }); + await initG4EMLOW(module); + postMessage({ type: 'status', data: 'Name: G4NDL' }); + await initG4NDL(module); + postMessage({ type: 'status', data: 'Name: G4PARTICLEXS' }); + await initG4PARTICLEXS(module); + postMessage({ type: 'status', data: 'Name: G4SAIDDATA' }); + await initG4SAIDDATA(module); + postMessage({ type: 'status', data: 'Name: PhotonEvaporation' }); + await initPhotoEvaporation(module); + } catch (error: unknown) { + console.error("Error initializing lazy files:", (error as Error).message); + } + postMessage({ type: 'status', message: 'Datasets initialized' }); + }); + break; + } + case "loadDepsLazy": { + const res = await mod.then(async (module) => { + module.FS_createPath('/', 'data', true, true); + module.FS_createPath('/data', 'G4EMLOW8.6.1', true, true); + module.FS_createPath('/data', 'G4ENSDFSTATE3.0', true, true); + module.FS_createPath('/data', 'G4NDL4.7.1', true, true); + module.FS_createPath('/data', 'G4PARTICLEXS4.1', true, true); + module.FS_createPath('/data', 'G4SAIDDATA2.0', true, true); + module.FS_createPath('/data', 'PhotonEvaporation6.1', true, true); + + const jsonFiles = [ + "load_G4EMLOW8.6.1.json", + "load_G4ENSDFSTATE3.0.json", + "load_G4NDL4.7.1.json", + "load_G4PARTICLEXS4.1.json", + "load_G4SAIDDATA2.0.json", + "load_PhotonEvaporation6.1.json" + ]; + + for (const jsonFile of jsonFiles) { + const path = s3_prefix_map[".json"] + jsonFile; + await fetch(path) + .then(response => { + if (!response.ok) { + throw new Error("HTTP error " + response.status); + } + return response.json(); + }) + .then((data: any) => { + for (const file of data) { + if (file.type === 'file') { + module.FS_createLazyFile(file.parent, file.name, file.url, true, true); + } else if (file.type === 'path') { + module.FS_createPath(file.parent, file.name, true, true); + } + } + }); + console.log(`Loaded lazy files from ${jsonFile}`); + } + }); + break; + } + case "runSimulation": + try { + console.log("Running simulation..."); + const initResult = await mod.then((module) => { + module.Geant4_init() + }); + console.log("Initialization result:", initResult); + + const result = await mod.then((module) => module.Geant4_run()); + ctx.postMessage({ + type: "result", + result: result + }); + } catch (error: unknown) { + ctx.postMessage({ + type: "error", + message: (error as Error).message + }); + } + break; + case "runGDML": + try { + 
console.log("Running GDML simulation..."); + const gdmlResult = await mod.then((module) => { + module.FS.createFile("/", "geom.gdml", null, true, true); + module.FS.createFile("/", "init.mac", null, true, true); + + module.FS.writeFile('geom.gdml', +` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +` + ); + + module.FS.writeFile('init.mac', +`/run/initialize + +########################################## +####### Particle Source definition ####### +########################################## + +/gps/verbose 0 +/gps/particle proton +/gps/position 0 0 -10.5 cm + +/gps/pos/type Beam +/gps/direction 0 0 1 + +/gps/ene/type Gauss +/gps/ene/mono 150 MeV +/gps/ene/sigma 1.5 MeV +/gps/ene/max 1000 MeV + +########################################## +################ Scoring ################# +########################################## + +/score/create/cylinderMesh CylZ_Mesh +/score/mesh/cylinderSize 5 10 cm +/score/mesh/nBin 1 400 1 +/score/quantity/energyDeposit eDep +/score/filter/particle protonFilter proton +/score/quantity/cellFlux fluence +/score/filter/particle protonFilter proton +/score/close + +/score/create/boxMesh YZ_Mesh +/score/mesh/boxSize 0.5 4. 10. cm +/score/mesh/nBin 1 80 400 +/score/quantity/energyDeposit eDep +/score/filter/particle protonFilter proton +/score/quantity/cellFlux fluence +/score/filter/particle protonFilter proton +/score/close + +/score/create/probe Pr 2. cm +/score/probe/locate 0. 0. -5. cm +/score/quantity/cellFlux fluxdiff +/score/filter/particle protonFilter proton +/score/close + +/analysis/h1/create fluxdiff Pr_differential 100 0. 200. MeV + +/score/fill1D 0 Pr fluxdiff + +########################################## +################## Run ################### +########################################## + +/run/beamOn 10000 + +########################################## +############ Collect results ############# +########################################## + +/particle/dump +/score/dumpQuantityToFile CylZ_Mesh eDep cylz_edep.txt +/score/dumpQuantityToFile CylZ_Mesh fluence cylz_fluence.txt +/score/dumpQuantityToFile YZ_Mesh eDep yz_edep.txt +/score/dumpQuantityToFile YZ_Mesh fluence yz_fluence.txt +/score/dumpQuantityToFile Pr fluxdiff diff.txt +` + ); + return module.Geant4_GDML(); + }); + + console.log("GDML run result:", gdmlResult); + + const result_data = await mod.then((module) => { + return module.FS.readFile("cylz_fluence.txt", { encoding: "utf8" }); + }); + + ctx.postMessage({ + type: "gdmlResult", + result: gdmlResult + }); + } catch (error: unknown) { + ctx.postMessage({ + type: "error", + message: (error as Error).message, + error: error + }); + } + break; + default: + console.warn("Unknown message type:", event.data.type); + } +} \ No newline at end of file From ee7d1405c14aee63af27093d7430975b1a0410a0 Mon Sep 17 00:00:00 2001 From: Konrad Michalik Date: Sun, 14 Sep 2025 19:41:06 +0200 Subject: [PATCH 2/4] Fix updates on react side --- .../components/Simulation/Geant4Datasets.tsx | 23 +++++++++++++++---- src/libs/geant4_web/DatasetDownloadManager.ts | 18 +++++++++------ 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/src/WrapperApp/components/Simulation/Geant4Datasets.tsx b/src/WrapperApp/components/Simulation/Geant4Datasets.tsx index b9d734e45..e2dbb4ac9 100644 --- a/src/WrapperApp/components/Simulation/Geant4Datasets.tsx +++ b/src/WrapperApp/components/Simulation/Geant4Datasets.tsx @@ -5,7 +5,6 @@ import { AccordionSummary, Box, Button, - CircularProgress, LinearProgress, Typography, 
useTheme @@ -30,11 +29,27 @@ function DatasetCurrentStatus(props: { status: DatasetStatus }) { return ( - {status.name} - {status.status === DatasetDownloadStatus.DONE && } + + {status.name} + {status.status === DatasetDownloadStatus.DONE && ( + + )} + {(status.status === DatasetDownloadStatus.DOWNLOADING || status.status === DatasetDownloadStatus.PROCESSING) && ( - + + + + {Math.round((status.done! / status.total!) * 10000) / 100}% + + )} {status.status === DatasetDownloadStatus.IDLE && } diff --git a/src/libs/geant4_web/DatasetDownloadManager.ts b/src/libs/geant4_web/DatasetDownloadManager.ts index 236f059db..ec633a716 100644 --- a/src/libs/geant4_web/DatasetDownloadManager.ts +++ b/src/libs/geant4_web/DatasetDownloadManager.ts @@ -22,7 +22,7 @@ export interface DatasetStatus { } const downloadRegex = /Downloading data... \((\d+)\/(\d+)\)/g; -const processingRegex = /Processing... \((\d+)\/(\d+)\)/g; +const processingRegex = /Preparing... \((\d+)\/(\d+)\)/g; export function useDatasetDownloadManager() { const [managerState, setManagerState] = useState(DownloadManagerStatus.IDLE); @@ -43,9 +43,12 @@ export function useDatasetDownloadManager() { ); useEffect(() => { - const worker = new Worker(new URL('./geantWorker.worker.ts', import.meta.url)); + setWorker(new Worker(new URL('./geantWorker.worker.ts', import.meta.url))); + }, []); + + useEffect(() => { let done = '', total = ''; - worker.onmessage = (event) => { + const handler = (event: MessageEvent) => { switch (event.data.type) { case 'status': switch (true) { @@ -63,7 +66,7 @@ export function useDatasetDownloadManager() { [name]: { name, status: DatasetDownloadStatus.IDLE } })); break; - case event.data.data?.startsWith('Downloading data'): + case event.data.data?.startsWith('Downloading data... ('): [, done, total] = Array.from(event.data.data.matchAll(downloadRegex))[0] as string[]; setDatasetStates(states => ({ ...states, @@ -75,7 +78,7 @@ export function useDatasetDownloadManager() { } })); break; - case event.data.data?.startsWith('Processing'): + case event.data.data?.startsWith('Preparing... 
('): [, done, total] = Array.from(event.data.data.matchAll(processingRegex))[0] as string[]; setDatasetStates(states => ({ ...states, @@ -106,8 +109,9 @@ export function useDatasetDownloadManager() { break; } }; - setWorker(worker); - }, []); + worker?.addEventListener('message', handler); + return () => worker?.removeEventListener('message', handler); + }, [worker, dataset]); return { managerState, datasetStates: Object.values(datasetStates), startDownload }; } \ No newline at end of file From 0492b1c6463560babb0968ced46815673571c90e Mon Sep 17 00:00:00 2001 From: Konrad Michalik Date: Sun, 14 Sep 2025 21:04:24 +0200 Subject: [PATCH 3/4] Patch preload_XYZ to send more info and use it to simplify --- src/libs/geant4_web/DatasetDownloadManager.ts | 77 ++++++++++--------- .../preload/preload_G4EMLOW8.6.1.js | 4 +- .../preload/preload_G4ENSDFSTATE3.0.js | 4 +- .../geant4_wasm/preload/preload_G4NDL4.7.1.js | 4 +- .../preload/preload_G4PARTICLEXS4.1.js | 4 +- .../preload/preload_G4SAIDDATA2.0.js | 4 +- .../preload/preload_PhotonEvaporation6.1.js | 4 +- src/libs/geant4_web/geantWorker.worker.ts | 11 +-- 8 files changed, 62 insertions(+), 50 deletions(-) diff --git a/src/libs/geant4_web/DatasetDownloadManager.ts b/src/libs/geant4_web/DatasetDownloadManager.ts index ec633a716..9af872136 100644 --- a/src/libs/geant4_web/DatasetDownloadManager.ts +++ b/src/libs/geant4_web/DatasetDownloadManager.ts @@ -21,13 +21,15 @@ export interface DatasetStatus { total?: number, } -const downloadRegex = /Downloading data... \((\d+)\/(\d+)\)/g; -const processingRegex = /Preparing... \((\d+)\/(\d+)\)/g; +const idleRegex = /IDLE \((\w+)\)/g; +const downloadEndRegex = /END DL \((\w+)\)/g; +const downloadRegex = /DL \((\w+)\) \((\d+)\/(\d+)\)/g; +const processingRegex = /PROCESS \((\d+)\/(\d+)\)/g; export function useDatasetDownloadManager() { const [managerState, setManagerState] = useState(DownloadManagerStatus.IDLE); const [datasetStates, setDatasetStates] = useState>({}); - const [dataset, setDataset] = useState(); + const [processingState, setProcessingState] = useState(undefined); const [idle, setIdle] = useState(true); const [worker, setWorker] = useState(); @@ -35,6 +37,7 @@ export function useDatasetDownloadManager() { idle ? () => { worker?.postMessage({ type: 'loadDepsData' }); + setProcessingState({ name: 'Processing downloaded files', status: DatasetDownloadStatus.PROCESSING, done: 0, total: 1 }); setManagerState(DownloadManagerStatus.WORKING); setIdle(false); } @@ -47,60 +50,59 @@ export function useDatasetDownloadManager() { }, []); useEffect(() => { - let done = '', total = ''; + let done = '', total = '', dataset = ''; const handler = (event: MessageEvent) => { switch (event.data.type) { case 'status': switch (true) { - case event.data.data?.startsWith('Name'): - if (dataset) { - setDatasetStates(states => ({ - ...states, - [dataset]: { name: dataset, status: DatasetDownloadStatus.DONE } - })); - } - const name = event.data.data.slice(6, -1); - setDataset(name); + case event.data.data?.startsWith('IDLE'): + [, dataset] = Array.from(event.data.data.matchAll(idleRegex))[0] as string[]; setDatasetStates(states => ({ ...states, - [name]: { name, status: DatasetDownloadStatus.IDLE } + [dataset]: { + name: dataset, + status: states[dataset] ? states[dataset].status : DatasetDownloadStatus.IDLE, + } })); break; - case event.data.data?.startsWith('Downloading data... 
('): - [, done, total] = Array.from(event.data.data.matchAll(downloadRegex))[0] as string[]; + case event.data.data?.startsWith('DL'): + [, dataset, done, total] = Array.from(event.data.data.matchAll(downloadRegex))[0] as string[]; setDatasetStates(states => ({ ...states, - [dataset!]: { - name: dataset!, + [dataset]: { + name: dataset, status: DatasetDownloadStatus.DOWNLOADING, done: parseInt(done), total: parseInt(total) } })); break; - case event.data.data?.startsWith('Preparing... ('): - [, done, total] = Array.from(event.data.data.matchAll(processingRegex))[0] as string[]; + case event.data.data?.startsWith('END DL'): + [, dataset] = Array.from(event.data.data.matchAll(downloadEndRegex))[0] as string[]; + console.log('end dl', dataset); setDatasetStates(states => ({ ...states, - [dataset!]: { - name: dataset!, - status: DatasetDownloadStatus.PROCESSING, + [dataset]: { + name: dataset, + status: DatasetDownloadStatus.DONE, done: parseInt(done), total: parseInt(total) } })); break; - case event.data.data?.startsWith('Datasets initialized'): - setManagerState(DownloadManagerStatus.FINISHED); + case event.data.data?.startsWith('PROCESS'): + [, done, total] = Array.from(event.data.data.matchAll(processingRegex))[0] as string[]; + setProcessingState({ + name: 'Processing downloaded files', + status: DatasetDownloadStatus.PROCESSING, + done: parseInt(done), + total: parseInt(total) + }) break; - default: - console.log('Status: ', event.data.data); - if (dataset) { - setDatasetStates(states => ({ - ...states, - [dataset!]: { name: dataset!, status: DatasetDownloadStatus.IDLE } - })); - } + case event.data.data?.startsWith('INIT END'): + setManagerState(DownloadManagerStatus.FINISHED); + setDatasetStates(states => Object.fromEntries(Object.entries(states).map(([k, v], _) => [k, { ...v, status: DatasetDownloadStatus.DONE }]))); + setProcessingState({ name: 'Processing downloaded files', status: DatasetDownloadStatus.DONE }); break; } break; @@ -111,7 +113,12 @@ export function useDatasetDownloadManager() { }; worker?.addEventListener('message', handler); return () => worker?.removeEventListener('message', handler); - }, [worker, dataset]); + }, [worker]); + + let allStates = Object.values(datasetStates); + if (processingState) { + allStates.push(processingState); + } - return { managerState, datasetStates: Object.values(datasetStates), startDownload }; + return { managerState, datasetStates: allStates, startDownload }; } \ No newline at end of file diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js index fec0c48bf..0b92d5231 100644 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js @@ -56,8 +56,9 @@ return new Promise((loadDataResolve, loadDataReject) => { totalSize += download.total; } - Module['setStatus']?.(`Downloading data... 
(${totalLoaded}/${totalSize})`); + Module['setStatus']?.(`DL (G4EMLOW) (${totalLoaded}/${totalSize})`); } + Module['setStatus']?.(`END DL (G4EMLOW)`); const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); let offset = 0; @@ -365,6 +366,7 @@ loadDataResolve(); Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; if (useCached) { processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + Module['setStatus']?.(`END DL (G4EMLOW)`); } else { var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); try { diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js index 1e4a44698..258a5cc3b 100644 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js @@ -56,8 +56,9 @@ return new Promise((loadDataResolve, loadDataReject) => { totalSize += download.total; } - Module['setStatus']?.(`Downloading data... (${totalLoaded}/${totalSize})`); + Module['setStatus']?.(`DL (G4ENSDFSTATE) (${totalLoaded}/${totalSize})`); } + Module['setStatus']?.(`END DL (G4ENSDFSTATE)`); const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); let offset = 0; @@ -289,6 +290,7 @@ loadDataResolve(); Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; if (useCached) { processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + Module['setStatus']?.(`END DL (G4ENSDFSTATE)`); } else { var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); try { diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js index 5fa1ba19a..6cea7293c 100644 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js @@ -56,8 +56,9 @@ return new Promise((loadDataResolve, loadDataReject) => { totalSize += download.total; } - Module['setStatus']?.(`Downloading data... (${totalLoaded}/${totalSize})`); + Module['setStatus']?.(`DL (G4NDL) (${totalLoaded}/${totalSize})`); } + Module['setStatus']?.(`END DL (G4NDL)`); const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); let offset = 0; @@ -360,6 +361,7 @@ loadDataResolve(); Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; if (useCached) { processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + Module['setStatus']?.(`END DL (G4NDL)`); } else { var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); try { diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js index 51fbf030e..e7a8b4c13 100644 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js @@ -56,8 +56,9 @@ return new Promise((loadDataResolve, loadDataReject) => { totalSize += download.total; } - Module['setStatus']?.(`Downloading data... 
(${totalLoaded}/${totalSize})`); + Module['setStatus']?.(`DL (G4PARTICLEXS) (${totalLoaded}/${totalSize})`); } + Module['setStatus']?.(`END DL (G4PARTICLEXS)`); const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); let offset = 0; @@ -298,6 +299,7 @@ loadDataResolve(); Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; if (useCached) { processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + Module['setStatus']?.(`END DL (G4PARTICLEXS)`); } else { var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); try { diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js index 1e6d139a7..65cfae365 100644 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js @@ -56,8 +56,9 @@ return new Promise((loadDataResolve, loadDataReject) => { totalSize += download.total; } - Module['setStatus']?.(`Downloading data... (${totalLoaded}/${totalSize})`); + Module['setStatus']?.(`DL (G4SAIDDATA) (${totalLoaded}/${totalSize})`); } + Module['setStatus']?.(`END DL (G4SAIDDATA)`); const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); let offset = 0; @@ -289,6 +290,7 @@ loadDataResolve(); Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; if (useCached) { processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + Module['setStatus']?.(`END DL (G4SAIDDATA)`); } else { var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); try { diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js index aec733217..a6d8d24d0 100644 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js +++ b/src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js @@ -56,8 +56,9 @@ return new Promise((loadDataResolve, loadDataReject) => { totalSize += download.total; } - Module['setStatus']?.(`Downloading data... (${totalLoaded}/${totalSize})`); + Module['setStatus']?.(`DL (PhotonEvaporation) (${totalLoaded}/${totalSize})`); } + Module['setStatus']?.(`END DL (PhotonEvaporation)`); const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); let offset = 0; @@ -289,6 +290,7 @@ loadDataResolve(); Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; if (useCached) { processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); + Module['setStatus']?.(`END DL (PhotonEvaporation)`); } else { var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); try { diff --git a/src/libs/geant4_web/geantWorker.worker.ts b/src/libs/geant4_web/geantWorker.worker.ts index 1920afb85..dd439cf0b 100644 --- a/src/libs/geant4_web/geantWorker.worker.ts +++ b/src/libs/geant4_web/geantWorker.worker.ts @@ -60,7 +60,7 @@ var preModule = { totalDependencies: 0, monitorRunDependencies: function (left: any) { this.totalDependencies = Math.max(this.totalDependencies, left); - preModule.setStatus(left ? 'Preparing... (' + (this.totalDependencies - left) + '/' + this.totalDependencies + ')' : 'All downloads complete.'); + preModule.setStatus(left ? 
'PROCESS (' + (this.totalDependencies - left) + '/' + this.totalDependencies + ')' : 'All downloads complete.'); }, locateFile: function (path: any, prefix: any) { // if it's a mem init file, use a custom dir @@ -93,24 +93,17 @@ ctx.onmessage = async (event: MessageEvent) => { const res = await mod.then(async (module) => { console.log("Initializing lazy files..."); - postMessage({ type: 'init', data: "afsdafgdfghadsffadf" }); try { - postMessage({ type: 'status', data: 'Name: G4ENSDFSTATE' }); await initG4ENSDFSTATE(module); - postMessage({ type: 'status', data: 'Name: G4EMLOW' }); await initG4EMLOW(module); - postMessage({ type: 'status', data: 'Name: G4NDL' }); await initG4NDL(module); - postMessage({ type: 'status', data: 'Name: G4PARTICLEXS' }); await initG4PARTICLEXS(module); - postMessage({ type: 'status', data: 'Name: G4SAIDDATA' }); await initG4SAIDDATA(module); - postMessage({ type: 'status', data: 'Name: PhotonEvaporation' }); await initPhotoEvaporation(module); } catch (error: unknown) { console.error("Error initializing lazy files:", (error as Error).message); } - postMessage({ type: 'status', message: 'Datasets initialized' }); + postMessage({ type: 'status', data: 'INIT END' }); }); break; } From ecaaa3f9d6097071552f996b9cc2ebd588919d26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Kwinta?= <33606064+lkwinta@users.noreply.github.com> Date: Thu, 18 Sep 2025 15:11:54 +0000 Subject: [PATCH 4/4] some worker refactors --- .gitmodules | 5 +- src/libs/geant4_web/GeantWorkerInterface.ts | 19 + src/libs/geant4_web/geant-web-stubs | 1 + .../geant4_web/geant4_wasm/geant4_wasm.d.ts | 244 - .../geant4_web/geant4_wasm/geant4_wasm.js | 8314 ----------------- .../preload/preload_G4EMLOW8.6.1.js | 421 - .../preload/preload_G4ENSDFSTATE3.0.js | 345 - .../geant4_wasm/preload/preload_G4NDL4.7.1.js | 416 - .../preload/preload_G4PARTICLEXS4.1.js | 354 - .../preload/preload_G4SAIDDATA2.0.js | 345 - .../preload/preload_PhotonEvaporation6.1.js | 345 - src/libs/geant4_web/geantWorker.worker.ts | 85 +- 12 files changed, 67 insertions(+), 10827 deletions(-) create mode 100644 src/libs/geant4_web/GeantWorkerInterface.ts create mode 160000 src/libs/geant4_web/geant-web-stubs delete mode 100644 src/libs/geant4_web/geant4_wasm/geant4_wasm.d.ts delete mode 100644 src/libs/geant4_web/geant4_wasm/geant4_wasm.js delete mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js delete mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js delete mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js delete mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js delete mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js delete mode 100644 src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js diff --git a/.gitmodules b/.gitmodules index cbb756961..1ec16cc05 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,9 @@ [submodule "public/libs/converter"] path = src/libs/converter - url = https://github.com/yaptide/converter + url = ../converter branch = master shallow = true +[submodule "src/libs/geant4_web/geant-web-stubs"] + path = src/libs/geant4_web/geant-web-stubs + url = ../geant-web-stubs diff --git a/src/libs/geant4_web/GeantWorkerInterface.ts b/src/libs/geant4_web/GeantWorkerInterface.ts new file mode 100644 index 000000000..f6b15daf4 --- /dev/null +++ b/src/libs/geant4_web/GeantWorkerInterface.ts @@ -0,0 +1,19 @@ + +export enum GeantWorkerMessageType { + INIT_DATA_FILES, + 
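+	// The remaining members mirror the worker's message flow (inferred from the
+	// existing handlers, not stated in this patch): INIT_LAZY_FILES presumably
+	// triggers the preload_* dataset scripts, CREATE_FILE/READ_FILE write and read
+	// files in the Emscripten FS, RUN_SIMULATION starts a Geant4 run, and
+	// FILE_RESPONSE is the worker's reply carrying a file's contents.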
INIT_LAZY_FILES, + CREATE_FILE, + READ_FILE, + RUN_SIMULATION, + FILE_RESPONSE +} + +export type GeantWorkerMessageFile = { + name: string, + data: string +} + +export type GeantWorkerMessage = { + type: GeantWorkerMessageType, + data: GeantWorkerMessageFile | string +} \ No newline at end of file diff --git a/src/libs/geant4_web/geant-web-stubs b/src/libs/geant4_web/geant-web-stubs new file mode 160000 index 000000000..d7b6a4f97 --- /dev/null +++ b/src/libs/geant4_web/geant-web-stubs @@ -0,0 +1 @@ +Subproject commit d7b6a4f97fb445a7626188cf2c94595bff6ee481 diff --git a/src/libs/geant4_web/geant4_wasm/geant4_wasm.d.ts b/src/libs/geant4_web/geant4_wasm/geant4_wasm.d.ts deleted file mode 100644 index 640442604..000000000 --- a/src/libs/geant4_web/geant4_wasm/geant4_wasm.d.ts +++ /dev/null @@ -1,244 +0,0 @@ -// TypeScript bindings for emscripten-generated code. Automatically generated at compile time. -declare namespace RuntimeExports { - namespace FS { - export let root: any; - export let mounts: any[]; - export let devices: {}; - export let streams: any[]; - export let nextInode: number; - export let nameTable: any; - export let currentPath: string; - export let initialized: boolean; - export let ignorePermissions: boolean; - export let filesystems: any; - export let syncFSRequests: number; - export let readFiles: {}; - export { ErrnoError }; - export { FSStream }; - export { FSNode }; - export function lookupPath(path: any, opts?: {}): { - path: string; - node?: undefined; - } | { - path: string; - node: any; - }; - export function getPath(node: any): any; - export function hashName(parentid: any, name: any): number; - export function hashAddNode(node: any): void; - export function hashRemoveNode(node: any): void; - export function lookupNode(parent: any, name: any): any; - export function createNode(parent: any, name: any, mode: any, rdev: any): any; - export function destroyNode(node: any): void; - export function isRoot(node: any): boolean; - export function isMountpoint(node: any): boolean; - export function isFile(mode: any): boolean; - export function isDir(mode: any): boolean; - export function isLink(mode: any): boolean; - export function isChrdev(mode: any): boolean; - export function isBlkdev(mode: any): boolean; - export function isFIFO(mode: any): boolean; - export function isSocket(mode: any): boolean; - export function flagsToPermissionString(flag: any): string; - export function nodePermissions(node: any, perms: any): 0 | 2; - export function mayLookup(dir: any): any; - export function mayCreate(dir: any, name: any): any; - export function mayDelete(dir: any, name: any, isdir: any): any; - export function mayOpen(node: any, flags: any): any; - export function checkOpExists(op: any, err: any): any; - export let MAX_OPEN_FDS: number; - export function nextfd(): number; - export function getStreamChecked(fd: any): any; - export function getStream(fd: any): any; - export function createStream(stream: any, fd?: number): any; - export function closeStream(fd: any): void; - export function dupStream(origStream: any, fd?: number): any; - export function doSetAttr(stream: any, node: any, attr: any): void; - export namespace chrdev_stream_ops { - function open(stream: any): void; - function llseek(): never; - } - export function major(dev: any): number; - export function minor(dev: any): number; - export function makedev(ma: any, mi: any): number; - export function registerDevice(dev: any, ops: any): void; - export function getDevice(dev: any): any; - export function getMounts(mount: 
any): any[]; - export function syncfs(populate: any, callback: any): void; - export function mount(type: any, opts: any, mountpoint: any): any; - export function unmount(mountpoint: any): void; - export function lookup(parent: any, name: any): any; - export function mknod(path: any, mode: any, dev: any): any; - export function statfs(path: any): any; - export function statfsStream(stream: any): any; - export function statfsNode(node: any): { - bsize: number; - frsize: number; - blocks: number; - bfree: number; - bavail: number; - files: any; - ffree: number; - fsid: number; - flags: number; - namelen: number; - }; - export function create(path: any, mode?: number): any; - export function mkdir(path: any, mode?: number): any; - export function mkdirTree(path: any, mode: any): void; - export function mkdev(path: any, mode: any, dev: any): any; - export function symlink(oldpath: any, newpath: any): any; - export function rename(old_path: any, new_path: any): void; - export function rmdir(path: any): void; - export function readdir(path: any): any; - export function unlink(path: any): void; - export function readlink(path: any): any; - export function stat(path: any, dontFollow: any): any; - export function fstat(fd: any): any; - export function lstat(path: any): any; - export function doChmod(stream: any, node: any, mode: any, dontFollow: any): void; - export function chmod(path: any, mode: any, dontFollow: any): void; - export function lchmod(path: any, mode: any): void; - export function fchmod(fd: any, mode: any): void; - export function doChown(stream: any, node: any, dontFollow: any): void; - export function chown(path: any, uid: any, gid: any, dontFollow: any): void; - export function lchown(path: any, uid: any, gid: any): void; - export function fchown(fd: any, uid: any, gid: any): void; - export function doTruncate(stream: any, node: any, len: any): void; - export function truncate(path: any, len: any): void; - export function ftruncate(fd: any, len: any): void; - export function utime(path: any, atime: any, mtime: any): void; - export function open(path: any, flags: any, mode?: number): any; - export function close(stream: any): void; - export function isClosed(stream: any): boolean; - export function llseek(stream: any, offset: any, whence: any): any; - export function read(stream: any, buffer: any, offset: any, length: any, position: any): any; - export function write(stream: any, buffer: any, offset: any, length: any, position: any, canOwn: any): any; - export function mmap(stream: any, length: any, position: any, prot: any, flags: any): any; - export function msync(stream: any, buffer: any, offset: any, length: any, mmapFlags: any): any; - export function ioctl(stream: any, cmd: any, arg: any): any; - export function readFile(path: any, opts?: {}): Uint8Array; - export function writeFile(path: any, data: any, opts?: {}): void; - export function cwd(): any; - export function chdir(path: any): void; - export function createDefaultDirectories(): void; - export function createDefaultDevices(): void; - export function createSpecialDirectories(): void; - export function createStandardStreams(input: any, output: any, error: any): void; - export function staticInit(): void; - export function init(input: any, output: any, error: any): void; - export function quit(): void; - export function findObject(path: any, dontResolveLastLink: any): any; - export function analyzePath(path: any, dontResolveLastLink: any): { - isRoot: boolean; - exists: boolean; - error: number; - name: any; - path: 
any; - object: any; - parentExists: boolean; - parentPath: any; - parentObject: any; - }; - export function createPath(parent: any, path: any, canRead: any, canWrite: any): any; - export function createFile(parent: any, name: any, properties: any, canRead: any, canWrite: any): any; - export function createDataFile(parent: any, name: any, data: any, canRead: any, canWrite: any, canOwn: any): void; - export function createDevice(parent: any, name: any, input: any, output: any): any; - export function forceLoadFile(obj: any): boolean; - export function createLazyFile(parent: any, name: any, url: any, canRead: any, canWrite: any): any; - export function absolutePath(): void; - export function createFolder(): void; - export function createLink(): void; - export function joinPath(): void; - export function mmapAlloc(): void; - export function standardizePath(): void; - } - function FS_createPath(...args: any[]): any; - function FS_createDataFile(...args: any[]): any; - function FS_preloadFile(parent: any, name: any, url: any, canRead: any, canWrite: any, dontCreateFile: any, canOwn: any, preFinish: any): Promise; - function FS_unlink(...args: any[]): any; - function FS_createLazyFile(...args: any[]): any; - function FS_createDevice(...args: any[]): any; - let addRunDependency: any; - let removeRunDependency: any; -} -declare class ErrnoError extends Error { - constructor(errno: any); - errno: any; - code: string; -} -declare class FSStream { - shared: {}; - set object(val: any); - get object(): any; - node: any; - get isRead(): boolean; - get isWrite(): boolean; - get isAppend(): number; - set flags(val: any); - get flags(): any; - set position(val: any); - get position(): any; -} -declare class FSNode { - constructor(parent: any, name: any, mode: any, rdev: any); - node_ops: {}; - stream_ops: {}; - readMode: number; - writeMode: number; - mounted: any; - parent: any; - mount: any; - id: number; - name: any; - mode: any; - rdev: any; - atime: number; - mtime: number; - ctime: number; - set read(val: boolean); - get read(): boolean; - set write(val: boolean); - get write(): boolean; - get isFolder(): any; - get isDevice(): any; -} -interface WasmModule { -} - -export interface ClassHandle { - isAliasOf(other: ClassHandle): boolean; - delete(): void; - deleteLater(): this; - isDeleted(): boolean; - // @ts-ignore - If targeting lower than ESNext, this symbol might not exist. 
- [Symbol.dispose](): void; - clone(): this; -} -export interface TestClass extends ClassHandle { - testMethod(): number; - complicatedFunction(_0: vector_int): number; -} - -export interface vector_int extends ClassHandle { - push_back(_0: number): void; - resize(_0: number, _1: number): void; - size(): number; - get(_0: number): number | undefined; - set(_0: number, _1: number): boolean; -} - -interface EmbindModule { - TestClass: { - new(_0: number, _1: number): TestClass; - }; - vector_int: { - new(): vector_int; - }; - Geant4_init(): number; - Geant4_GDML(): number; - Geant4_run(): number; -} - -export type MainModule = WasmModule & typeof RuntimeExports & EmbindModule; -export default function MainModuleFactory (options?: unknown): Promise; diff --git a/src/libs/geant4_web/geant4_wasm/geant4_wasm.js b/src/libs/geant4_web/geant4_wasm/geant4_wasm.js deleted file mode 100644 index a6729eddf..000000000 --- a/src/libs/geant4_web/geant4_wasm/geant4_wasm.js +++ /dev/null @@ -1,8314 +0,0 @@ -// This code implements the `-sMODULARIZE` settings by taking the generated -// JS program code (INNER_JS_CODE) and wrapping it in a factory function. - -// When targetting node and ES6 we use `await import ..` in the generated code -// so the outer function needs to be marked as async. -async function createWasmModule(moduleArg = {}) { - var moduleRtn; - -// include: shell.js -// The Module object: Our interface to the outside world. We import -// and export values on it. There are various ways Module can be used: -// 1. Not defined. We create it here -// 2. A function parameter, function(moduleArg) => Promise -// 3. pre-run appended it, var Module = {}; ..generated code.. -// 4. External script tag defines var Module. -// We need to check if Module already exists (e.g. case 3 above). -// Substitution will be replaced with actual code on later stage of the build, -// this way Closure Compiler will not mangle it (e.g. case 4. above). -// Note that if you want to run closure, and also to use Module -// after the generated code, you will need to define var Module = {}; -// before the code. Then that object will be used in the code, and you -// can continue to use Module afterwards as well. -var Module = moduleArg; - -// Determine the runtime environment we are in. You can customize this by -// setting the ENVIRONMENT setting at compile time (see settings.js). - -var ENVIRONMENT_IS_WEB = false; -var ENVIRONMENT_IS_WORKER = true; -var ENVIRONMENT_IS_NODE = false; -var ENVIRONMENT_IS_SHELL = false; - -// --pre-jses are emitted after the Module integration code, so that they can -// refer to Module (if they choose; they can also define Module) -// include: ./setup_env.js -Module.preRun.push(function () { - ENV.G4LEDATA = '/data/G4EMLOW8.6.1'; - ENV.G4LEVELGAMMADATA = '/data/PhotonEvaporation6.1'; - ENV.G4NEUTRONHPDATA = '/data/G4NDL4.7.1'; - ENV.G4ENSDFSTATEDATA = '/data/G4ENSDFSTATE3.0'; - ENV.G4SAIDXSDATA = '/data/G4SAIDDATA2.0'; - ENV.G4PARTICLEXSDATA = '/data/G4PARTICLEXS4.1' -});// end include: ./setup_env.js - - -var arguments_ = []; -var thisProgram = './this.program'; -var quit_ = (status, toThrow) => { - throw toThrow; -}; - -var _scriptName = import.meta.url; - -// `/` should be present at the end if `scriptDirectory` is not empty -var scriptDirectory = ''; -function locateFile(path) { - if (Module['locateFile']) { - return Module['locateFile'](path, scriptDirectory); - } - return scriptDirectory + path; -} - -// Hooks that are implemented differently in different runtime environments. 
-var readAsync, readBinary; - -if (ENVIRONMENT_IS_SHELL) { - - const isNode = typeof process == 'object' && process.versions?.node && process.type != 'renderer'; - if (isNode || typeof window == 'object' || typeof WorkerGlobalScope != 'undefined') throw new Error('not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)'); - -} else - -// Note that this includes Node.js workers when relevant (pthreads is enabled). -// Node.js workers are detected as a combination of ENVIRONMENT_IS_WORKER and -// ENVIRONMENT_IS_NODE. -if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) { - try { - scriptDirectory = new URL('.', _scriptName).href; // includes trailing slash - } catch { - // Must be a `blob:` or `data:` URL (e.g. `blob:http://site.com/etc/etc`), we cannot - // infer anything from them. - } - - if (!(typeof window == 'object' || typeof WorkerGlobalScope != 'undefined')) throw new Error('not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)'); - - { -// include: web_or_worker_shell_read.js -if (ENVIRONMENT_IS_WORKER) { - readBinary = (url) => { - var xhr = new XMLHttpRequest(); - xhr.open('GET', url, false); - xhr.responseType = 'arraybuffer'; - xhr.send(null); - return new Uint8Array(/** @type{!ArrayBuffer} */(xhr.response)); - }; - } - - readAsync = async (url) => { - assert(!isFileURI(url), "readAsync does not work with file:// URLs"); - var response = await fetch(url, { credentials: 'same-origin' }); - if (response.ok) { - return response.arrayBuffer(); - } - throw new Error(response.status + ' : ' + response.url); - }; -// end include: web_or_worker_shell_read.js - } -} else -{ - throw new Error('environment detection error'); -} - -var out = console.log.bind(console); -var err = console.error.bind(console); - -var IDBFS = 'IDBFS is no longer included by default; build with -lidbfs.js'; -var PROXYFS = 'PROXYFS is no longer included by default; build with -lproxyfs.js'; -var WORKERFS = 'WORKERFS is no longer included by default; build with -lworkerfs.js'; -var FETCHFS = 'FETCHFS is no longer included by default; build with -lfetchfs.js'; -var ICASEFS = 'ICASEFS is no longer included by default; build with -licasefs.js'; -var JSFILEFS = 'JSFILEFS is no longer included by default; build with -ljsfilefs.js'; -var OPFS = 'OPFS is no longer included by default; build with -lopfs.js'; - -var NODEFS = 'NODEFS is no longer included by default; build with -lnodefs.js'; - -// perform assertions in shell.js after we set up out() and err(), as otherwise -// if an assertion fails it cannot print the message - -assert(!ENVIRONMENT_IS_WEB, 'web environment detected but not enabled at build time. Add `web` to `-sENVIRONMENT` to enable.'); - -assert(!ENVIRONMENT_IS_NODE, 'node environment detected but not enabled at build time. Add `node` to `-sENVIRONMENT` to enable.'); - -assert(!ENVIRONMENT_IS_SHELL, 'shell environment detected but not enabled at build time. 
Add `shell` to `-sENVIRONMENT` to enable.'); - -// end include: shell.js - -// include: preamble.js -// === Preamble library stuff === - -// Documentation for the public APIs defined in this file must be updated in: -// site/source/docs/api_reference/preamble.js.rst -// A prebuilt local version of the documentation is available at: -// site/build/text/docs/api_reference/preamble.js.txt -// You can also build docs locally as HTML or other formats in site/ -// An online HTML version (which may be of a different version of Emscripten) -// is up at http://kripken.github.io/emscripten-site/docs/api_reference/preamble.js.html - -var wasmBinary; - -if (typeof WebAssembly != 'object') { - err('no native wasm support detected'); -} - -// Wasm globals - -//======================================== -// Runtime essentials -//======================================== - -// whether we are quitting the application. no code should run after this. -// set in exit() and abort() -var ABORT = false; - -// set by exit() and abort(). Passed to 'onExit' handler. -// NOTE: This is also used as the process return code code in shell environments -// but only when noExitRuntime is false. -var EXITSTATUS; - -// In STRICT mode, we only define assert() when ASSERTIONS is set. i.e. we -// don't define it at all in release modes. This matches the behaviour of -// MINIMAL_RUNTIME. -// TODO(sbc): Make this the default even without STRICT enabled. -/** @type {function(*, string=)} */ -function assert(condition, text) { - if (!condition) { - abort('Assertion failed' + (text ? ': ' + text : '')); - } -} - -// We used to include malloc/free by default in the past. Show a helpful error in -// builds with assertions. - -/** - * Indicates whether filename is delivered via file protocol (as opposed to http/https) - * @noinline - */ -var isFileURI = (filename) => filename.startsWith('file://'); - -// include: runtime_common.js -// include: runtime_stack_check.js -// Initializes the stack cookie. Called at the startup of main and at the startup of each thread in pthreads mode. -function writeStackCookie() { - var max = _emscripten_stack_get_end(); - assert((max & 3) == 0); - // If the stack ends at address zero we write our cookies 4 bytes into the - // stack. This prevents interference with SAFE_HEAP and ASAN which also - // monitor writes to address zero. - if (max == 0) { - max += 4; - } - // The stack grow downwards towards _emscripten_stack_get_end. - // We write cookies to the final two words in the stack and detect if they are - // ever overwritten. - HEAPU32[((max)>>2)] = 0x02135467; - HEAPU32[(((max)+(4))>>2)] = 0x89BACDFE; - // Also test the global address 0 for integrity. - HEAPU32[((0)>>2)] = 1668509029; -} - -function checkStackCookie() { - if (ABORT) return; - var max = _emscripten_stack_get_end(); - // See writeStackCookie(). - if (max == 0) { - max += 4; - } - var cookie1 = HEAPU32[((max)>>2)]; - var cookie2 = HEAPU32[(((max)+(4))>>2)]; - if (cookie1 != 0x02135467 || cookie2 != 0x89BACDFE) { - abort(`Stack overflow! Stack cookie has been overwritten at ${ptrToString(max)}, expected hex dwords 0x89BACDFE and 0x2135467, but received ${ptrToString(cookie2)} ${ptrToString(cookie1)}`); - } - // Also test the global address 0 for integrity. 
- if (HEAPU32[((0)>>2)] != 0x63736d65 /* 'emsc' */) { - abort('Runtime error: The application has corrupted its heap memory area (address zero)!'); - } -} -// end include: runtime_stack_check.js -// include: runtime_exceptions.js -// end include: runtime_exceptions.js -// include: runtime_debug.js -var runtimeDebug = true; // Switch to false at runtime to disable logging at the right times - -// Used by XXXXX_DEBUG settings to output debug messages. -function dbg(...args) { - if (!runtimeDebug && typeof runtimeDebug != 'undefined') return; - // TODO(sbc): Make this configurable somehow. Its not always convenient for - // logging to show up as warnings. - console.warn(...args); -} - -// Endianness check -(() => { - var h16 = new Int16Array(1); - var h8 = new Int8Array(h16.buffer); - h16[0] = 0x6373; - if (h8[0] !== 0x73 || h8[1] !== 0x63) throw 'Runtime error: expected the system to be little-endian! (Run with -sSUPPORT_BIG_ENDIAN to bypass)'; -})(); - -function consumedModuleProp(prop) { - if (!Object.getOwnPropertyDescriptor(Module, prop)) { - Object.defineProperty(Module, prop, { - configurable: true, - set() { - abort(`Attempt to set \`Module.${prop}\` after it has already been processed. This can happen, for example, when code is injected via '--post-js' rather than '--pre-js'`); - - } - }); - } -} - -function makeInvalidEarlyAccess(name) { - return () => assert(false, `call to '${name}' via reference taken before Wasm module initialization`); - -} - -function ignoredModuleProp(prop) { - if (Object.getOwnPropertyDescriptor(Module, prop)) { - abort(`\`Module.${prop}\` was supplied but \`${prop}\` not included in INCOMING_MODULE_JS_API`); - } -} - -// forcing the filesystem exports a few things by default -function isExportedByForceFilesystem(name) { - return name === 'FS_createPath' || - name === 'FS_createDataFile' || - name === 'FS_createPreloadedFile' || - name === 'FS_preloadFile' || - name === 'FS_unlink' || - name === 'addRunDependency' || - // The old FS has some functionality that WasmFS lacks. - name === 'FS_createLazyFile' || - name === 'FS_createDevice' || - name === 'removeRunDependency'; -} - -/** - * Intercept access to a global symbol. This enables us to give informative - * warnings/errors when folks attempt to use symbols they did not include in - * their build, or no symbols that no longer exist. - */ -function hookGlobalSymbolAccess(sym, func) { - if (typeof globalThis != 'undefined' && !Object.getOwnPropertyDescriptor(globalThis, sym)) { - Object.defineProperty(globalThis, sym, { - configurable: true, - get() { - func(); - return undefined; - } - }); - } -} - -function missingGlobal(sym, msg) { - hookGlobalSymbolAccess(sym, () => { - warnOnce(`\`${sym}\` is not longer defined by emscripten. ${msg}`); - }); -} - -missingGlobal('buffer', 'Please use HEAP8.buffer or wasmMemory.buffer'); -missingGlobal('asm', 'Please use wasmExports instead'); - -function missingLibrarySymbol(sym) { - hookGlobalSymbolAccess(sym, () => { - // Can't `abort()` here because it would break code that does runtime - // checks. e.g. `if (typeof SDL === 'undefined')`. - var msg = `\`${sym}\` is a library symbol and not included by default; add it to your library.js __deps or to DEFAULT_LIBRARY_FUNCS_TO_INCLUDE on the command line`; - // DEFAULT_LIBRARY_FUNCS_TO_INCLUDE requires the name as it appears in - // library.js, which means $name for a JS name with no prefix, or name - // for a JS name like _name. 
- var librarySymbol = sym; - if (!librarySymbol.startsWith('_')) { - librarySymbol = '$' + sym; - } - msg += ` (e.g. -sDEFAULT_LIBRARY_FUNCS_TO_INCLUDE='${librarySymbol}')`; - if (isExportedByForceFilesystem(sym)) { - msg += '. Alternatively, forcing filesystem support (-sFORCE_FILESYSTEM) can export this for you'; - } - warnOnce(msg); - }); - - // Any symbol that is not included from the JS library is also (by definition) - // not exported on the Module object. - unexportedRuntimeSymbol(sym); -} - -function unexportedRuntimeSymbol(sym) { - if (!Object.getOwnPropertyDescriptor(Module, sym)) { - Object.defineProperty(Module, sym, { - configurable: true, - get() { - var msg = `'${sym}' was not exported. add it to EXPORTED_RUNTIME_METHODS (see the Emscripten FAQ)`; - if (isExportedByForceFilesystem(sym)) { - msg += '. Alternatively, forcing filesystem support (-sFORCE_FILESYSTEM) can export this for you'; - } - abort(msg); - } - }); - } -} - -// end include: runtime_debug.js -var readyPromiseResolve, readyPromiseReject; - -// Memory management - -var wasmMemory; - -var -/** @type {!Int8Array} */ - HEAP8, -/** @type {!Uint8Array} */ - HEAPU8, -/** @type {!Int16Array} */ - HEAP16, -/** @type {!Uint16Array} */ - HEAPU16, -/** @type {!Int32Array} */ - HEAP32, -/** @type {!Uint32Array} */ - HEAPU32, -/** @type {!Float32Array} */ - HEAPF32, -/** @type {!Float64Array} */ - HEAPF64; - -// BigInt64Array type is not correctly defined in closure -var -/** not-@type {!BigInt64Array} */ - HEAP64, -/* BigUint64Array type is not correctly defined in closure -/** not-@type {!BigUint64Array} */ - HEAPU64; - -var runtimeInitialized = false; - - - -function updateMemoryViews() { - var b = wasmMemory.buffer; - HEAP8 = new Int8Array(b); - HEAP16 = new Int16Array(b); - HEAPU8 = new Uint8Array(b); - HEAPU16 = new Uint16Array(b); - HEAP32 = new Int32Array(b); - HEAPU32 = new Uint32Array(b); - HEAPF32 = new Float32Array(b); - HEAPF64 = new Float64Array(b); - HEAP64 = new BigInt64Array(b); - HEAPU64 = new BigUint64Array(b); -} - -// include: memoryprofiler.js -// end include: memoryprofiler.js -// end include: runtime_common.js -assert(typeof Int32Array != 'undefined' && typeof Float64Array !== 'undefined' && Int32Array.prototype.subarray != undefined && Int32Array.prototype.set != undefined, - 'JS engine does not provide full typed array support'); - -function preRun() { - if (Module['preRun']) { - if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']]; - while (Module['preRun'].length) { - addOnPreRun(Module['preRun'].shift()); - } - } - consumedModuleProp('preRun'); - // Begin ATPRERUNS hooks - callRuntimeCallbacks(onPreRuns); - // End ATPRERUNS hooks -} - -function initRuntime() { - assert(!runtimeInitialized); - runtimeInitialized = true; - - checkStackCookie(); - - // Begin ATINITS hooks - if (!Module['noFSInit'] && !FS.initialized) FS.init(); -TTY.init(); -SOCKFS.root = FS.mount(SOCKFS, {}, null); - // End ATINITS hooks - - wasmExports['__wasm_call_ctors'](); - - // Begin ATPOSTCTORS hooks - FS.ignorePermissions = false; - // End ATPOSTCTORS hooks -} - -function postRun() { - checkStackCookie(); - // PThreads reuse the runtime from the main thread. 
- - if (Module['postRun']) { - if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']]; - while (Module['postRun'].length) { - addOnPostRun(Module['postRun'].shift()); - } - } - consumedModuleProp('postRun'); - - // Begin ATPOSTRUNS hooks - callRuntimeCallbacks(onPostRuns); - // End ATPOSTRUNS hooks -} - -// A counter of dependencies for calling run(). If we need to -// do asynchronous work before running, increment this and -// decrement it. Incrementing must happen in a place like -// Module.preRun (used by emcc to add file preloading). -// Note that you can add dependencies in preRun, even though -// it happens right before run - run will be postponed until -// the dependencies are met. -var runDependencies = 0; -var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled -var runDependencyTracking = {}; -var runDependencyWatcher = null; - -function addRunDependency(id) { - runDependencies++; - - Module['monitorRunDependencies']?.(runDependencies); - - assert(id, 'addRunDependency requires an ID') - assert(!runDependencyTracking[id]); - runDependencyTracking[id] = 1; - if (runDependencyWatcher === null && typeof setInterval != 'undefined') { - // Check for missing dependencies every few seconds - runDependencyWatcher = setInterval(() => { - if (ABORT) { - clearInterval(runDependencyWatcher); - runDependencyWatcher = null; - return; - } - var shown = false; - for (var dep in runDependencyTracking) { - if (!shown) { - shown = true; - err('still waiting on run dependencies:'); - } - err(`dependency: ${dep}`); - } - if (shown) { - err('(end of list)'); - } - }, 10000); - } -} - -function removeRunDependency(id) { - runDependencies--; - - Module['monitorRunDependencies']?.(runDependencies); - - assert(id, 'removeRunDependency requires an ID'); - assert(runDependencyTracking[id]); - delete runDependencyTracking[id]; - if (runDependencies == 0) { - if (runDependencyWatcher !== null) { - clearInterval(runDependencyWatcher); - runDependencyWatcher = null; - } - if (dependenciesFulfilled) { - var callback = dependenciesFulfilled; - dependenciesFulfilled = null; - callback(); // can add another dependenciesFulfilled - } - } -} - -/** @param {string|number=} what */ -function abort(what) { - Module['onAbort']?.(what); - - what = 'Aborted(' + what + ')'; - // TODO(sbc): Should we remove printing and leave it up to whoever - // catches the exception? - err(what); - - ABORT = true; - - // Use a wasm runtime error, because a JS error might be seen as a foreign - // exception, which means we'd run destructors on it. We need the error to - // simply make the program stop. - // FIXME This approach does not work in Wasm EH because it currently does not assume - // all RuntimeErrors are from traps; it decides whether a RuntimeError is from - // a trap or not based on a hidden field within the object. So at the moment - // we don't have a way of throwing a wasm trap from JS. TODO Make a JS API that - // allows this in the wasm spec. - - // Suppress closure compiler warning here. Closure compiler's builtin extern - // definition for WebAssembly.RuntimeError claims it takes no arguments even - // though it can. - // TODO(https://github.com/google/closure-compiler/pull/3913): Remove if/when upstream closure gets fixed. 
- /** @suppress {checkTypes} */ - var e = new WebAssembly.RuntimeError(what); - - readyPromiseReject?.(e); - // Throw the error whether or not MODULARIZE is set because abort is used - // in code paths apart from instantiation where an exception is expected - // to be thrown when abort is called. - throw e; -} - -function createExportWrapper(name, nargs) { - return (...args) => { - assert(runtimeInitialized, `native function \`${name}\` called before runtime initialization`); - var f = wasmExports[name]; - assert(f, `exported native function \`${name}\` not found`); - // Only assert for too many arguments. Too few can be valid since the missing arguments will be zero filled. - assert(args.length <= nargs, `native function \`${name}\` called with ${args.length} args but expects ${nargs}`); - return f(...args); - }; -} - -var wasmBinaryFile; - -function findWasmBinary() { - if (Module['locateFile']) { - return locateFile('geant4_wasm.wasm'); - } - // Use bundler-friendly `new URL(..., import.meta.url)` pattern; works in browsers too. - return new URL('geant4_wasm.wasm', import.meta.url).href; -} - -function getBinarySync(file) { - if (file == wasmBinaryFile && wasmBinary) { - return new Uint8Array(wasmBinary); - } - if (readBinary) { - return readBinary(file); - } - throw 'both async and sync fetching of the wasm failed'; -} - -async function getWasmBinary(binaryFile) { - // If we don't have the binary yet, load it asynchronously using readAsync. - if (!wasmBinary) { - // Fetch the binary using readAsync - try { - var response = await readAsync(binaryFile); - return new Uint8Array(response); - } catch { - // Fall back to getBinarySync below; - } - } - - // Otherwise, getBinarySync should be able to get it synchronously - return getBinarySync(binaryFile); -} - -async function instantiateArrayBuffer(binaryFile, imports) { - try { - var binary = await getWasmBinary(binaryFile); - var instance = await WebAssembly.instantiate(binary, imports); - return instance; - } catch (reason) { - err(`failed to asynchronously prepare wasm: ${reason}`); - - // Warn on some common problems. - if (isFileURI(wasmBinaryFile)) { - err(`warning: Loading from a file URI (${wasmBinaryFile}) is not supported in most browsers. See https://emscripten.org/docs/getting_started/FAQ.html#how-do-i-run-a-local-webserver-for-testing-why-does-my-program-stall-in-downloading-or-preparing`); - } - abort(reason); - } -} - -async function instantiateAsync(binary, binaryFile, imports) { - if (!binary - ) { - try { - var response = fetch(binaryFile, { credentials: 'same-origin' }); - var instantiationResult = await WebAssembly.instantiateStreaming(response, imports); - return instantiationResult; - } catch (reason) { - // We expect the most common failure cause to be a bad MIME type for the binary, - // in which case falling back to ArrayBuffer instantiation should work. - err(`wasm streaming compile failed: ${reason}`); - err('falling back to ArrayBuffer instantiation'); - // fall back of instantiateArrayBuffer below - }; - } - return instantiateArrayBuffer(binaryFile, imports); -} - -function getWasmImports() { - // prepare imports - return { - 'env': wasmImports, - 'wasi_snapshot_preview1': wasmImports, - } -} - -// Create the wasm instance. -// Receives the wasm imports, returns the exports. -async function createWasm() { - // Load the wasm module and create an instance of using native support in the JS engine. 
- // handle a generated wasm instance, receiving its exports and - // performing other necessary setup - /** @param {WebAssembly.Module=} module*/ - function receiveInstance(instance, module) { - wasmExports = instance.exports; - - - - wasmMemory = wasmExports['memory']; - - assert(wasmMemory, 'memory not found in wasm exports'); - updateMemoryViews(); - - wasmTable = wasmExports['__indirect_function_table']; - - assert(wasmTable, 'table not found in wasm exports'); - - assignWasmExports(wasmExports); - removeRunDependency('wasm-instantiate'); - return wasmExports; - } - // wait for the pthread pool (if any) - addRunDependency('wasm-instantiate'); - - // Prefer streaming instantiation if available. - // Async compilation can be confusing when an error on the page overwrites Module - // (for example, if the order of elements is wrong, and the one defining Module is - // later), so we save Module and check it later. - var trueModule = Module; - function receiveInstantiationResult(result) { - // 'result' is a ResultObject object which has both the module and instance. - // receiveInstance() will swap in the exports (to Module.asm) so they can be called - assert(Module === trueModule, 'the Module object should not be replaced during async compilation - perhaps the order of HTML elements is wrong?'); - trueModule = null; - // TODO: Due to Closure regression https://github.com/google/closure-compiler/issues/3193, the above line no longer optimizes out down to the following line. - // When the regression is fixed, can restore the above PTHREADS-enabled path. - return receiveInstance(result['instance']); - } - - var info = getWasmImports(); - - // User shell pages can write their own Module.instantiateWasm = function(imports, successCallback) callback - // to manually instantiate the Wasm module themselves. This allows pages to - // run the instantiation parallel to any other async startup actions they are - // performing. - // Also pthreads and wasm workers initialize the wasm instance through this - // path. - if (Module['instantiateWasm']) { - return new Promise((resolve, reject) => { - try { - Module['instantiateWasm'](info, (mod, inst) => { - resolve(receiveInstance(mod, inst)); - }); - } catch(e) { - err(`Module.instantiateWasm callback failed with error: ${e}`); - reject(e); - } - }); - } - - wasmBinaryFile ??= findWasmBinary(); - var result = await instantiateAsync(wasmBinary, wasmBinaryFile, info); - var exports = receiveInstantiationResult(result); - return exports; -} - -// end include: preamble.js - -// Begin JS library code - - - class ExitStatus { - name = 'ExitStatus'; - constructor(status) { - this.message = `Program terminated with exit(${status})`; - this.status = status; - } - } - - var callRuntimeCallbacks = (callbacks) => { - while (callbacks.length > 0) { - // Pass the module as the first argument. 
- callbacks.shift()(Module); - } - }; - var onPostRuns = []; - var addOnPostRun = (cb) => onPostRuns.push(cb); - - var onPreRuns = []; - var addOnPreRun = (cb) => onPreRuns.push(cb); - - - - /** - * @param {number} ptr - * @param {string} type - */ - function getValue(ptr, type = 'i8') { - if (type.endsWith('*')) type = '*'; - switch (type) { - case 'i1': return HEAP8[ptr]; - case 'i8': return HEAP8[ptr]; - case 'i16': return HEAP16[((ptr)>>1)]; - case 'i32': return HEAP32[((ptr)>>2)]; - case 'i64': return HEAP64[((ptr)>>3)]; - case 'float': return HEAPF32[((ptr)>>2)]; - case 'double': return HEAPF64[((ptr)>>3)]; - case '*': return HEAPU32[((ptr)>>2)]; - default: abort(`invalid type for getValue: ${type}`); - } - } - - var noExitRuntime = true; - - var ptrToString = (ptr) => { - assert(typeof ptr === 'number'); - // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned. - ptr >>>= 0; - return '0x' + ptr.toString(16).padStart(8, '0'); - }; - - - /** - * @param {number} ptr - * @param {number} value - * @param {string} type - */ - function setValue(ptr, value, type = 'i8') { - if (type.endsWith('*')) type = '*'; - switch (type) { - case 'i1': HEAP8[ptr] = value; break; - case 'i8': HEAP8[ptr] = value; break; - case 'i16': HEAP16[((ptr)>>1)] = value; break; - case 'i32': HEAP32[((ptr)>>2)] = value; break; - case 'i64': HEAP64[((ptr)>>3)] = BigInt(value); break; - case 'float': HEAPF32[((ptr)>>2)] = value; break; - case 'double': HEAPF64[((ptr)>>3)] = value; break; - case '*': HEAPU32[((ptr)>>2)] = value; break; - default: abort(`invalid type for setValue: ${type}`); - } - } - - var stackRestore = (val) => __emscripten_stack_restore(val); - - var stackSave = () => _emscripten_stack_get_current(); - - var warnOnce = (text) => { - warnOnce.shown ||= {}; - if (!warnOnce.shown[text]) { - warnOnce.shown[text] = 1; - err(text); - } - }; - - var UTF8Decoder = typeof TextDecoder != 'undefined' ? new TextDecoder() : undefined; - - var findStringEnd = (heapOrArray, idx, maxBytesToRead, ignoreNul) => { - var maxIdx = idx + maxBytesToRead; - if (ignoreNul) return maxIdx; - // TextDecoder needs to know the byte length in advance, it doesn't stop on - // null terminator by itself. - // As a tiny code save trick, compare idx against maxIdx using a negation, - // so that maxBytesToRead=undefined/NaN means Infinity. - while (heapOrArray[idx] && !(idx >= maxIdx)) ++idx; - return idx; - }; - - - /** - * Given a pointer 'idx' to a null-terminated UTF8-encoded string in the given - * array that contains uint8 values, returns a copy of that string as a - * Javascript String object. - * heapOrArray is either a regular array, or a JavaScript typed array view. - * @param {number=} idx - * @param {number=} maxBytesToRead - * @param {boolean=} ignoreNul - If true, the function will not stop on a NUL character. - * @return {string} - */ - var UTF8ArrayToString = (heapOrArray, idx = 0, maxBytesToRead, ignoreNul) => { - - var endPtr = findStringEnd(heapOrArray, idx, maxBytesToRead, ignoreNul); - - // When using conditional TextDecoder, skip it for short strings as the overhead of the native call is not worth it. 
- if (endPtr - idx > 16 && heapOrArray.buffer && UTF8Decoder) { - return UTF8Decoder.decode(heapOrArray.subarray(idx, endPtr)); - } - var str = ''; - while (idx < endPtr) { - // For UTF8 byte structure, see: - // http://en.wikipedia.org/wiki/UTF-8#Description - // https://www.ietf.org/rfc/rfc2279.txt - // https://tools.ietf.org/html/rfc3629 - var u0 = heapOrArray[idx++]; - if (!(u0 & 0x80)) { str += String.fromCharCode(u0); continue; } - var u1 = heapOrArray[idx++] & 63; - if ((u0 & 0xE0) == 0xC0) { str += String.fromCharCode(((u0 & 31) << 6) | u1); continue; } - var u2 = heapOrArray[idx++] & 63; - if ((u0 & 0xF0) == 0xE0) { - u0 = ((u0 & 15) << 12) | (u1 << 6) | u2; - } else { - if ((u0 & 0xF8) != 0xF0) warnOnce('Invalid UTF-8 leading byte ' + ptrToString(u0) + ' encountered when deserializing a UTF-8 string in wasm memory to a JS string!'); - u0 = ((u0 & 7) << 18) | (u1 << 12) | (u2 << 6) | (heapOrArray[idx++] & 63); - } - - if (u0 < 0x10000) { - str += String.fromCharCode(u0); - } else { - var ch = u0 - 0x10000; - str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF)); - } - } - return str; - }; - - /** - * Given a pointer 'ptr' to a null-terminated UTF8-encoded string in the - * emscripten HEAP, returns a copy of that string as a Javascript String object. - * - * @param {number} ptr - * @param {number=} maxBytesToRead - An optional length that specifies the - * maximum number of bytes to read. You can omit this parameter to scan the - * string until the first 0 byte. If maxBytesToRead is passed, and the string - * at [ptr, ptr+maxBytesToReadr[ contains a null byte in the middle, then the - * string will cut short at that byte index. - * @param {boolean=} ignoreNul - If true, the function will not stop on a NUL character. - * @return {string} - */ - var UTF8ToString = (ptr, maxBytesToRead, ignoreNul) => { - assert(typeof ptr == 'number', `UTF8ToString expects a number (got ${typeof ptr})`); - return ptr ? UTF8ArrayToString(HEAPU8, ptr, maxBytesToRead, ignoreNul) : ''; - }; - var ___assert_fail = (condition, filename, line, func) => - abort(`Assertion failed: ${UTF8ToString(condition)}, at: ` + [filename ? UTF8ToString(filename) : 'unknown filename', line, func ? UTF8ToString(func) : 'unknown function']); - - class ExceptionInfo { - // excPtr - Thrown object pointer to wrap. Metadata pointer is calculated from it. - constructor(excPtr) { - this.excPtr = excPtr; - this.ptr = excPtr - 24; - } - - set_type(type) { - HEAPU32[(((this.ptr)+(4))>>2)] = type; - } - - get_type() { - return HEAPU32[(((this.ptr)+(4))>>2)]; - } - - set_destructor(destructor) { - HEAPU32[(((this.ptr)+(8))>>2)] = destructor; - } - - get_destructor() { - return HEAPU32[(((this.ptr)+(8))>>2)]; - } - - set_caught(caught) { - caught = caught ? 1 : 0; - HEAP8[(this.ptr)+(12)] = caught; - } - - get_caught() { - return HEAP8[(this.ptr)+(12)] != 0; - } - - set_rethrown(rethrown) { - rethrown = rethrown ? 1 : 0; - HEAP8[(this.ptr)+(13)] = rethrown; - } - - get_rethrown() { - return HEAP8[(this.ptr)+(13)] != 0; - } - - // Initialize native structure fields. Should be called once after allocated. 
- init(type, destructor) { - this.set_adjusted_ptr(0); - this.set_type(type); - this.set_destructor(destructor); - } - - set_adjusted_ptr(adjustedPtr) { - HEAPU32[(((this.ptr)+(16))>>2)] = adjustedPtr; - } - - get_adjusted_ptr() { - return HEAPU32[(((this.ptr)+(16))>>2)]; - } - } - - var exceptionLast = 0; - - var uncaughtExceptionCount = 0; - var ___cxa_throw = (ptr, type, destructor) => { - var info = new ExceptionInfo(ptr); - // Initialize ExceptionInfo content after it was allocated in __cxa_allocate_exception. - info.init(type, destructor); - exceptionLast = ptr; - uncaughtExceptionCount++; - assert(false, 'Exception thrown, but exception catching is not enabled. Compile with -sNO_DISABLE_EXCEPTION_CATCHING or -sEXCEPTION_CATCHING_ALLOWED=[..] to catch.'); - }; - - var PATH = { - isAbs:(path) => path.charAt(0) === '/', - splitPath:(filename) => { - var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/; - return splitPathRe.exec(filename).slice(1); - }, - normalizeArray:(parts, allowAboveRoot) => { - // if the path tries to go above the root, `up` ends up > 0 - var up = 0; - for (var i = parts.length - 1; i >= 0; i--) { - var last = parts[i]; - if (last === '.') { - parts.splice(i, 1); - } else if (last === '..') { - parts.splice(i, 1); - up++; - } else if (up) { - parts.splice(i, 1); - up--; - } - } - // if the path is allowed to go above the root, restore leading ..s - if (allowAboveRoot) { - for (; up; up--) { - parts.unshift('..'); - } - } - return parts; - }, - normalize:(path) => { - var isAbsolute = PATH.isAbs(path), - trailingSlash = path.slice(-1) === '/'; - // Normalize the path - path = PATH.normalizeArray(path.split('/').filter((p) => !!p), !isAbsolute).join('/'); - if (!path && !isAbsolute) { - path = '.'; - } - if (path && trailingSlash) { - path += '/'; - } - return (isAbsolute ? '/' : '') + path; - }, - dirname:(path) => { - var result = PATH.splitPath(path), - root = result[0], - dir = result[1]; - if (!root && !dir) { - // No dirname whatsoever - return '.'; - } - if (dir) { - // It has a dirname, strip trailing slash - dir = dir.slice(0, -1); - } - return root + dir; - }, - basename:(path) => path && path.match(/([^\/]+|\/)\/*$/)[1], - join:(...paths) => PATH.normalize(paths.join('/')), - join2:(l, r) => PATH.normalize(l + '/' + r), - }; - - var initRandomFill = () => { - - return (view) => crypto.getRandomValues(view); - }; - var randomFill = (view) => { - // Lazily init on the first invocation. - (randomFill = initRandomFill())(view); - }; - - - - var PATH_FS = { - resolve:(...args) => { - var resolvedPath = '', - resolvedAbsolute = false; - for (var i = args.length - 1; i >= -1 && !resolvedAbsolute; i--) { - var path = (i >= 0) ? args[i] : FS.cwd(); - // Skip empty and invalid entries - if (typeof path != 'string') { - throw new TypeError('Arguments to path.resolve must be strings'); - } else if (!path) { - return ''; // an invalid portion invalidates the whole thing - } - resolvedPath = path + '/' + resolvedPath; - resolvedAbsolute = PATH.isAbs(path); - } - // At this point the path should be resolved to a full absolute path, but - // handle relative paths to be safe (might happen when process.cwd() fails) - resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter((p) => !!p), !resolvedAbsolute).join('/'); - return ((resolvedAbsolute ? 
'/' : '') + resolvedPath) || '.'; - }, - relative:(from, to) => { - from = PATH_FS.resolve(from).slice(1); - to = PATH_FS.resolve(to).slice(1); - function trim(arr) { - var start = 0; - for (; start < arr.length; start++) { - if (arr[start] !== '') break; - } - var end = arr.length - 1; - for (; end >= 0; end--) { - if (arr[end] !== '') break; - } - if (start > end) return []; - return arr.slice(start, end - start + 1); - } - var fromParts = trim(from.split('/')); - var toParts = trim(to.split('/')); - var length = Math.min(fromParts.length, toParts.length); - var samePartsLength = length; - for (var i = 0; i < length; i++) { - if (fromParts[i] !== toParts[i]) { - samePartsLength = i; - break; - } - } - var outputParts = []; - for (var i = samePartsLength; i < fromParts.length; i++) { - outputParts.push('..'); - } - outputParts = outputParts.concat(toParts.slice(samePartsLength)); - return outputParts.join('/'); - }, - }; - - - - var FS_stdin_getChar_buffer = []; - - var lengthBytesUTF8 = (str) => { - var len = 0; - for (var i = 0; i < str.length; ++i) { - // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code - // unit, not a Unicode code point of the character! So decode - // UTF16->UTF32->UTF8. - // See http://unicode.org/faq/utf_bom.html#utf16-3 - var c = str.charCodeAt(i); // possibly a lead surrogate - if (c <= 0x7F) { - len++; - } else if (c <= 0x7FF) { - len += 2; - } else if (c >= 0xD800 && c <= 0xDFFF) { - len += 4; ++i; - } else { - len += 3; - } - } - return len; - }; - - var stringToUTF8Array = (str, heap, outIdx, maxBytesToWrite) => { - assert(typeof str === 'string', `stringToUTF8Array expects a string (got ${typeof str})`); - // Parameter maxBytesToWrite is not optional. Negative values, 0, null, - // undefined and false each don't write out any bytes. - if (!(maxBytesToWrite > 0)) - return 0; - - var startIdx = outIdx; - var endIdx = outIdx + maxBytesToWrite - 1; // -1 for string null terminator. - for (var i = 0; i < str.length; ++i) { - // For UTF8 byte structure, see http://en.wikipedia.org/wiki/UTF-8#Description - // and https://www.ietf.org/rfc/rfc2279.txt - // and https://tools.ietf.org/html/rfc3629 - var u = str.codePointAt(i); - if (u <= 0x7F) { - if (outIdx >= endIdx) break; - heap[outIdx++] = u; - } else if (u <= 0x7FF) { - if (outIdx + 1 >= endIdx) break; - heap[outIdx++] = 0xC0 | (u >> 6); - heap[outIdx++] = 0x80 | (u & 63); - } else if (u <= 0xFFFF) { - if (outIdx + 2 >= endIdx) break; - heap[outIdx++] = 0xE0 | (u >> 12); - heap[outIdx++] = 0x80 | ((u >> 6) & 63); - heap[outIdx++] = 0x80 | (u & 63); - } else { - if (outIdx + 3 >= endIdx) break; - if (u > 0x10FFFF) warnOnce('Invalid Unicode code point ' + ptrToString(u) + ' encountered when serializing a JS string to a UTF-8 string in wasm memory! (Valid unicode code points should be in range 0-0x10FFFF).'); - heap[outIdx++] = 0xF0 | (u >> 18); - heap[outIdx++] = 0x80 | ((u >> 12) & 63); - heap[outIdx++] = 0x80 | ((u >> 6) & 63); - heap[outIdx++] = 0x80 | (u & 63); - // Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16. - // We need to manually skip over the second code unit for correct iteration. - i++; - } - } - // Null-terminate the pointer to the buffer. - heap[outIdx] = 0; - return outIdx - startIdx; - }; - /** @type {function(string, boolean=, number=)} */ - var intArrayFromString = (stringy, dontAddNull, length) => { - var len = length > 0 ? 
length : lengthBytesUTF8(stringy)+1; - var u8array = new Array(len); - var numBytesWritten = stringToUTF8Array(stringy, u8array, 0, u8array.length); - if (dontAddNull) u8array.length = numBytesWritten; - return u8array; - }; - var FS_stdin_getChar = () => { - if (!FS_stdin_getChar_buffer.length) { - var result = null; - {} - if (!result) { - return null; - } - FS_stdin_getChar_buffer = intArrayFromString(result, true); - } - return FS_stdin_getChar_buffer.shift(); - }; - var TTY = { - ttys:[], - init() { - // https://github.com/emscripten-core/emscripten/pull/1555 - // if (ENVIRONMENT_IS_NODE) { - // // currently, FS.init does not distinguish if process.stdin is a file or TTY - // // device, it always assumes it's a TTY device. because of this, we're forcing - // // process.stdin to UTF8 encoding to at least make stdin reading compatible - // // with text files until FS.init can be refactored. - // process.stdin.setEncoding('utf8'); - // } - }, - shutdown() { - // https://github.com/emscripten-core/emscripten/pull/1555 - // if (ENVIRONMENT_IS_NODE) { - // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)? - // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation - // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists? - // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle - // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call - // process.stdin.pause(); - // } - }, - register(dev, ops) { - TTY.ttys[dev] = { input: [], output: [], ops: ops }; - FS.registerDevice(dev, TTY.stream_ops); - }, - stream_ops:{ - open(stream) { - var tty = TTY.ttys[stream.node.rdev]; - if (!tty) { - throw new FS.ErrnoError(43); - } - stream.tty = tty; - stream.seekable = false; - }, - close(stream) { - // flush any pending line data - stream.tty.ops.fsync(stream.tty); - }, - fsync(stream) { - stream.tty.ops.fsync(stream.tty); - }, - read(stream, buffer, offset, length, pos /* ignored */) { - if (!stream.tty || !stream.tty.ops.get_char) { - throw new FS.ErrnoError(60); - } - var bytesRead = 0; - for (var i = 0; i < length; i++) { - var result; - try { - result = stream.tty.ops.get_char(stream.tty); - } catch (e) { - throw new FS.ErrnoError(29); - } - if (result === undefined && bytesRead === 0) { - throw new FS.ErrnoError(6); - } - if (result === null || result === undefined) break; - bytesRead++; - buffer[offset+i] = result; - } - if (bytesRead) { - stream.node.atime = Date.now(); - } - return bytesRead; - }, - write(stream, buffer, offset, length, pos) { - if (!stream.tty || !stream.tty.ops.put_char) { - throw new FS.ErrnoError(60); - } - try { - for (var i = 0; i < length; i++) { - stream.tty.ops.put_char(stream.tty, buffer[offset+i]); - } - } catch (e) { - throw new FS.ErrnoError(29); - } - if (length) { - stream.node.mtime = stream.node.ctime = Date.now(); - } - return i; - }, - }, - default_tty_ops:{ - get_char(tty) { - return FS_stdin_getChar(); - }, - put_char(tty, val) { - if (val === null || val === 10) { - out(UTF8ArrayToString(tty.output)); - tty.output = []; - } else { - if (val != 0) tty.output.push(val); // val == 0 would cut text output off in the middle. 
- } - }, - fsync(tty) { - if (tty.output?.length > 0) { - out(UTF8ArrayToString(tty.output)); - tty.output = []; - } - }, - ioctl_tcgets(tty) { - // typical setting - return { - c_iflag: 25856, - c_oflag: 5, - c_cflag: 191, - c_lflag: 35387, - c_cc: [ - 0x03, 0x1c, 0x7f, 0x15, 0x04, 0x00, 0x01, 0x00, 0x11, 0x13, 0x1a, 0x00, - 0x12, 0x0f, 0x17, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - ] - }; - }, - ioctl_tcsets(tty, optional_actions, data) { - // currently just ignore - return 0; - }, - ioctl_tiocgwinsz(tty) { - return [24, 80]; - }, - }, - default_tty1_ops:{ - put_char(tty, val) { - if (val === null || val === 10) { - err(UTF8ArrayToString(tty.output)); - tty.output = []; - } else { - if (val != 0) tty.output.push(val); - } - }, - fsync(tty) { - if (tty.output?.length > 0) { - err(UTF8ArrayToString(tty.output)); - tty.output = []; - } - }, - }, - }; - - - var mmapAlloc = (size) => { - abort('internal error: mmapAlloc called but `emscripten_builtin_memalign` native symbol not exported'); - }; - var MEMFS = { - ops_table:null, - mount(mount) { - return MEMFS.createNode(null, '/', 16895, 0); - }, - createNode(parent, name, mode, dev) { - if (FS.isBlkdev(mode) || FS.isFIFO(mode)) { - // no supported - throw new FS.ErrnoError(63); - } - MEMFS.ops_table ||= { - dir: { - node: { - getattr: MEMFS.node_ops.getattr, - setattr: MEMFS.node_ops.setattr, - lookup: MEMFS.node_ops.lookup, - mknod: MEMFS.node_ops.mknod, - rename: MEMFS.node_ops.rename, - unlink: MEMFS.node_ops.unlink, - rmdir: MEMFS.node_ops.rmdir, - readdir: MEMFS.node_ops.readdir, - symlink: MEMFS.node_ops.symlink - }, - stream: { - llseek: MEMFS.stream_ops.llseek - } - }, - file: { - node: { - getattr: MEMFS.node_ops.getattr, - setattr: MEMFS.node_ops.setattr - }, - stream: { - llseek: MEMFS.stream_ops.llseek, - read: MEMFS.stream_ops.read, - write: MEMFS.stream_ops.write, - mmap: MEMFS.stream_ops.mmap, - msync: MEMFS.stream_ops.msync - } - }, - link: { - node: { - getattr: MEMFS.node_ops.getattr, - setattr: MEMFS.node_ops.setattr, - readlink: MEMFS.node_ops.readlink - }, - stream: {} - }, - chrdev: { - node: { - getattr: MEMFS.node_ops.getattr, - setattr: MEMFS.node_ops.setattr - }, - stream: FS.chrdev_stream_ops - } - }; - var node = FS.createNode(parent, name, mode, dev); - if (FS.isDir(node.mode)) { - node.node_ops = MEMFS.ops_table.dir.node; - node.stream_ops = MEMFS.ops_table.dir.stream; - node.contents = {}; - } else if (FS.isFile(node.mode)) { - node.node_ops = MEMFS.ops_table.file.node; - node.stream_ops = MEMFS.ops_table.file.stream; - node.usedBytes = 0; // The actual number of bytes used in the typed array, as opposed to contents.length which gives the whole capacity. - // When the byte data of the file is populated, this will point to either a typed array, or a normal JS array. Typed arrays are preferred - // for performance, and used by default. However, typed arrays are not resizable like normal JS arrays are, so there is a small disk size - // penalty involved for appending file writes that continuously grow a file similar to std::vector capacity vs used -scheme. 
- node.contents = null; - } else if (FS.isLink(node.mode)) { - node.node_ops = MEMFS.ops_table.link.node; - node.stream_ops = MEMFS.ops_table.link.stream; - } else if (FS.isChrdev(node.mode)) { - node.node_ops = MEMFS.ops_table.chrdev.node; - node.stream_ops = MEMFS.ops_table.chrdev.stream; - } - node.atime = node.mtime = node.ctime = Date.now(); - // add the new node to the parent - if (parent) { - parent.contents[name] = node; - parent.atime = parent.mtime = parent.ctime = node.atime; - } - return node; - }, - getFileDataAsTypedArray(node) { - if (!node.contents) return new Uint8Array(0); - if (node.contents.subarray) return node.contents.subarray(0, node.usedBytes); // Make sure to not return excess unused bytes. - return new Uint8Array(node.contents); - }, - expandFileStorage(node, newCapacity) { - var prevCapacity = node.contents ? node.contents.length : 0; - if (prevCapacity >= newCapacity) return; // No need to expand, the storage was already large enough. - // Don't expand strictly to the given requested limit if it's only a very small increase, but instead geometrically grow capacity. - // For small filesizes (<1MB), perform size*2 geometric increase, but for large sizes, do a much more conservative size*1.125 increase to - // avoid overshooting the allocation cap by a very large margin. - var CAPACITY_DOUBLING_MAX = 1024 * 1024; - newCapacity = Math.max(newCapacity, (prevCapacity * (prevCapacity < CAPACITY_DOUBLING_MAX ? 2.0 : 1.125)) >>> 0); - if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256); // At minimum allocate 256b for each file when expanding. - var oldContents = node.contents; - node.contents = new Uint8Array(newCapacity); // Allocate new storage. - if (node.usedBytes > 0) node.contents.set(oldContents.subarray(0, node.usedBytes), 0); // Copy old data over to the new storage. - }, - resizeFileStorage(node, newSize) { - if (node.usedBytes == newSize) return; - if (newSize == 0) { - node.contents = null; // Fully decommit when requesting a resize to zero. - node.usedBytes = 0; - } else { - var oldContents = node.contents; - node.contents = new Uint8Array(newSize); // Allocate new storage. - if (oldContents) { - node.contents.set(oldContents.subarray(0, Math.min(newSize, node.usedBytes))); // Copy old data over to the new storage. - } - node.usedBytes = newSize; - } - }, - node_ops:{ - getattr(node) { - var attr = {}; - // device numbers reuse inode numbers. - attr.dev = FS.isChrdev(node.mode) ? node.id : 1; - attr.ino = node.id; - attr.mode = node.mode; - attr.nlink = 1; - attr.uid = 0; - attr.gid = 0; - attr.rdev = node.rdev; - if (FS.isDir(node.mode)) { - attr.size = 4096; - } else if (FS.isFile(node.mode)) { - attr.size = node.usedBytes; - } else if (FS.isLink(node.mode)) { - attr.size = node.link.length; - } else { - attr.size = 0; - } - attr.atime = new Date(node.atime); - attr.mtime = new Date(node.mtime); - attr.ctime = new Date(node.ctime); - // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize), - // but this is not required by the standard. 
- attr.blksize = 4096; - attr.blocks = Math.ceil(attr.size / attr.blksize); - return attr; - }, - setattr(node, attr) { - for (const key of ["mode", "atime", "mtime", "ctime"]) { - if (attr[key] != null) { - node[key] = attr[key]; - } - } - if (attr.size !== undefined) { - MEMFS.resizeFileStorage(node, attr.size); - } - }, - lookup(parent, name) { - throw new FS.ErrnoError(44); - }, - mknod(parent, name, mode, dev) { - return MEMFS.createNode(parent, name, mode, dev); - }, - rename(old_node, new_dir, new_name) { - var new_node; - try { - new_node = FS.lookupNode(new_dir, new_name); - } catch (e) {} - if (new_node) { - if (FS.isDir(old_node.mode)) { - // if we're overwriting a directory at new_name, make sure it's empty. - for (var i in new_node.contents) { - throw new FS.ErrnoError(55); - } - } - FS.hashRemoveNode(new_node); - } - // do the internal rewiring - delete old_node.parent.contents[old_node.name]; - new_dir.contents[new_name] = old_node; - old_node.name = new_name; - new_dir.ctime = new_dir.mtime = old_node.parent.ctime = old_node.parent.mtime = Date.now(); - }, - unlink(parent, name) { - delete parent.contents[name]; - parent.ctime = parent.mtime = Date.now(); - }, - rmdir(parent, name) { - var node = FS.lookupNode(parent, name); - for (var i in node.contents) { - throw new FS.ErrnoError(55); - } - delete parent.contents[name]; - parent.ctime = parent.mtime = Date.now(); - }, - readdir(node) { - return ['.', '..', ...Object.keys(node.contents)]; - }, - symlink(parent, newname, oldpath) { - var node = MEMFS.createNode(parent, newname, 0o777 | 40960, 0); - node.link = oldpath; - return node; - }, - readlink(node) { - if (!FS.isLink(node.mode)) { - throw new FS.ErrnoError(28); - } - return node.link; - }, - }, - stream_ops:{ - read(stream, buffer, offset, length, position) { - var contents = stream.node.contents; - if (position >= stream.node.usedBytes) return 0; - var size = Math.min(stream.node.usedBytes - position, length); - assert(size >= 0); - if (size > 8 && contents.subarray) { // non-trivial, and typed array - buffer.set(contents.subarray(position, position + size), offset); - } else { - for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i]; - } - return size; - }, - write(stream, buffer, offset, length, position, canOwn) { - // The data buffer should be a typed array view - assert(!(buffer instanceof ArrayBuffer)); - - if (!length) return 0; - var node = stream.node; - node.mtime = node.ctime = Date.now(); - - if (buffer.subarray && (!node.contents || node.contents.subarray)) { // This write is from a typed array to a typed array? - if (canOwn) { - assert(position === 0, 'canOwn must imply no weird position inside the file'); - node.contents = buffer.subarray(offset, offset + length); - node.usedBytes = length; - return length; - } else if (node.usedBytes === 0 && position === 0) { // If this is a simple first write to an empty file, do a fast set since we don't need to care about old data. - node.contents = buffer.slice(offset, offset + length); - node.usedBytes = length; - return length; - } else if (position + length <= node.usedBytes) { // Writing to an already allocated and used subrange of the file? - node.contents.set(buffer.subarray(offset, offset + length), position); - return length; - } - } - - // Appending to an existing file and we need to reallocate, or source data did not come as a typed array. 
- MEMFS.expandFileStorage(node, position+length); - if (node.contents.subarray && buffer.subarray) { - // Use typed array write which is available. - node.contents.set(buffer.subarray(offset, offset + length), position); - } else { - for (var i = 0; i < length; i++) { - node.contents[position + i] = buffer[offset + i]; // Or fall back to manual write if not. - } - } - node.usedBytes = Math.max(node.usedBytes, position + length); - return length; - }, - llseek(stream, offset, whence) { - var position = offset; - if (whence === 1) { - position += stream.position; - } else if (whence === 2) { - if (FS.isFile(stream.node.mode)) { - position += stream.node.usedBytes; - } - } - if (position < 0) { - throw new FS.ErrnoError(28); - } - return position; - }, - mmap(stream, length, position, prot, flags) { - if (!FS.isFile(stream.node.mode)) { - throw new FS.ErrnoError(43); - } - var ptr; - var allocated; - var contents = stream.node.contents; - // Only make a new copy when MAP_PRIVATE is specified. - if (!(flags & 2) && contents && contents.buffer === HEAP8.buffer) { - // We can't emulate MAP_SHARED when the file is not backed by the - // buffer we're mapping to (e.g. the HEAP buffer). - allocated = false; - ptr = contents.byteOffset; - } else { - allocated = true; - ptr = mmapAlloc(length); - if (!ptr) { - throw new FS.ErrnoError(48); - } - if (contents) { - // Try to avoid unnecessary slices. - if (position > 0 || position + length < contents.length) { - if (contents.subarray) { - contents = contents.subarray(position, position + length); - } else { - contents = Array.prototype.slice.call(contents, position, position + length); - } - } - HEAP8.set(contents, ptr); - } - } - return { ptr, allocated }; - }, - msync(stream, buffer, offset, length, mmapFlags) { - MEMFS.stream_ops.write(stream, buffer, 0, length, offset, false); - // should we check if bytesWritten and length are the same? 
- return 0; - }, - }, - }; - - var FS_modeStringToFlags = (str) => { - var flagModes = { - 'r': 0, - 'r+': 2, - 'w': 512 | 64 | 1, - 'w+': 512 | 64 | 2, - 'a': 1024 | 64 | 1, - 'a+': 1024 | 64 | 2, - }; - var flags = flagModes[str]; - if (typeof flags == 'undefined') { - throw new Error(`Unknown file open mode: ${str}`); - } - return flags; - }; - - var FS_getMode = (canRead, canWrite) => { - var mode = 0; - if (canRead) mode |= 292 | 73; - if (canWrite) mode |= 146; - return mode; - }; - - - - - var strError = (errno) => UTF8ToString(_strerror(errno)); - - var ERRNO_CODES = { - 'EPERM': 63, - 'ENOENT': 44, - 'ESRCH': 71, - 'EINTR': 27, - 'EIO': 29, - 'ENXIO': 60, - 'E2BIG': 1, - 'ENOEXEC': 45, - 'EBADF': 8, - 'ECHILD': 12, - 'EAGAIN': 6, - 'EWOULDBLOCK': 6, - 'ENOMEM': 48, - 'EACCES': 2, - 'EFAULT': 21, - 'ENOTBLK': 105, - 'EBUSY': 10, - 'EEXIST': 20, - 'EXDEV': 75, - 'ENODEV': 43, - 'ENOTDIR': 54, - 'EISDIR': 31, - 'EINVAL': 28, - 'ENFILE': 41, - 'EMFILE': 33, - 'ENOTTY': 59, - 'ETXTBSY': 74, - 'EFBIG': 22, - 'ENOSPC': 51, - 'ESPIPE': 70, - 'EROFS': 69, - 'EMLINK': 34, - 'EPIPE': 64, - 'EDOM': 18, - 'ERANGE': 68, - 'ENOMSG': 49, - 'EIDRM': 24, - 'ECHRNG': 106, - 'EL2NSYNC': 156, - 'EL3HLT': 107, - 'EL3RST': 108, - 'ELNRNG': 109, - 'EUNATCH': 110, - 'ENOCSI': 111, - 'EL2HLT': 112, - 'EDEADLK': 16, - 'ENOLCK': 46, - 'EBADE': 113, - 'EBADR': 114, - 'EXFULL': 115, - 'ENOANO': 104, - 'EBADRQC': 103, - 'EBADSLT': 102, - 'EDEADLOCK': 16, - 'EBFONT': 101, - 'ENOSTR': 100, - 'ENODATA': 116, - 'ETIME': 117, - 'ENOSR': 118, - 'ENONET': 119, - 'ENOPKG': 120, - 'EREMOTE': 121, - 'ENOLINK': 47, - 'EADV': 122, - 'ESRMNT': 123, - 'ECOMM': 124, - 'EPROTO': 65, - 'EMULTIHOP': 36, - 'EDOTDOT': 125, - 'EBADMSG': 9, - 'ENOTUNIQ': 126, - 'EBADFD': 127, - 'EREMCHG': 128, - 'ELIBACC': 129, - 'ELIBBAD': 130, - 'ELIBSCN': 131, - 'ELIBMAX': 132, - 'ELIBEXEC': 133, - 'ENOSYS': 52, - 'ENOTEMPTY': 55, - 'ENAMETOOLONG': 37, - 'ELOOP': 32, - 'EOPNOTSUPP': 138, - 'EPFNOSUPPORT': 139, - 'ECONNRESET': 15, - 'ENOBUFS': 42, - 'EAFNOSUPPORT': 5, - 'EPROTOTYPE': 67, - 'ENOTSOCK': 57, - 'ENOPROTOOPT': 50, - 'ESHUTDOWN': 140, - 'ECONNREFUSED': 14, - 'EADDRINUSE': 3, - 'ECONNABORTED': 13, - 'ENETUNREACH': 40, - 'ENETDOWN': 38, - 'ETIMEDOUT': 73, - 'EHOSTDOWN': 142, - 'EHOSTUNREACH': 23, - 'EINPROGRESS': 26, - 'EALREADY': 7, - 'EDESTADDRREQ': 17, - 'EMSGSIZE': 35, - 'EPROTONOSUPPORT': 66, - 'ESOCKTNOSUPPORT': 137, - 'EADDRNOTAVAIL': 4, - 'ENETRESET': 39, - 'EISCONN': 30, - 'ENOTCONN': 53, - 'ETOOMANYREFS': 141, - 'EUSERS': 136, - 'EDQUOT': 19, - 'ESTALE': 72, - 'ENOTSUP': 138, - 'ENOMEDIUM': 148, - 'EILSEQ': 25, - 'EOVERFLOW': 61, - 'ECANCELED': 11, - 'ENOTRECOVERABLE': 56, - 'EOWNERDEAD': 62, - 'ESTRPIPE': 135, - }; - - var asyncLoad = async (url) => { - var arrayBuffer = await readAsync(url); - assert(arrayBuffer, `Loading data file "${url}" failed (no arrayBuffer).`); - return new Uint8Array(arrayBuffer); - }; - - - var FS_createDataFile = (...args) => FS.createDataFile(...args); - - var getUniqueRunDependency = (id) => { - var orig = id; - while (1) { - if (!runDependencyTracking[id]) return id; - id = orig + Math.random(); - } - }; - - var preloadPlugins = []; - var FS_handledByPreloadPlugin = async (byteArray, fullname) => { - // Ensure plugins are ready. 
- if (typeof Browser != 'undefined') Browser.init(); - - for (var plugin of preloadPlugins) { - if (plugin['canHandle'](fullname)) { - assert(plugin['handle'].constructor.name === 'AsyncFunction', 'Filesystem plugin handlers must be async functions (See #24914)') - return plugin['handle'](byteArray, fullname); - } - } - // In no plugin handled this file then return the original/unmodified - // byteArray. - return byteArray; - }; - var FS_preloadFile = async (parent, name, url, canRead, canWrite, dontCreateFile, canOwn, preFinish) => { - // TODO we should allow people to just pass in a complete filename instead - // of parent and name being that we just join them anyways - var fullname = name ? PATH_FS.resolve(PATH.join2(parent, name)) : parent; - var dep = getUniqueRunDependency(`cp ${fullname}`); // might have several active requests for the same fullname - addRunDependency(dep); - - try { - var byteArray = url; - if (typeof url == 'string') { - byteArray = await asyncLoad(url); - } - - byteArray = await FS_handledByPreloadPlugin(byteArray, fullname); - preFinish?.(); - if (!dontCreateFile) { - FS_createDataFile(parent, name, byteArray, canRead, canWrite, canOwn); - } - } finally { - removeRunDependency(dep); - } - }; - var FS_createPreloadedFile = (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn, preFinish) => { - FS_preloadFile(parent, name, url, canRead, canWrite, dontCreateFile, canOwn, preFinish).then(onload).catch(onerror); - }; - var FS = { - root:null, - mounts:[], - devices:{ - }, - streams:[], - nextInode:1, - nameTable:null, - currentPath:"/", - initialized:false, - ignorePermissions:true, - filesystems:null, - syncFSRequests:0, - readFiles:{ - }, - ErrnoError:class extends Error { - name = 'ErrnoError'; - // We set the `name` property to be able to identify `FS.ErrnoError` - // - the `name` is a standard ECMA-262 property of error objects. Kind of good to have it anyway. - // - when using PROXYFS, an error can come from an underlying FS - // as different FS objects have their own FS.ErrnoError each, - // the test `err instanceof FS.ErrnoError` won't detect an error coming from another filesystem, causing bugs. - // we'll use the reliable test `err.name == "ErrnoError"` instead - constructor(errno) { - super(runtimeInitialized ? strError(errno) : ''); - this.errno = errno; - for (var key in ERRNO_CODES) { - if (ERRNO_CODES[key] === errno) { - this.code = key; - break; - } - } - } - }, - FSStream:class { - shared = {}; - get object() { - return this.node; - } - set object(val) { - this.node = val; - } - get isRead() { - return (this.flags & 2097155) !== 1; - } - get isWrite() { - return (this.flags & 2097155) !== 0; - } - get isAppend() { - return (this.flags & 1024); - } - get flags() { - return this.shared.flags; - } - set flags(val) { - this.shared.flags = val; - } - get position() { - return this.shared.position; - } - set position(val) { - this.shared.position = val; - } - }, - FSNode:class { - node_ops = {}; - stream_ops = {}; - readMode = 292 | 73; - writeMode = 146; - mounted = null; - constructor(parent, name, mode, rdev) { - if (!parent) { - parent = this; // root node sets parent to itself - } - this.parent = parent; - this.mount = parent.mount; - this.id = FS.nextInode++; - this.name = name; - this.mode = mode; - this.rdev = rdev; - this.atime = this.mtime = this.ctime = Date.now(); - } - get read() { - return (this.mode & this.readMode) === this.readMode; - } - set read(val) { - val ? 
this.mode |= this.readMode : this.mode &= ~this.readMode; - } - get write() { - return (this.mode & this.writeMode) === this.writeMode; - } - set write(val) { - val ? this.mode |= this.writeMode : this.mode &= ~this.writeMode; - } - get isFolder() { - return FS.isDir(this.mode); - } - get isDevice() { - return FS.isChrdev(this.mode); - } - }, - lookupPath(path, opts = {}) { - if (!path) { - throw new FS.ErrnoError(44); - } - opts.follow_mount ??= true - - if (!PATH.isAbs(path)) { - path = FS.cwd() + '/' + path; - } - - // limit max consecutive symlinks to 40 (SYMLOOP_MAX). - linkloop: for (var nlinks = 0; nlinks < 40; nlinks++) { - // split the absolute path - var parts = path.split('/').filter((p) => !!p); - - // start at the root - var current = FS.root; - var current_path = '/'; - - for (var i = 0; i < parts.length; i++) { - var islast = (i === parts.length-1); - if (islast && opts.parent) { - // stop resolving - break; - } - - if (parts[i] === '.') { - continue; - } - - if (parts[i] === '..') { - current_path = PATH.dirname(current_path); - if (FS.isRoot(current)) { - path = current_path + '/' + parts.slice(i + 1).join('/'); - // We're making progress here, don't let many consecutive ..'s - // lead to ELOOP - nlinks--; - continue linkloop; - } else { - current = current.parent; - } - continue; - } - - current_path = PATH.join2(current_path, parts[i]); - try { - current = FS.lookupNode(current, parts[i]); - } catch (e) { - // if noent_okay is true, suppress a ENOENT in the last component - // and return an object with an undefined node. This is needed for - // resolving symlinks in the path when creating a file. - if ((e?.errno === 44) && islast && opts.noent_okay) { - return { path: current_path }; - } - throw e; - } - - // jump to the mount's root node if this is a mountpoint - if (FS.isMountpoint(current) && (!islast || opts.follow_mount)) { - current = current.mounted.root; - } - - // by default, lookupPath will not follow a symlink if it is the final path component. - // setting opts.follow = true will override this behavior. - if (FS.isLink(current.mode) && (!islast || opts.follow)) { - if (!current.node_ops.readlink) { - throw new FS.ErrnoError(52); - } - var link = current.node_ops.readlink(current); - if (!PATH.isAbs(link)) { - link = PATH.dirname(current_path) + '/' + link; - } - path = link + '/' + parts.slice(i + 1).join('/'); - continue linkloop; - } - } - return { path: current_path, node: current }; - } - throw new FS.ErrnoError(32); - }, - getPath(node) { - var path; - while (true) { - if (FS.isRoot(node)) { - var mount = node.mount.mountpoint; - if (!path) return mount; - return mount[mount.length-1] !== '/' ? `${mount}/${path}` : mount + path; - } - path = path ? 
`${node.name}/${path}` : node.name; - node = node.parent; - } - }, - hashName(parentid, name) { - var hash = 0; - - for (var i = 0; i < name.length; i++) { - hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0; - } - return ((parentid + hash) >>> 0) % FS.nameTable.length; - }, - hashAddNode(node) { - var hash = FS.hashName(node.parent.id, node.name); - node.name_next = FS.nameTable[hash]; - FS.nameTable[hash] = node; - }, - hashRemoveNode(node) { - var hash = FS.hashName(node.parent.id, node.name); - if (FS.nameTable[hash] === node) { - FS.nameTable[hash] = node.name_next; - } else { - var current = FS.nameTable[hash]; - while (current) { - if (current.name_next === node) { - current.name_next = node.name_next; - break; - } - current = current.name_next; - } - } - }, - lookupNode(parent, name) { - var errCode = FS.mayLookup(parent); - if (errCode) { - throw new FS.ErrnoError(errCode); - } - var hash = FS.hashName(parent.id, name); - for (var node = FS.nameTable[hash]; node; node = node.name_next) { - var nodeName = node.name; - if (node.parent.id === parent.id && nodeName === name) { - return node; - } - } - // if we failed to find it in the cache, call into the VFS - return FS.lookup(parent, name); - }, - createNode(parent, name, mode, rdev) { - assert(typeof parent == 'object') - var node = new FS.FSNode(parent, name, mode, rdev); - - FS.hashAddNode(node); - - return node; - }, - destroyNode(node) { - FS.hashRemoveNode(node); - }, - isRoot(node) { - return node === node.parent; - }, - isMountpoint(node) { - return !!node.mounted; - }, - isFile(mode) { - return (mode & 61440) === 32768; - }, - isDir(mode) { - return (mode & 61440) === 16384; - }, - isLink(mode) { - return (mode & 61440) === 40960; - }, - isChrdev(mode) { - return (mode & 61440) === 8192; - }, - isBlkdev(mode) { - return (mode & 61440) === 24576; - }, - isFIFO(mode) { - return (mode & 61440) === 4096; - }, - isSocket(mode) { - return (mode & 49152) === 49152; - }, - flagsToPermissionString(flag) { - var perms = ['r', 'w', 'rw'][flag & 3]; - if ((flag & 512)) { - perms += 'w'; - } - return perms; - }, - nodePermissions(node, perms) { - if (FS.ignorePermissions) { - return 0; - } - // return 0 if any user, group or owner bits are set. - if (perms.includes('r') && !(node.mode & 292)) { - return 2; - } else if (perms.includes('w') && !(node.mode & 146)) { - return 2; - } else if (perms.includes('x') && !(node.mode & 73)) { - return 2; - } - return 0; - }, - mayLookup(dir) { - if (!FS.isDir(dir.mode)) return 54; - var errCode = FS.nodePermissions(dir, 'x'); - if (errCode) return errCode; - if (!dir.node_ops.lookup) return 2; - return 0; - }, - mayCreate(dir, name) { - if (!FS.isDir(dir.mode)) { - return 54; - } - try { - var node = FS.lookupNode(dir, name); - return 20; - } catch (e) { - } - return FS.nodePermissions(dir, 'wx'); - }, - mayDelete(dir, name, isdir) { - var node; - try { - node = FS.lookupNode(dir, name); - } catch (e) { - return e.errno; - } - var errCode = FS.nodePermissions(dir, 'wx'); - if (errCode) { - return errCode; - } - if (isdir) { - if (!FS.isDir(node.mode)) { - return 54; - } - if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) { - return 10; - } - } else { - if (FS.isDir(node.mode)) { - return 31; - } - } - return 0; - }, - mayOpen(node, flags) { - if (!node) { - return 44; - } - if (FS.isLink(node.mode)) { - return 32; - } else if (FS.isDir(node.mode)) { - if (FS.flagsToPermissionString(flags) !== 'r' // opening for write - || (flags & (512 | 64))) { // TODO: check for O_SEARCH? 
(== search for dir only) - return 31; - } - } - return FS.nodePermissions(node, FS.flagsToPermissionString(flags)); - }, - checkOpExists(op, err) { - if (!op) { - throw new FS.ErrnoError(err); - } - return op; - }, - MAX_OPEN_FDS:4096, - nextfd() { - for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) { - if (!FS.streams[fd]) { - return fd; - } - } - throw new FS.ErrnoError(33); - }, - getStreamChecked(fd) { - var stream = FS.getStream(fd); - if (!stream) { - throw new FS.ErrnoError(8); - } - return stream; - }, - getStream:(fd) => FS.streams[fd], - createStream(stream, fd = -1) { - assert(fd >= -1); - - // clone it, so we can return an instance of FSStream - stream = Object.assign(new FS.FSStream(), stream); - if (fd == -1) { - fd = FS.nextfd(); - } - stream.fd = fd; - FS.streams[fd] = stream; - return stream; - }, - closeStream(fd) { - FS.streams[fd] = null; - }, - dupStream(origStream, fd = -1) { - var stream = FS.createStream(origStream, fd); - stream.stream_ops?.dup?.(stream); - return stream; - }, - doSetAttr(stream, node, attr) { - var setattr = stream?.stream_ops.setattr; - var arg = setattr ? stream : node; - setattr ??= node.node_ops.setattr; - FS.checkOpExists(setattr, 63) - setattr(arg, attr); - }, - chrdev_stream_ops:{ - open(stream) { - var device = FS.getDevice(stream.node.rdev); - // override node's stream ops with the device's - stream.stream_ops = device.stream_ops; - // forward the open call - stream.stream_ops.open?.(stream); - }, - llseek() { - throw new FS.ErrnoError(70); - }, - }, - major:(dev) => ((dev) >> 8), - minor:(dev) => ((dev) & 0xff), - makedev:(ma, mi) => ((ma) << 8 | (mi)), - registerDevice(dev, ops) { - FS.devices[dev] = { stream_ops: ops }; - }, - getDevice:(dev) => FS.devices[dev], - getMounts(mount) { - var mounts = []; - var check = [mount]; - - while (check.length) { - var m = check.pop(); - - mounts.push(m); - - check.push(...m.mounts); - } - - return mounts; - }, - syncfs(populate, callback) { - if (typeof populate == 'function') { - callback = populate; - populate = false; - } - - FS.syncFSRequests++; - - if (FS.syncFSRequests > 1) { - err(`warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`); - } - - var mounts = FS.getMounts(FS.root.mount); - var completed = 0; - - function doCallback(errCode) { - assert(FS.syncFSRequests > 0); - FS.syncFSRequests--; - return callback(errCode); - } - - function done(errCode) { - if (errCode) { - if (!done.errored) { - done.errored = true; - return doCallback(errCode); - } - return; - } - if (++completed >= mounts.length) { - doCallback(null); - } - }; - - // sync all mounts - mounts.forEach((mount) => { - if (!mount.type.syncfs) { - return done(null); - } - mount.type.syncfs(mount, populate, done); - }); - }, - mount(type, opts, mountpoint) { - if (typeof type == 'string') { - // The filesystem was not included, and instead we have an error - // message stored in the variable. 
- throw type; - } - var root = mountpoint === '/'; - var pseudo = !mountpoint; - var node; - - if (root && FS.root) { - throw new FS.ErrnoError(10); - } else if (!root && !pseudo) { - var lookup = FS.lookupPath(mountpoint, { follow_mount: false }); - - mountpoint = lookup.path; // use the absolute path - node = lookup.node; - - if (FS.isMountpoint(node)) { - throw new FS.ErrnoError(10); - } - - if (!FS.isDir(node.mode)) { - throw new FS.ErrnoError(54); - } - } - - var mount = { - type, - opts, - mountpoint, - mounts: [] - }; - - // create a root node for the fs - var mountRoot = type.mount(mount); - mountRoot.mount = mount; - mount.root = mountRoot; - - if (root) { - FS.root = mountRoot; - } else if (node) { - // set as a mountpoint - node.mounted = mount; - - // add the new mount to the current mount's children - if (node.mount) { - node.mount.mounts.push(mount); - } - } - - return mountRoot; - }, - unmount(mountpoint) { - var lookup = FS.lookupPath(mountpoint, { follow_mount: false }); - - if (!FS.isMountpoint(lookup.node)) { - throw new FS.ErrnoError(28); - } - - // destroy the nodes for this mount, and all its child mounts - var node = lookup.node; - var mount = node.mounted; - var mounts = FS.getMounts(mount); - - Object.keys(FS.nameTable).forEach((hash) => { - var current = FS.nameTable[hash]; - - while (current) { - var next = current.name_next; - - if (mounts.includes(current.mount)) { - FS.destroyNode(current); - } - - current = next; - } - }); - - // no longer a mountpoint - node.mounted = null; - - // remove this mount from the child mounts - var idx = node.mount.mounts.indexOf(mount); - assert(idx !== -1); - node.mount.mounts.splice(idx, 1); - }, - lookup(parent, name) { - return parent.node_ops.lookup(parent, name); - }, - mknod(path, mode, dev) { - var lookup = FS.lookupPath(path, { parent: true }); - var parent = lookup.node; - var name = PATH.basename(path); - if (!name) { - throw new FS.ErrnoError(28); - } - if (name === '.' || name === '..') { - throw new FS.ErrnoError(20); - } - var errCode = FS.mayCreate(parent, name); - if (errCode) { - throw new FS.ErrnoError(errCode); - } - if (!parent.node_ops.mknod) { - throw new FS.ErrnoError(63); - } - return parent.node_ops.mknod(parent, name, mode, dev); - }, - statfs(path) { - return FS.statfsNode(FS.lookupPath(path, {follow: true}).node); - }, - statfsStream(stream) { - // We keep a separate statfsStream function because noderawfs overrides - // it. In noderawfs, stream.node is sometimes null. Instead, we need to - // look at stream.path. - return FS.statfsNode(stream.node); - }, - statfsNode(node) { - // NOTE: None of the defaults here are true. We're just returning safe and - // sane values. Currently nodefs and rawfs replace these defaults, - // other file systems leave them alone. 
- var rtn = { - bsize: 4096, - frsize: 4096, - blocks: 1e6, - bfree: 5e5, - bavail: 5e5, - files: FS.nextInode, - ffree: FS.nextInode - 1, - fsid: 42, - flags: 2, - namelen: 255, - }; - - if (node.node_ops.statfs) { - Object.assign(rtn, node.node_ops.statfs(node.mount.opts.root)); - } - return rtn; - }, - create(path, mode = 0o666) { - mode &= 4095; - mode |= 32768; - return FS.mknod(path, mode, 0); - }, - mkdir(path, mode = 0o777) { - mode &= 511 | 512; - mode |= 16384; - return FS.mknod(path, mode, 0); - }, - mkdirTree(path, mode) { - var dirs = path.split('/'); - var d = ''; - for (var dir of dirs) { - if (!dir) continue; - if (d || PATH.isAbs(path)) d += '/'; - d += dir; - try { - FS.mkdir(d, mode); - } catch(e) { - if (e.errno != 20) throw e; - } - } - }, - mkdev(path, mode, dev) { - if (typeof dev == 'undefined') { - dev = mode; - mode = 0o666; - } - mode |= 8192; - return FS.mknod(path, mode, dev); - }, - symlink(oldpath, newpath) { - if (!PATH_FS.resolve(oldpath)) { - throw new FS.ErrnoError(44); - } - var lookup = FS.lookupPath(newpath, { parent: true }); - var parent = lookup.node; - if (!parent) { - throw new FS.ErrnoError(44); - } - var newname = PATH.basename(newpath); - var errCode = FS.mayCreate(parent, newname); - if (errCode) { - throw new FS.ErrnoError(errCode); - } - if (!parent.node_ops.symlink) { - throw new FS.ErrnoError(63); - } - return parent.node_ops.symlink(parent, newname, oldpath); - }, - rename(old_path, new_path) { - var old_dirname = PATH.dirname(old_path); - var new_dirname = PATH.dirname(new_path); - var old_name = PATH.basename(old_path); - var new_name = PATH.basename(new_path); - // parents must exist - var lookup, old_dir, new_dir; - - // let the errors from non existent directories percolate up - lookup = FS.lookupPath(old_path, { parent: true }); - old_dir = lookup.node; - lookup = FS.lookupPath(new_path, { parent: true }); - new_dir = lookup.node; - - if (!old_dir || !new_dir) throw new FS.ErrnoError(44); - // need to be part of the same mount - if (old_dir.mount !== new_dir.mount) { - throw new FS.ErrnoError(75); - } - // source must exist - var old_node = FS.lookupNode(old_dir, old_name); - // old path should not be an ancestor of the new path - var relative = PATH_FS.relative(old_path, new_dirname); - if (relative.charAt(0) !== '.') { - throw new FS.ErrnoError(28); - } - // new path should not be an ancestor of the old path - relative = PATH_FS.relative(new_path, old_dirname); - if (relative.charAt(0) !== '.') { - throw new FS.ErrnoError(55); - } - // see if the new path already exists - var new_node; - try { - new_node = FS.lookupNode(new_dir, new_name); - } catch (e) { - // not fatal - } - // early out if nothing needs to change - if (old_node === new_node) { - return; - } - // we'll need to delete the old entry - var isdir = FS.isDir(old_node.mode); - var errCode = FS.mayDelete(old_dir, old_name, isdir); - if (errCode) { - throw new FS.ErrnoError(errCode); - } - // need delete permissions if we'll be overwriting. - // need create permissions if new doesn't already exist. - errCode = new_node ? 
- FS.mayDelete(new_dir, new_name, isdir) : - FS.mayCreate(new_dir, new_name); - if (errCode) { - throw new FS.ErrnoError(errCode); - } - if (!old_dir.node_ops.rename) { - throw new FS.ErrnoError(63); - } - if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) { - throw new FS.ErrnoError(10); - } - // if we are going to change the parent, check write permissions - if (new_dir !== old_dir) { - errCode = FS.nodePermissions(old_dir, 'w'); - if (errCode) { - throw new FS.ErrnoError(errCode); - } - } - // remove the node from the lookup hash - FS.hashRemoveNode(old_node); - // do the underlying fs rename - try { - old_dir.node_ops.rename(old_node, new_dir, new_name); - // update old node (we do this here to avoid each backend - // needing to) - old_node.parent = new_dir; - } catch (e) { - throw e; - } finally { - // add the node back to the hash (in case node_ops.rename - // changed its name) - FS.hashAddNode(old_node); - } - }, - rmdir(path) { - var lookup = FS.lookupPath(path, { parent: true }); - var parent = lookup.node; - var name = PATH.basename(path); - var node = FS.lookupNode(parent, name); - var errCode = FS.mayDelete(parent, name, true); - if (errCode) { - throw new FS.ErrnoError(errCode); - } - if (!parent.node_ops.rmdir) { - throw new FS.ErrnoError(63); - } - if (FS.isMountpoint(node)) { - throw new FS.ErrnoError(10); - } - parent.node_ops.rmdir(parent, name); - FS.destroyNode(node); - }, - readdir(path) { - var lookup = FS.lookupPath(path, { follow: true }); - var node = lookup.node; - var readdir = FS.checkOpExists(node.node_ops.readdir, 54); - return readdir(node); - }, - unlink(path) { - var lookup = FS.lookupPath(path, { parent: true }); - var parent = lookup.node; - if (!parent) { - throw new FS.ErrnoError(44); - } - var name = PATH.basename(path); - var node = FS.lookupNode(parent, name); - var errCode = FS.mayDelete(parent, name, false); - if (errCode) { - // According to POSIX, we should map EISDIR to EPERM, but - // we instead do what Linux does (and we must, as we use - // the musl linux libc). - throw new FS.ErrnoError(errCode); - } - if (!parent.node_ops.unlink) { - throw new FS.ErrnoError(63); - } - if (FS.isMountpoint(node)) { - throw new FS.ErrnoError(10); - } - parent.node_ops.unlink(parent, name); - FS.destroyNode(node); - }, - readlink(path) { - var lookup = FS.lookupPath(path); - var link = lookup.node; - if (!link) { - throw new FS.ErrnoError(44); - } - if (!link.node_ops.readlink) { - throw new FS.ErrnoError(28); - } - return link.node_ops.readlink(link); - }, - stat(path, dontFollow) { - var lookup = FS.lookupPath(path, { follow: !dontFollow }); - var node = lookup.node; - var getattr = FS.checkOpExists(node.node_ops.getattr, 63); - return getattr(node); - }, - fstat(fd) { - var stream = FS.getStreamChecked(fd); - var node = stream.node; - var getattr = stream.stream_ops.getattr; - var arg = getattr ? 
stream : node; - getattr ??= node.node_ops.getattr; - FS.checkOpExists(getattr, 63) - return getattr(arg); - }, - lstat(path) { - return FS.stat(path, true); - }, - doChmod(stream, node, mode, dontFollow) { - FS.doSetAttr(stream, node, { - mode: (mode & 4095) | (node.mode & ~4095), - ctime: Date.now(), - dontFollow - }); - }, - chmod(path, mode, dontFollow) { - var node; - if (typeof path == 'string') { - var lookup = FS.lookupPath(path, { follow: !dontFollow }); - node = lookup.node; - } else { - node = path; - } - FS.doChmod(null, node, mode, dontFollow); - }, - lchmod(path, mode) { - FS.chmod(path, mode, true); - }, - fchmod(fd, mode) { - var stream = FS.getStreamChecked(fd); - FS.doChmod(stream, stream.node, mode, false); - }, - doChown(stream, node, dontFollow) { - FS.doSetAttr(stream, node, { - timestamp: Date.now(), - dontFollow - // we ignore the uid / gid for now - }); - }, - chown(path, uid, gid, dontFollow) { - var node; - if (typeof path == 'string') { - var lookup = FS.lookupPath(path, { follow: !dontFollow }); - node = lookup.node; - } else { - node = path; - } - FS.doChown(null, node, dontFollow); - }, - lchown(path, uid, gid) { - FS.chown(path, uid, gid, true); - }, - fchown(fd, uid, gid) { - var stream = FS.getStreamChecked(fd); - FS.doChown(stream, stream.node, false); - }, - doTruncate(stream, node, len) { - if (FS.isDir(node.mode)) { - throw new FS.ErrnoError(31); - } - if (!FS.isFile(node.mode)) { - throw new FS.ErrnoError(28); - } - var errCode = FS.nodePermissions(node, 'w'); - if (errCode) { - throw new FS.ErrnoError(errCode); - } - FS.doSetAttr(stream, node, { - size: len, - timestamp: Date.now() - }); - }, - truncate(path, len) { - if (len < 0) { - throw new FS.ErrnoError(28); - } - var node; - if (typeof path == 'string') { - var lookup = FS.lookupPath(path, { follow: true }); - node = lookup.node; - } else { - node = path; - } - FS.doTruncate(null, node, len); - }, - ftruncate(fd, len) { - var stream = FS.getStreamChecked(fd); - if (len < 0 || (stream.flags & 2097155) === 0) { - throw new FS.ErrnoError(28); - } - FS.doTruncate(stream, stream.node, len); - }, - utime(path, atime, mtime) { - var lookup = FS.lookupPath(path, { follow: true }); - var node = lookup.node; - var setattr = FS.checkOpExists(node.node_ops.setattr, 63); - setattr(node, { - atime: atime, - mtime: mtime - }); - }, - open(path, flags, mode = 0o666) { - if (path === "") { - throw new FS.ErrnoError(44); - } - flags = typeof flags == 'string' ? FS_modeStringToFlags(flags) : flags; - if ((flags & 64)) { - mode = (mode & 4095) | 32768; - } else { - mode = 0; - } - var node; - var isDirPath; - if (typeof path == 'object') { - node = path; - } else { - isDirPath = path.endsWith("/"); - // noent_okay makes it so that if the final component of the path - // doesn't exist, lookupPath returns `node: undefined`. `path` will be - // updated to point to the target of all symlinks. - var lookup = FS.lookupPath(path, { - follow: !(flags & 131072), - noent_okay: true - }); - node = lookup.node; - path = lookup.path; - } - // perhaps we need to create the node - var created = false; - if ((flags & 64)) { - if (node) { - // if O_CREAT and O_EXCL are set, error out if the node already exists - if ((flags & 128)) { - throw new FS.ErrnoError(20); - } - } else if (isDirPath) { - throw new FS.ErrnoError(31); - } else { - // node doesn't exist, try to create it - // Ignore the permission bits here to ensure we can `open` this new - // file below. 
We use chmod below the apply the permissions once the - // file is open. - node = FS.mknod(path, mode | 0o777, 0); - created = true; - } - } - if (!node) { - throw new FS.ErrnoError(44); - } - // can't truncate a device - if (FS.isChrdev(node.mode)) { - flags &= ~512; - } - // if asked only for a directory, then this must be one - if ((flags & 65536) && !FS.isDir(node.mode)) { - throw new FS.ErrnoError(54); - } - // check permissions, if this is not a file we just created now (it is ok to - // create and write to a file with read-only permissions; it is read-only - // for later use) - if (!created) { - var errCode = FS.mayOpen(node, flags); - if (errCode) { - throw new FS.ErrnoError(errCode); - } - } - // do truncation if necessary - if ((flags & 512) && !created) { - FS.truncate(node, 0); - } - // we've already handled these, don't pass down to the underlying vfs - flags &= ~(128 | 512 | 131072); - - // register the stream with the filesystem - var stream = FS.createStream({ - node, - path: FS.getPath(node), // we want the absolute path to the node - flags, - seekable: true, - position: 0, - stream_ops: node.stream_ops, - // used by the file family libc calls (fopen, fwrite, ferror, etc.) - ungotten: [], - error: false - }); - // call the new stream's open function - if (stream.stream_ops.open) { - stream.stream_ops.open(stream); - } - if (created) { - FS.chmod(node, mode & 0o777); - } - if (Module['logReadFiles'] && !(flags & 1)) { - if (!(path in FS.readFiles)) { - FS.readFiles[path] = 1; - } - } - return stream; - }, - close(stream) { - if (FS.isClosed(stream)) { - throw new FS.ErrnoError(8); - } - if (stream.getdents) stream.getdents = null; // free readdir state - try { - if (stream.stream_ops.close) { - stream.stream_ops.close(stream); - } - } catch (e) { - throw e; - } finally { - FS.closeStream(stream.fd); - } - stream.fd = null; - }, - isClosed(stream) { - return stream.fd === null; - }, - llseek(stream, offset, whence) { - if (FS.isClosed(stream)) { - throw new FS.ErrnoError(8); - } - if (!stream.seekable || !stream.stream_ops.llseek) { - throw new FS.ErrnoError(70); - } - if (whence != 0 && whence != 1 && whence != 2) { - throw new FS.ErrnoError(28); - } - stream.position = stream.stream_ops.llseek(stream, offset, whence); - stream.ungotten = []; - return stream.position; - }, - read(stream, buffer, offset, length, position) { - assert(offset >= 0); - if (length < 0 || position < 0) { - throw new FS.ErrnoError(28); - } - if (FS.isClosed(stream)) { - throw new FS.ErrnoError(8); - } - if ((stream.flags & 2097155) === 1) { - throw new FS.ErrnoError(8); - } - if (FS.isDir(stream.node.mode)) { - throw new FS.ErrnoError(31); - } - if (!stream.stream_ops.read) { - throw new FS.ErrnoError(28); - } - var seeking = typeof position != 'undefined'; - if (!seeking) { - position = stream.position; - } else if (!stream.seekable) { - throw new FS.ErrnoError(70); - } - var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position); - if (!seeking) stream.position += bytesRead; - return bytesRead; - }, - write(stream, buffer, offset, length, position, canOwn) { - assert(offset >= 0); - if (length < 0 || position < 0) { - throw new FS.ErrnoError(28); - } - if (FS.isClosed(stream)) { - throw new FS.ErrnoError(8); - } - if ((stream.flags & 2097155) === 0) { - throw new FS.ErrnoError(8); - } - if (FS.isDir(stream.node.mode)) { - throw new FS.ErrnoError(31); - } - if (!stream.stream_ops.write) { - throw new FS.ErrnoError(28); - } - if (stream.seekable && stream.flags & 1024) { - // 
seek to the end before writing in append mode - FS.llseek(stream, 0, 2); - } - var seeking = typeof position != 'undefined'; - if (!seeking) { - position = stream.position; - } else if (!stream.seekable) { - throw new FS.ErrnoError(70); - } - var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn); - if (!seeking) stream.position += bytesWritten; - return bytesWritten; - }, - mmap(stream, length, position, prot, flags) { - // User requests writing to file (prot & PROT_WRITE != 0). - // Checking if we have permissions to write to the file unless - // MAP_PRIVATE flag is set. According to POSIX spec it is possible - // to write to file opened in read-only mode with MAP_PRIVATE flag, - // as all modifications will be visible only in the memory of - // the current process. - if ((prot & 2) !== 0 - && (flags & 2) === 0 - && (stream.flags & 2097155) !== 2) { - throw new FS.ErrnoError(2); - } - if ((stream.flags & 2097155) === 1) { - throw new FS.ErrnoError(2); - } - if (!stream.stream_ops.mmap) { - throw new FS.ErrnoError(43); - } - if (!length) { - throw new FS.ErrnoError(28); - } - return stream.stream_ops.mmap(stream, length, position, prot, flags); - }, - msync(stream, buffer, offset, length, mmapFlags) { - assert(offset >= 0); - if (!stream.stream_ops.msync) { - return 0; - } - return stream.stream_ops.msync(stream, buffer, offset, length, mmapFlags); - }, - ioctl(stream, cmd, arg) { - if (!stream.stream_ops.ioctl) { - throw new FS.ErrnoError(59); - } - return stream.stream_ops.ioctl(stream, cmd, arg); - }, - readFile(path, opts = {}) { - opts.flags = opts.flags || 0; - opts.encoding = opts.encoding || 'binary'; - if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') { - throw new Error(`Invalid encoding type "${opts.encoding}"`); - } - var stream = FS.open(path, opts.flags); - var stat = FS.stat(path); - var length = stat.size; - var buf = new Uint8Array(length); - FS.read(stream, buf, 0, length, 0); - if (opts.encoding === 'utf8') { - buf = UTF8ArrayToString(buf); - } - FS.close(stream); - return buf; - }, - writeFile(path, data, opts = {}) { - opts.flags = opts.flags || 577; - var stream = FS.open(path, opts.flags, opts.mode); - if (typeof data == 'string') { - data = new Uint8Array(intArrayFromString(data, true)); - } - if (ArrayBuffer.isView(data)) { - FS.write(stream, data, 0, data.byteLength, undefined, opts.canOwn); - } else { - throw new Error('Unsupported data type'); - } - FS.close(stream); - }, - cwd:() => FS.currentPath, - chdir(path) { - var lookup = FS.lookupPath(path, { follow: true }); - if (lookup.node === null) { - throw new FS.ErrnoError(44); - } - if (!FS.isDir(lookup.node.mode)) { - throw new FS.ErrnoError(54); - } - var errCode = FS.nodePermissions(lookup.node, 'x'); - if (errCode) { - throw new FS.ErrnoError(errCode); - } - FS.currentPath = lookup.path; - }, - createDefaultDirectories() { - FS.mkdir('/tmp'); - FS.mkdir('/home'); - FS.mkdir('/home/web_user'); - }, - createDefaultDevices() { - // create /dev - FS.mkdir('/dev'); - // setup /dev/null - FS.registerDevice(FS.makedev(1, 3), { - read: () => 0, - write: (stream, buffer, offset, length, pos) => length, - llseek: () => 0, - }); - FS.mkdev('/dev/null', FS.makedev(1, 3)); - // setup /dev/tty and /dev/tty1 - // stderr needs to print output using err() rather than out() - // so we register a second tty just for it. 
- TTY.register(FS.makedev(5, 0), TTY.default_tty_ops); - TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops); - FS.mkdev('/dev/tty', FS.makedev(5, 0)); - FS.mkdev('/dev/tty1', FS.makedev(6, 0)); - // setup /dev/[u]random - // use a buffer to avoid overhead of individual crypto calls per byte - var randomBuffer = new Uint8Array(1024), randomLeft = 0; - var randomByte = () => { - if (randomLeft === 0) { - randomFill(randomBuffer); - randomLeft = randomBuffer.byteLength; - } - return randomBuffer[--randomLeft]; - }; - FS.createDevice('/dev', 'random', randomByte); - FS.createDevice('/dev', 'urandom', randomByte); - // we're not going to emulate the actual shm device, - // just create the tmp dirs that reside in it commonly - FS.mkdir('/dev/shm'); - FS.mkdir('/dev/shm/tmp'); - }, - createSpecialDirectories() { - // create /proc/self/fd which allows /proc/self/fd/6 => readlink gives the - // name of the stream for fd 6 (see test_unistd_ttyname) - FS.mkdir('/proc'); - var proc_self = FS.mkdir('/proc/self'); - FS.mkdir('/proc/self/fd'); - FS.mount({ - mount() { - var node = FS.createNode(proc_self, 'fd', 16895, 73); - node.stream_ops = { - llseek: MEMFS.stream_ops.llseek, - }; - node.node_ops = { - lookup(parent, name) { - var fd = +name; - var stream = FS.getStreamChecked(fd); - var ret = { - parent: null, - mount: { mountpoint: 'fake' }, - node_ops: { readlink: () => stream.path }, - id: fd + 1, - }; - ret.parent = ret; // make it look like a simple root node - return ret; - }, - readdir() { - return Array.from(FS.streams.entries()) - .filter(([k, v]) => v) - .map(([k, v]) => k.toString()); - } - }; - return node; - } - }, {}, '/proc/self/fd'); - }, - createStandardStreams(input, output, error) { - // TODO deprecate the old functionality of a single - // input / output callback and that utilizes FS.createDevice - // and instead require a unique set of stream ops - - // by default, we symlink the standard streams to the - // default tty devices. however, if the standard streams - // have been overwritten we create a unique device for - // them instead. - if (input) { - FS.createDevice('/dev', 'stdin', input); - } else { - FS.symlink('/dev/tty', '/dev/stdin'); - } - if (output) { - FS.createDevice('/dev', 'stdout', null, output); - } else { - FS.symlink('/dev/tty', '/dev/stdout'); - } - if (error) { - FS.createDevice('/dev', 'stderr', null, error); - } else { - FS.symlink('/dev/tty1', '/dev/stderr'); - } - - // open default streams for the stdin, stdout and stderr devices - var stdin = FS.open('/dev/stdin', 0); - var stdout = FS.open('/dev/stdout', 1); - var stderr = FS.open('/dev/stderr', 1); - assert(stdin.fd === 0, `invalid handle for stdin (${stdin.fd})`); - assert(stdout.fd === 1, `invalid handle for stdout (${stdout.fd})`); - assert(stderr.fd === 2, `invalid handle for stderr (${stderr.fd})`); - }, - staticInit() { - FS.nameTable = new Array(4096); - - FS.mount(MEMFS, {}, '/'); - - FS.createDefaultDirectories(); - FS.createDefaultDevices(); - FS.createSpecialDirectories(); - - FS.filesystems = { - 'MEMFS': MEMFS, - }; - }, - init(input, output, error) { - assert(!FS.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)'); - FS.initialized = true; - - // Allow Module.stdin etc. 
to provide defaults, if none explicitly passed to us here - input ??= Module['stdin']; - output ??= Module['stdout']; - error ??= Module['stderr']; - - FS.createStandardStreams(input, output, error); - }, - quit() { - FS.initialized = false; - // force-flush all streams, so we get musl std streams printed out - _fflush(0); - // close all of our streams - for (var stream of FS.streams) { - if (stream) { - FS.close(stream); - } - } - }, - findObject(path, dontResolveLastLink) { - var ret = FS.analyzePath(path, dontResolveLastLink); - if (!ret.exists) { - return null; - } - return ret.object; - }, - analyzePath(path, dontResolveLastLink) { - // operate from within the context of the symlink's target - try { - var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink }); - path = lookup.path; - } catch (e) { - } - var ret = { - isRoot: false, exists: false, error: 0, name: null, path: null, object: null, - parentExists: false, parentPath: null, parentObject: null - }; - try { - var lookup = FS.lookupPath(path, { parent: true }); - ret.parentExists = true; - ret.parentPath = lookup.path; - ret.parentObject = lookup.node; - ret.name = PATH.basename(path); - lookup = FS.lookupPath(path, { follow: !dontResolveLastLink }); - ret.exists = true; - ret.path = lookup.path; - ret.object = lookup.node; - ret.name = lookup.node.name; - ret.isRoot = lookup.path === '/'; - } catch (e) { - ret.error = e.errno; - }; - return ret; - }, - createPath(parent, path, canRead, canWrite) { - parent = typeof parent == 'string' ? parent : FS.getPath(parent); - var parts = path.split('/').reverse(); - while (parts.length) { - var part = parts.pop(); - if (!part) continue; - var current = PATH.join2(parent, part); - try { - FS.mkdir(current); - } catch (e) { - if (e.errno != 20) throw e; - } - parent = current; - } - return current; - }, - createFile(parent, name, properties, canRead, canWrite) { - var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name); - var mode = FS_getMode(canRead, canWrite); - return FS.create(path, mode); - }, - createDataFile(parent, name, data, canRead, canWrite, canOwn) { - var path = name; - if (parent) { - parent = typeof parent == 'string' ? parent : FS.getPath(parent); - path = name ? PATH.join2(parent, name) : parent; - } - var mode = FS_getMode(canRead, canWrite); - var node = FS.create(path, mode); - if (data) { - if (typeof data == 'string') { - var arr = new Array(data.length); - for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i); - data = arr; - } - // make sure we can write to the file - FS.chmod(node, mode | 146); - var stream = FS.open(node, 577); - FS.write(stream, data, 0, data.length, 0, canOwn); - FS.close(stream); - FS.chmod(node, mode); - } - }, - createDevice(parent, name, input, output) { - var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name); - var mode = FS_getMode(!!input, !!output); - FS.createDevice.major ??= 64; - var dev = FS.makedev(FS.createDevice.major++, 0); - // Create a fake device that a set of stream ops to emulate - // the old behavior. 
- FS.registerDevice(dev, { - open(stream) { - stream.seekable = false; - }, - close(stream) { - // flush any pending line data - if (output?.buffer?.length) { - output(10); - } - }, - read(stream, buffer, offset, length, pos /* ignored */) { - var bytesRead = 0; - for (var i = 0; i < length; i++) { - var result; - try { - result = input(); - } catch (e) { - throw new FS.ErrnoError(29); - } - if (result === undefined && bytesRead === 0) { - throw new FS.ErrnoError(6); - } - if (result === null || result === undefined) break; - bytesRead++; - buffer[offset+i] = result; - } - if (bytesRead) { - stream.node.atime = Date.now(); - } - return bytesRead; - }, - write(stream, buffer, offset, length, pos) { - for (var i = 0; i < length; i++) { - try { - output(buffer[offset+i]); - } catch (e) { - throw new FS.ErrnoError(29); - } - } - if (length) { - stream.node.mtime = stream.node.ctime = Date.now(); - } - return i; - } - }); - return FS.mkdev(path, mode, dev); - }, - forceLoadFile(obj) { - if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true; - if (typeof XMLHttpRequest != 'undefined') { - throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread."); - } else { // Command-line. - try { - obj.contents = readBinary(obj.url); - obj.usedBytes = obj.contents.length; - } catch (e) { - throw new FS.ErrnoError(29); - } - } - }, - createLazyFile(parent, name, url, canRead, canWrite) { - // Lazy chunked Uint8Array (implements get and length from Uint8Array). - // Actual getting is abstracted away for eventual reuse. - class LazyUint8Array { - lengthKnown = false; - chunks = []; // Loaded chunks. Index is the chunk number - get(idx) { - if (idx > this.length-1 || idx < 0) { - return undefined; - } - var chunkOffset = idx % this.chunkSize; - var chunkNum = (idx / this.chunkSize)|0; - return this.getter(chunkNum)[chunkOffset]; - } - setDataGetter(getter) { - this.getter = getter; - } - cacheLength() { - // Find length - var xhr = new XMLHttpRequest(); - xhr.open('HEAD', url, false); - xhr.send(null); - if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status); - var datalength = Number(xhr.getResponseHeader("Content-length")); - var header; - var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes"; - var usesGzip = (header = xhr.getResponseHeader("Content-Encoding")) && header === "gzip"; - - var chunkSize = 1024*1024; // Chunk size in bytes - - if (!hasByteServing) chunkSize = datalength; - - // Function to get a range from the remote URL. - var doXHR = (from, to) => { - if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!"); - if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!"); - - // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available. - var xhr = new XMLHttpRequest(); - xhr.open('GET', url, false); - if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to); - - // Some hints to the browser that we want binary data. - xhr.responseType = 'arraybuffer'; - if (xhr.overrideMimeType) { - xhr.overrideMimeType('text/plain; charset=x-user-defined'); - } - - xhr.send(null); - if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". 
Status: " + xhr.status); - if (xhr.response !== undefined) { - return new Uint8Array(/** @type{Array} */(xhr.response || [])); - } - return intArrayFromString(xhr.responseText || '', true); - }; - var lazyArray = this; - lazyArray.setDataGetter((chunkNum) => { - var start = chunkNum * chunkSize; - var end = (chunkNum+1) * chunkSize - 1; // including this byte - end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block - if (typeof lazyArray.chunks[chunkNum] == 'undefined') { - lazyArray.chunks[chunkNum] = doXHR(start, end); - } - if (typeof lazyArray.chunks[chunkNum] == 'undefined') throw new Error('doXHR failed!'); - return lazyArray.chunks[chunkNum]; - }); - - if (usesGzip || !datalength) { - // if the server uses gzip or doesn't supply the length, we have to download the whole file to get the (uncompressed) length - chunkSize = datalength = 1; // this will force getter(0)/doXHR do download the whole file - datalength = this.getter(0).length; - chunkSize = datalength; - out("LazyFiles on gzip forces download of the whole file when length is accessed"); - } - - this._length = datalength; - this._chunkSize = chunkSize; - this.lengthKnown = true; - } - get length() { - if (!this.lengthKnown) { - this.cacheLength(); - } - return this._length; - } - get chunkSize() { - if (!this.lengthKnown) { - this.cacheLength(); - } - return this._chunkSize; - } - } - - if (typeof XMLHttpRequest != 'undefined') { - if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc'; - var lazyArray = new LazyUint8Array(); - var properties = { isDevice: false, contents: lazyArray }; - } else { - var properties = { isDevice: false, url: url }; - } - - var node = FS.createFile(parent, name, properties, canRead, canWrite); - // This is a total hack, but I want to get this lazy file code out of the - // core of MEMFS. If we want to keep this lazy file concept I feel it should - // be its own thin LAZYFS proxying calls to MEMFS. - if (properties.contents) { - node.contents = properties.contents; - } else if (properties.url) { - node.contents = null; - node.url = properties.url; - } - // Add a function that defers querying the file size until it is asked the first time. 
- Object.defineProperties(node, { - usedBytes: { - get: function() { return this.contents.length; } - } - }); - // override each stream op with one that tries to force load the lazy file first - var stream_ops = {}; - var keys = Object.keys(node.stream_ops); - keys.forEach((key) => { - var fn = node.stream_ops[key]; - stream_ops[key] = (...args) => { - FS.forceLoadFile(node); - return fn(...args); - }; - }); - function writeChunks(stream, buffer, offset, length, position) { - var contents = stream.node.contents; - if (position >= contents.length) - return 0; - var size = Math.min(contents.length - position, length); - assert(size >= 0); - if (contents.slice) { // normal array - for (var i = 0; i < size; i++) { - buffer[offset + i] = contents[position + i]; - } - } else { - for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR - buffer[offset + i] = contents.get(position + i); - } - } - return size; - } - // use a custom read function - stream_ops.read = (stream, buffer, offset, length, position) => { - FS.forceLoadFile(node); - return writeChunks(stream, buffer, offset, length, position) - }; - // use a custom mmap function - stream_ops.mmap = (stream, length, position, prot, flags) => { - FS.forceLoadFile(node); - var ptr = mmapAlloc(length); - if (!ptr) { - throw new FS.ErrnoError(48); - } - writeChunks(stream, HEAP8, ptr, length, position); - return { ptr, allocated: true }; - }; - node.stream_ops = stream_ops; - return node; - }, - absolutePath() { - abort('FS.absolutePath has been removed; use PATH_FS.resolve instead'); - }, - createFolder() { - abort('FS.createFolder has been removed; use FS.mkdir instead'); - }, - createLink() { - abort('FS.createLink has been removed; use FS.symlink instead'); - }, - joinPath() { - abort('FS.joinPath has been removed; use PATH.join instead'); - }, - mmapAlloc() { - abort('FS.mmapAlloc has been replaced by the top level function mmapAlloc'); - }, - standardizePath() { - abort('FS.standardizePath has been removed; use PATH.normalize instead'); - }, - }; - - var SYSCALLS = { - DEFAULT_POLLMASK:5, - calculateAt(dirfd, path, allowEmpty) { - if (PATH.isAbs(path)) { - return path; - } - // relative path - var dir; - if (dirfd === -100) { - dir = FS.cwd(); - } else { - var dirstream = SYSCALLS.getStreamFromFD(dirfd); - dir = dirstream.path; - } - if (path.length == 0) { - if (!allowEmpty) { - throw new FS.ErrnoError(44);; - } - return dir; - } - return dir + '/' + path; - }, - writeStat(buf, stat) { - HEAPU32[((buf)>>2)] = stat.dev; - HEAPU32[(((buf)+(4))>>2)] = stat.mode; - HEAPU32[(((buf)+(8))>>2)] = stat.nlink; - HEAPU32[(((buf)+(12))>>2)] = stat.uid; - HEAPU32[(((buf)+(16))>>2)] = stat.gid; - HEAPU32[(((buf)+(20))>>2)] = stat.rdev; - HEAP64[(((buf)+(24))>>3)] = BigInt(stat.size); - HEAP32[(((buf)+(32))>>2)] = 4096; - HEAP32[(((buf)+(36))>>2)] = stat.blocks; - var atime = stat.atime.getTime(); - var mtime = stat.mtime.getTime(); - var ctime = stat.ctime.getTime(); - HEAP64[(((buf)+(40))>>3)] = BigInt(Math.floor(atime / 1000)); - HEAPU32[(((buf)+(48))>>2)] = (atime % 1000) * 1000 * 1000; - HEAP64[(((buf)+(56))>>3)] = BigInt(Math.floor(mtime / 1000)); - HEAPU32[(((buf)+(64))>>2)] = (mtime % 1000) * 1000 * 1000; - HEAP64[(((buf)+(72))>>3)] = BigInt(Math.floor(ctime / 1000)); - HEAPU32[(((buf)+(80))>>2)] = (ctime % 1000) * 1000 * 1000; - HEAP64[(((buf)+(88))>>3)] = BigInt(stat.ino); - return 0; - }, - writeStatFs(buf, stats) { - HEAPU32[(((buf)+(4))>>2)] = stats.bsize; - HEAPU32[(((buf)+(60))>>2)] = stats.bsize; - HEAP64[(((buf)+(8))>>3)] 
= BigInt(stats.blocks); - HEAP64[(((buf)+(16))>>3)] = BigInt(stats.bfree); - HEAP64[(((buf)+(24))>>3)] = BigInt(stats.bavail); - HEAP64[(((buf)+(32))>>3)] = BigInt(stats.files); - HEAP64[(((buf)+(40))>>3)] = BigInt(stats.ffree); - HEAPU32[(((buf)+(48))>>2)] = stats.fsid; - HEAPU32[(((buf)+(64))>>2)] = stats.flags; // ST_NOSUID - HEAPU32[(((buf)+(56))>>2)] = stats.namelen; - }, - doMsync(addr, stream, len, flags, offset) { - if (!FS.isFile(stream.node.mode)) { - throw new FS.ErrnoError(43); - } - if (flags & 2) { - // MAP_PRIVATE calls need not to be synced back to underlying fs - return 0; - } - var buffer = HEAPU8.slice(addr, addr + len); - FS.msync(stream, buffer, offset, len, flags); - }, - getStreamFromFD(fd) { - var stream = FS.getStreamChecked(fd); - return stream; - }, - varargs:undefined, - getStr(ptr) { - var ret = UTF8ToString(ptr); - return ret; - }, - }; - function ___syscall_chmod(path, mode) { - try { - - path = SYSCALLS.getStr(path); - FS.chmod(path, mode); - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - var SOCKFS = { - websocketArgs:{ - }, - callbacks:{ - }, - on(event, callback) { - SOCKFS.callbacks[event] = callback; - }, - emit(event, param) { - SOCKFS.callbacks[event]?.(param); - }, - mount(mount) { - // The incomming Module['websocket'] can be used for configuring - // configuring subprotocol/url, etc - SOCKFS.websocketArgs = Module['websocket'] || {}; - // Add the Event registration mechanism to the exported websocket configuration - // object so we can register network callbacks from native JavaScript too. - // For more documentation see system/include/emscripten/emscripten.h - (Module['websocket'] ??= {})['on'] = SOCKFS.on; - - return FS.createNode(null, '/', 16895, 0); - }, - createSocket(family, type, protocol) { - // Emscripten only supports AF_INET - if (family != 2) { - throw new FS.ErrnoError(5); - } - type &= ~526336; // Some applications may pass it; it makes no sense for a single process. - // Emscripten only supports SOCK_STREAM and SOCK_DGRAM - if (type != 1 && type != 2) { - throw new FS.ErrnoError(28); - } - var streaming = type == 1; - if (streaming && protocol && protocol != 6) { - throw new FS.ErrnoError(66); // if SOCK_STREAM, must be tcp or 0. 
- } - - // create our internal socket structure - var sock = { - family, - type, - protocol, - server: null, - error: null, // Used in getsockopt for SOL_SOCKET/SO_ERROR test - peers: {}, - pending: [], - recv_queue: [], - sock_ops: SOCKFS.websocket_sock_ops - }; - - // create the filesystem node to store the socket structure - var name = SOCKFS.nextname(); - var node = FS.createNode(SOCKFS.root, name, 49152, 0); - node.sock = sock; - - // and the wrapping stream that enables library functions such - // as read and write to indirectly interact with the socket - var stream = FS.createStream({ - path: name, - node, - flags: 2, - seekable: false, - stream_ops: SOCKFS.stream_ops - }); - - // map the new stream to the socket structure (sockets have a 1:1 - // relationship with a stream) - sock.stream = stream; - - return sock; - }, - getSocket(fd) { - var stream = FS.getStream(fd); - if (!stream || !FS.isSocket(stream.node.mode)) { - return null; - } - return stream.node.sock; - }, - stream_ops:{ - poll(stream) { - var sock = stream.node.sock; - return sock.sock_ops.poll(sock); - }, - ioctl(stream, request, varargs) { - var sock = stream.node.sock; - return sock.sock_ops.ioctl(sock, request, varargs); - }, - read(stream, buffer, offset, length, position /* ignored */) { - var sock = stream.node.sock; - var msg = sock.sock_ops.recvmsg(sock, length); - if (!msg) { - // socket is closed - return 0; - } - buffer.set(msg.buffer, offset); - return msg.buffer.length; - }, - write(stream, buffer, offset, length, position /* ignored */) { - var sock = stream.node.sock; - return sock.sock_ops.sendmsg(sock, buffer, offset, length); - }, - close(stream) { - var sock = stream.node.sock; - sock.sock_ops.close(sock); - }, - }, - nextname() { - if (!SOCKFS.nextname.current) { - SOCKFS.nextname.current = 0; - } - return `socket[${SOCKFS.nextname.current++}]`; - }, - websocket_sock_ops:{ - createPeer(sock, addr, port) { - var ws; - - if (typeof addr == 'object') { - ws = addr; - addr = null; - port = null; - } - - if (ws) { - // for sockets that've already connected (e.g. we're the server) - // we can inspect the _socket property for the address - if (ws._socket) { - addr = ws._socket.remoteAddress; - port = ws._socket.remotePort; - } - // if we're just now initializing a connection to the remote, - // inspect the url property - else { - var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url); - if (!result) { - throw new Error('WebSocket URL must be in the format ws(s)://address:port'); - } - addr = result[1]; - port = parseInt(result[2], 10); - } - } else { - // create the actual websocket object and connect - try { - // The default value is 'ws://' the replace is needed because the compiler replaces '//' comments with '#' - // comments without checking context, so we'd end up with ws:#, the replace swaps the '#' for '//' again. - var url = 'ws://'.replace('#', '//'); - // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set. - var subProtocols = 'binary'; // The default value is 'binary' - // The default WebSocket options - var opts = undefined; - - // Fetch runtime WebSocket URL config. - if (SOCKFS.websocketArgs['url']) { - url = SOCKFS.websocketArgs['url']; - } - // Fetch runtime WebSocket subprotocol config. 
- if (SOCKFS.websocketArgs['subprotocol']) { - subProtocols = SOCKFS.websocketArgs['subprotocol']; - } else if (SOCKFS.websocketArgs['subprotocol'] === null) { - subProtocols = 'null' - } - - if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it. - var parts = addr.split('/'); - url = url + parts[0] + ":" + port + "/" + parts.slice(1).join('/'); - } - - if (subProtocols !== 'null') { - // The regex trims the string (removes spaces at the beginning and end, then splits the string by - // , into an Array. Whitespace removal is important for Websockify and ws. - subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */); - - opts = subProtocols; - } - - // If node we use the ws library. - var WebSocketConstructor; - { - WebSocketConstructor = WebSocket; - } - ws = new WebSocketConstructor(url, opts); - ws.binaryType = 'arraybuffer'; - } catch (e) { - throw new FS.ErrnoError(23); - } - } - - var peer = { - addr, - port, - socket: ws, - msg_send_queue: [] - }; - - SOCKFS.websocket_sock_ops.addPeer(sock, peer); - SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer); - - // if this is a bound dgram socket, send the port number first to allow - // us to override the ephemeral port reported to us by remotePort on the - // remote end. - if (sock.type === 2 && typeof sock.sport != 'undefined') { - peer.msg_send_queue.push(new Uint8Array([ - 255, 255, 255, 255, - 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0), - ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff) - ])); - } - - return peer; - }, - getPeer(sock, addr, port) { - return sock.peers[addr + ':' + port]; - }, - addPeer(sock, peer) { - sock.peers[peer.addr + ':' + peer.port] = peer; - }, - removePeer(sock, peer) { - delete sock.peers[peer.addr + ':' + peer.port]; - }, - handlePeerEvents(sock, peer) { - var first = true; - - var handleOpen = function () { - - sock.connecting = false; - SOCKFS.emit('open', sock.stream.fd); - - try { - var queued = peer.msg_send_queue.shift(); - while (queued) { - peer.socket.send(queued); - queued = peer.msg_send_queue.shift(); - } - } catch (e) { - // not much we can do here in the way of proper error handling as we've already - // lied and said this data was sent. shut it down. - peer.socket.close(); - } - }; - - function handleMessage(data) { - if (typeof data == 'string') { - var encoder = new TextEncoder(); // should be utf-8 - data = encoder.encode(data); // make a typed array from the string - } else { - assert(data.byteLength !== undefined); // must receive an ArrayBuffer - if (data.byteLength == 0) { - // An empty ArrayBuffer will emit a pseudo disconnect event - // as recv/recvmsg will return zero which indicates that a socket - // has performed a shutdown although the connection has not been disconnected yet. 
- return; - } - data = new Uint8Array(data); // make a typed array view on the array buffer - } - - // if this is the port message, override the peer's port with it - var wasfirst = first; - first = false; - if (wasfirst && - data.length === 10 && - data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 && - data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) { - // update the peer's port and it's key in the peer map - var newport = ((data[8] << 8) | data[9]); - SOCKFS.websocket_sock_ops.removePeer(sock, peer); - peer.port = newport; - SOCKFS.websocket_sock_ops.addPeer(sock, peer); - return; - } - - sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data }); - SOCKFS.emit('message', sock.stream.fd); - }; - - if (ENVIRONMENT_IS_NODE) { - peer.socket.on('open', handleOpen); - peer.socket.on('message', function(data, isBinary) { - if (!isBinary) { - return; - } - handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer - }); - peer.socket.on('close', function() { - SOCKFS.emit('close', sock.stream.fd); - }); - peer.socket.on('error', function(error) { - // Although the ws library may pass errors that may be more descriptive than - // ECONNREFUSED they are not necessarily the expected error code e.g. - // ENOTFOUND on getaddrinfo seems to be node.js specific, so using ECONNREFUSED - // is still probably the most useful thing to do. - sock.error = 14; // Used in getsockopt for SOL_SOCKET/SO_ERROR test. - SOCKFS.emit('error', [sock.stream.fd, sock.error, 'ECONNREFUSED: Connection refused']); - // don't throw - }); - } else { - peer.socket.onopen = handleOpen; - peer.socket.onclose = function() { - SOCKFS.emit('close', sock.stream.fd); - }; - peer.socket.onmessage = function peer_socket_onmessage(event) { - handleMessage(event.data); - }; - peer.socket.onerror = function(error) { - // The WebSocket spec only allows a 'simple event' to be thrown on error, - // so we only really know as much as ECONNREFUSED. - sock.error = 14; // Used in getsockopt for SOL_SOCKET/SO_ERROR test. - SOCKFS.emit('error', [sock.stream.fd, sock.error, 'ECONNREFUSED: Connection refused']); - }; - } - }, - poll(sock) { - if (sock.type === 1 && sock.server) { - // listen sockets should only say they're available for reading - // if there are pending clients. - return sock.pending.length ? (64 | 1) : 0; - } - - var mask = 0; - var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets - SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) : - null; - - if (sock.recv_queue.length || - !dest || // connection-less sockets are always ready to read - (dest && dest.socket.readyState === dest.socket.CLOSING) || - (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed - mask |= (64 | 1); - } - - if (!dest || // connection-less sockets are always ready to write - (dest && dest.socket.readyState === dest.socket.OPEN)) { - mask |= 4; - } - - if ((dest && dest.socket.readyState === dest.socket.CLOSING) || - (dest && dest.socket.readyState === dest.socket.CLOSED)) { - // When an non-blocking connect fails mark the socket as writable. - // Its up to the calling code to then use getsockopt with SO_ERROR to - // retrieve the error. 
- // See https://man7.org/linux/man-pages/man2/connect.2.html - if (sock.connecting) { - mask |= 4; - } else { - mask |= 16; - } - } - - return mask; - }, - ioctl(sock, request, arg) { - switch (request) { - case 21531: - var bytes = 0; - if (sock.recv_queue.length) { - bytes = sock.recv_queue[0].data.length; - } - HEAP32[((arg)>>2)] = bytes; - return 0; - case 21537: - var on = HEAP32[((arg)>>2)]; - if (on) { - sock.stream.flags |= 2048; - } else { - sock.stream.flags &= ~2048; - } - return 0; - default: - return 28; - } - }, - close(sock) { - // if we've spawned a listen server, close it - if (sock.server) { - try { - sock.server.close(); - } catch (e) { - } - sock.server = null; - } - // close any peer connections - for (var peer of Object.values(sock.peers)) { - try { - peer.socket.close(); - } catch (e) { - } - SOCKFS.websocket_sock_ops.removePeer(sock, peer); - } - return 0; - }, - bind(sock, addr, port) { - if (typeof sock.saddr != 'undefined' || typeof sock.sport != 'undefined') { - throw new FS.ErrnoError(28); // already bound - } - sock.saddr = addr; - sock.sport = port; - // in order to emulate dgram sockets, we need to launch a listen server when - // binding on a connection-less socket - // note: this is only required on the server side - if (sock.type === 2) { - // close the existing server if it exists - if (sock.server) { - sock.server.close(); - sock.server = null; - } - // swallow error operation not supported error that occurs when binding in the - // browser where this isn't supported - try { - sock.sock_ops.listen(sock, 0); - } catch (e) { - if (!(e.name === 'ErrnoError')) throw e; - if (e.errno !== 138) throw e; - } - } - }, - connect(sock, addr, port) { - if (sock.server) { - throw new FS.ErrnoError(138); - } - - // TODO autobind - // if (!sock.addr && sock.type == 2) { - // } - - // early out if we're already connected / in the middle of connecting - if (typeof sock.daddr != 'undefined' && typeof sock.dport != 'undefined') { - var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport); - if (dest) { - if (dest.socket.readyState === dest.socket.CONNECTING) { - throw new FS.ErrnoError(7); - } else { - throw new FS.ErrnoError(30); - } - } - } - - // add the socket to our peer list and set our - // destination address / port to match - var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port); - sock.daddr = peer.addr; - sock.dport = peer.port; - - // because we cannot synchronously block to wait for the WebSocket - // connection to complete, we return here pretending that the connection - // was a success. - sock.connecting = true; - }, - listen(sock, backlog) { - if (!ENVIRONMENT_IS_NODE) { - throw new FS.ErrnoError(138); - } - }, - accept(listensock) { - if (!listensock.server || !listensock.pending.length) { - throw new FS.ErrnoError(28); - } - var newsock = listensock.pending.shift(); - newsock.stream.flags = listensock.stream.flags; - return newsock; - }, - getname(sock, peer) { - var addr, port; - if (peer) { - if (sock.daddr === undefined || sock.dport === undefined) { - throw new FS.ErrnoError(53); - } - addr = sock.daddr; - port = sock.dport; - } else { - // TODO saddr and sport will be set for bind()'d UDP sockets, but what - // should we be returning for TCP sockets that've been connect()'d? 
- addr = sock.saddr || 0; - port = sock.sport || 0; - } - return { addr, port }; - }, - sendmsg(sock, buffer, offset, length, addr, port) { - if (sock.type === 2) { - // connection-less sockets will honor the message address, - // and otherwise fall back to the bound destination address - if (addr === undefined || port === undefined) { - addr = sock.daddr; - port = sock.dport; - } - // if there was no address to fall back to, error out - if (addr === undefined || port === undefined) { - throw new FS.ErrnoError(17); - } - } else { - // connection-based sockets will only use the bound - addr = sock.daddr; - port = sock.dport; - } - - // find the peer for the destination address - var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port); - - // early out if not connected with a connection-based socket - if (sock.type === 1) { - if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) { - throw new FS.ErrnoError(53); - } - } - - // create a copy of the incoming data to send, as the WebSocket API - // doesn't work entirely with an ArrayBufferView, it'll just send - // the entire underlying buffer - if (ArrayBuffer.isView(buffer)) { - offset += buffer.byteOffset; - buffer = buffer.buffer; - } - - var data = buffer.slice(offset, offset + length); - - // if we don't have a cached connectionless UDP datagram connection, or - // the TCP socket is still connecting, queue the message to be sent upon - // connect, and lie, saying the data was sent now. - if (!dest || dest.socket.readyState !== dest.socket.OPEN) { - // if we're not connected, open a new connection - if (sock.type === 2) { - if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) { - dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port); - } - } - dest.msg_send_queue.push(data); - return length; - } - - try { - // send the actual data - dest.socket.send(data); - return length; - } catch (e) { - throw new FS.ErrnoError(28); - } - }, - recvmsg(sock, length) { - // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html - if (sock.type === 1 && sock.server) { - // tcp servers should not be recv()'ing on the listen socket - throw new FS.ErrnoError(53); - } - - var queued = sock.recv_queue.shift(); - if (!queued) { - if (sock.type === 1) { - var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport); - - if (!dest) { - // if we have a destination address but are not connected, error out - throw new FS.ErrnoError(53); - } - if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) { - // return null if the socket has closed - return null; - } - // else, our socket is in a valid state but truly has nothing available - throw new FS.ErrnoError(6); - } - throw new FS.ErrnoError(6); - } - - // queued.data will be an ArrayBuffer if it's unadulterated, but if it's - // requeued TCP data it'll be an ArrayBufferView - var queuedLength = queued.data.byteLength || queued.data.length; - var queuedOffset = queued.data.byteOffset || 0; - var queuedBuffer = queued.data.buffer || queued.data; - var bytesRead = Math.min(length, queuedLength); - var res = { - buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead), - addr: queued.addr, - port: queued.port - }; - - // push back any unread data for TCP connections - if (sock.type === 1 && bytesRead < queuedLength) { - var bytesRemaining = queuedLength - bytesRead; - queued.data = new Uint8Array(queuedBuffer, queuedOffset + 
bytesRead, bytesRemaining); - sock.recv_queue.unshift(queued); - } - - return res; - }, - }, - }; - - var getSocketFromFD = (fd) => { - var socket = SOCKFS.getSocket(fd); - if (!socket) throw new FS.ErrnoError(8); - return socket; - }; - - var inetNtop4 = (addr) => - (addr & 0xff) + '.' + ((addr >> 8) & 0xff) + '.' + ((addr >> 16) & 0xff) + '.' + ((addr >> 24) & 0xff); - - - var inetNtop6 = (ints) => { - // ref: http://www.ietf.org/rfc/rfc2373.txt - section 2.5.4 - // Format for IPv4 compatible and mapped 128-bit IPv6 Addresses - // 128-bits are split into eight 16-bit words - // stored in network byte order (big-endian) - // | 80 bits | 16 | 32 bits | - // +-----------------------------------------------------------------+ - // | 10 bytes | 2 | 4 bytes | - // +--------------------------------------+--------------------------+ - // + 5 words | 1 | 2 words | - // +--------------------------------------+--------------------------+ - // |0000..............................0000|0000| IPv4 ADDRESS | (compatible) - // +--------------------------------------+----+---------------------+ - // |0000..............................0000|FFFF| IPv4 ADDRESS | (mapped) - // +--------------------------------------+----+---------------------+ - var str = ""; - var word = 0; - var longest = 0; - var lastzero = 0; - var zstart = 0; - var len = 0; - var i = 0; - var parts = [ - ints[0] & 0xffff, - (ints[0] >> 16), - ints[1] & 0xffff, - (ints[1] >> 16), - ints[2] & 0xffff, - (ints[2] >> 16), - ints[3] & 0xffff, - (ints[3] >> 16) - ]; - - // Handle IPv4-compatible, IPv4-mapped, loopback and any/unspecified addresses - - var hasipv4 = true; - var v4part = ""; - // check if the 10 high-order bytes are all zeros (first 5 words) - for (i = 0; i < 5; i++) { - if (parts[i] !== 0) { hasipv4 = false; break; } - } - - if (hasipv4) { - // low-order 32-bits store an IPv4 address (bytes 13 to 16) (last 2 words) - v4part = inetNtop4(parts[6] | (parts[7] << 16)); - // IPv4-mapped IPv6 address if 16-bit value (bytes 11 and 12) == 0xFFFF (6th word) - if (parts[5] === -1) { - str = "::ffff:"; - str += v4part; - return str; - } - // IPv4-compatible IPv6 address if 16-bit value (bytes 11 and 12) == 0x0000 (6th word) - if (parts[5] === 0) { - str = "::"; - //special case IPv6 addresses - if (v4part === "0.0.0.0") v4part = ""; // any/unspecified address - if (v4part === "0.0.0.1") v4part = "1";// loopback address - str += v4part; - return str; - } - } - - // Handle all other IPv6 addresses - - // first run to find the longest contiguous zero words - for (word = 0; word < 8; word++) { - if (parts[word] === 0) { - if (word - lastzero > 1) { - len = 0; - } - lastzero = word; - len++; - } - if (len > longest) { - longest = len; - zstart = word - longest + 1; - } - } - - for (word = 0; word < 8; word++) { - if (longest > 1) { - // compress contiguous zeros - to produce "::" - if (parts[word] === 0 && word >= zstart && word < (zstart + longest) ) { - if (word === zstart) { - str += ":"; - if (zstart === 0) str += ":"; //leading zeros case - } - continue; - } - } - // converts 16-bit words from big-endian to little-endian before converting to hex string - str += Number(_ntohs(parts[word] & 0xffff)).toString(16); - str += word < 7 ? 
":" : ""; - } - return str; - }; - - var readSockaddr = (sa, salen) => { - // family / port offsets are common to both sockaddr_in and sockaddr_in6 - var family = HEAP16[((sa)>>1)]; - var port = _ntohs(HEAPU16[(((sa)+(2))>>1)]); - var addr; - - switch (family) { - case 2: - if (salen !== 16) { - return { errno: 28 }; - } - addr = HEAP32[(((sa)+(4))>>2)]; - addr = inetNtop4(addr); - break; - case 10: - if (salen !== 28) { - return { errno: 28 }; - } - addr = [ - HEAP32[(((sa)+(8))>>2)], - HEAP32[(((sa)+(12))>>2)], - HEAP32[(((sa)+(16))>>2)], - HEAP32[(((sa)+(20))>>2)] - ]; - addr = inetNtop6(addr); - break; - default: - return { errno: 5 }; - } - - return { family: family, addr: addr, port: port }; - }; - - - var inetPton4 = (str) => { - var b = str.split('.'); - for (var i = 0; i < 4; i++) { - var tmp = Number(b[i]); - if (isNaN(tmp)) return null; - b[i] = tmp; - } - return (b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24)) >>> 0; - }; - - var inetPton6 = (str) => { - var words; - var w, offset, z, i; - /* http://home.deds.nl/~aeron/regex/ */ - var valid6regx = /^((?=.*::)(?!.*::.+::)(::)?([\dA-F]{1,4}:(:|\b)|){5}|([\dA-F]{1,4}:){6})((([\dA-F]{1,4}((?!\3)::|:\b|$))|(?!\2\3)){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})$/i - var parts = []; - if (!valid6regx.test(str)) { - return null; - } - if (str === "::") { - return [0, 0, 0, 0, 0, 0, 0, 0]; - } - // Z placeholder to keep track of zeros when splitting the string on ":" - if (str.startsWith("::")) { - str = str.replace("::", "Z:"); // leading zeros case - } else { - str = str.replace("::", ":Z:"); - } - - if (str.indexOf(".") > 0) { - // parse IPv4 embedded stress - str = str.replace(new RegExp('[.]', 'g'), ":"); - words = str.split(":"); - words[words.length-4] = Number(words[words.length-4]) + Number(words[words.length-3])*256; - words[words.length-3] = Number(words[words.length-2]) + Number(words[words.length-1])*256; - words = words.slice(0, words.length-2); - } else { - words = str.split(":"); - } - - offset = 0; z = 0; - for (w=0; w < words.length; w++) { - if (typeof words[w] == 'string') { - if (words[w] === 'Z') { - // compressed zeros - write appropriate number of zero words - for (z = 0; z < (8 - words.length+1); z++) { - parts[w+z] = 0; - } - offset = z-1; - } else { - // parse hex to field to 16-bit value and write it in network byte-order - parts[w+offset] = _htons(parseInt(words[w],16)); - } - } else { - // parsed IPv4 words - parts[w+offset] = words[w]; - } - } - return [ - (parts[1] << 16) | parts[0], - (parts[3] << 16) | parts[2], - (parts[5] << 16) | parts[4], - (parts[7] << 16) | parts[6] - ]; - }; - var DNS = { - address_map:{ - id:1, - addrs:{ - }, - names:{ - }, - }, - lookup_name(name) { - // If the name is already a valid ipv4 / ipv6 address, don't generate a fake one. - var res = inetPton4(name); - if (res !== null) { - return name; - } - res = inetPton6(name); - if (res !== null) { - return name; - } - - // See if this name is already mapped. - var addr; - - if (DNS.address_map.addrs[name]) { - addr = DNS.address_map.addrs[name]; - } else { - var id = DNS.address_map.id++; - assert(id < 65535, 'exceeded max address mappings of 65535'); - - addr = '172.29.' + (id & 0xff) + '.' 
+ (id & 0xff00); - - DNS.address_map.names[addr] = name; - DNS.address_map.addrs[name] = addr; - } - - return addr; - }, - lookup_addr(addr) { - if (DNS.address_map.names[addr]) { - return DNS.address_map.names[addr]; - } - - return null; - }, - }; - var getSocketAddress = (addrp, addrlen) => { - var info = readSockaddr(addrp, addrlen); - if (info.errno) throw new FS.ErrnoError(info.errno); - info.addr = DNS.lookup_addr(info.addr) || info.addr; - return info; - }; - function ___syscall_connect(fd, addr, addrlen, d1, d2, d3) { - try { - - var sock = getSocketFromFD(fd); - var info = getSocketAddress(addr, addrlen); - sock.sock_ops.connect(sock, info.addr, info.port); - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - function ___syscall_dup(fd) { - try { - - var old = SYSCALLS.getStreamFromFD(fd); - return FS.dupStream(old).fd; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - function ___syscall_faccessat(dirfd, path, amode, flags) { - try { - - path = SYSCALLS.getStr(path); - assert(!flags || flags == 512); - path = SYSCALLS.calculateAt(dirfd, path); - if (amode & ~7) { - // need a valid mode - return -28; - } - var lookup = FS.lookupPath(path, { follow: true }); - var node = lookup.node; - if (!node) { - return -44; - } - var perms = ''; - if (amode & 4) perms += 'r'; - if (amode & 2) perms += 'w'; - if (amode & 1) perms += 'x'; - if (perms /* otherwise, they've just passed F_OK */ && FS.nodePermissions(node, perms)) { - return -2; - } - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - function ___syscall_fchmod(fd, mode) { - try { - - FS.fchmod(fd, mode); - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - /** @suppress {duplicate } */ - var syscallGetVarargI = () => { - assert(SYSCALLS.varargs != undefined); - // the `+` prepended here is necessary to convince the JSCompiler that varargs is indeed a number. - var ret = HEAP32[((+SYSCALLS.varargs)>>2)]; - SYSCALLS.varargs += 4; - return ret; - }; - var syscallGetVarargP = syscallGetVarargI; - - - function ___syscall_fcntl64(fd, cmd, varargs) { - SYSCALLS.varargs = varargs; - try { - - var stream = SYSCALLS.getStreamFromFD(fd); - switch (cmd) { - case 0: { - var arg = syscallGetVarargI(); - if (arg < 0) { - return -28; - } - while (FS.streams[arg]) { - arg++; - } - var newStream; - newStream = FS.dupStream(stream, arg); - return newStream.fd; - } - case 1: - case 2: - return 0; // FD_CLOEXEC makes no sense for a single process. - case 3: - return stream.flags; - case 4: { - var arg = syscallGetVarargI(); - stream.flags |= arg; - return 0; - } - case 12: { - var arg = syscallGetVarargP(); - var offset = 0; - // We're always unlocked. - HEAP16[(((arg)+(offset))>>1)] = 2; - return 0; - } - case 13: - case 14: - // Pretend that the locking is successful. These are process-level locks, - // and Emscripten programs are a single process. If we supported linking a - // filesystem between programs, we'd need to do more here. 
- // See https://github.com/emscripten-core/emscripten/issues/23697 - return 0; - } - return -28; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - function ___syscall_fstat64(fd, buf) { - try { - - return SYSCALLS.writeStat(buf, FS.fstat(fd)); - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - var INT53_MAX = 9007199254740992; - - var INT53_MIN = -9007199254740992; - var bigintToI53Checked = (num) => (num < INT53_MIN || num > INT53_MAX) ? NaN : Number(num); - function ___syscall_ftruncate64(fd, length) { - length = bigintToI53Checked(length); - - - try { - - if (isNaN(length)) return -61; - FS.ftruncate(fd, length); - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - ; - } - - - var stringToUTF8 = (str, outPtr, maxBytesToWrite) => { - assert(typeof maxBytesToWrite == 'number', 'stringToUTF8(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!'); - return stringToUTF8Array(str, HEAPU8, outPtr, maxBytesToWrite); - }; - function ___syscall_getcwd(buf, size) { - try { - - if (size === 0) return -28; - var cwd = FS.cwd(); - var cwdLengthInBytes = lengthBytesUTF8(cwd) + 1; - if (size < cwdLengthInBytes) return -68; - stringToUTF8(cwd, buf, size); - return cwdLengthInBytes; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - - function ___syscall_ioctl(fd, op, varargs) { - SYSCALLS.varargs = varargs; - try { - - var stream = SYSCALLS.getStreamFromFD(fd); - switch (op) { - case 21509: { - if (!stream.tty) return -59; - return 0; - } - case 21505: { - if (!stream.tty) return -59; - if (stream.tty.ops.ioctl_tcgets) { - var termios = stream.tty.ops.ioctl_tcgets(stream); - var argp = syscallGetVarargP(); - HEAP32[((argp)>>2)] = termios.c_iflag || 0; - HEAP32[(((argp)+(4))>>2)] = termios.c_oflag || 0; - HEAP32[(((argp)+(8))>>2)] = termios.c_cflag || 0; - HEAP32[(((argp)+(12))>>2)] = termios.c_lflag || 0; - for (var i = 0; i < 32; i++) { - HEAP8[(argp + i)+(17)] = termios.c_cc[i] || 0; - } - return 0; - } - return 0; - } - case 21510: - case 21511: - case 21512: { - if (!stream.tty) return -59; - return 0; // no-op, not actually adjusting terminal settings - } - case 21506: - case 21507: - case 21508: { - if (!stream.tty) return -59; - if (stream.tty.ops.ioctl_tcsets) { - var argp = syscallGetVarargP(); - var c_iflag = HEAP32[((argp)>>2)]; - var c_oflag = HEAP32[(((argp)+(4))>>2)]; - var c_cflag = HEAP32[(((argp)+(8))>>2)]; - var c_lflag = HEAP32[(((argp)+(12))>>2)]; - var c_cc = [] - for (var i = 0; i < 32; i++) { - c_cc.push(HEAP8[(argp + i)+(17)]); - } - return stream.tty.ops.ioctl_tcsets(stream.tty, op, { c_iflag, c_oflag, c_cflag, c_lflag, c_cc }); - } - return 0; // no-op, not actually adjusting terminal settings - } - case 21519: { - if (!stream.tty) return -59; - var argp = syscallGetVarargP(); - HEAP32[((argp)>>2)] = 0; - return 0; - } - case 21520: { - if (!stream.tty) return -59; - return -28; // not supported - } - case 21537: - case 21531: { - var argp = syscallGetVarargP(); - return FS.ioctl(stream, op, argp); - } - case 21523: { - // TODO: in theory we should write to the winsize struct that gets - // passed in, but for now musl doesn't read anything on it - if (!stream.tty) return -59; - if (stream.tty.ops.ioctl_tiocgwinsz) { - var winsize = 
stream.tty.ops.ioctl_tiocgwinsz(stream.tty); - var argp = syscallGetVarargP(); - HEAP16[((argp)>>1)] = winsize[0]; - HEAP16[(((argp)+(2))>>1)] = winsize[1]; - } - return 0; - } - case 21524: { - // TODO: technically, this ioctl call should change the window size. - // but, since emscripten doesn't have any concept of a terminal window - // yet, we'll just silently throw it away as we do TIOCGWINSZ - if (!stream.tty) return -59; - return 0; - } - case 21515: { - if (!stream.tty) return -59; - return 0; - } - default: return -28; // not supported - } - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - function ___syscall_lstat64(path, buf) { - try { - - path = SYSCALLS.getStr(path); - return SYSCALLS.writeStat(buf, FS.lstat(path)); - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - function ___syscall_newfstatat(dirfd, path, buf, flags) { - try { - - path = SYSCALLS.getStr(path); - var nofollow = flags & 256; - var allowEmpty = flags & 4096; - flags = flags & (~6400); - assert(!flags, `unknown flags in __syscall_newfstatat: ${flags}`); - path = SYSCALLS.calculateAt(dirfd, path, allowEmpty); - return SYSCALLS.writeStat(buf, nofollow ? FS.lstat(path) : FS.stat(path)); - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - - function ___syscall_openat(dirfd, path, flags, varargs) { - SYSCALLS.varargs = varargs; - try { - - path = SYSCALLS.getStr(path); - path = SYSCALLS.calculateAt(dirfd, path); - var mode = varargs ? syscallGetVarargI() : 0; - return FS.open(path, flags, mode).fd; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - - - function ___syscall_readlinkat(dirfd, path, buf, bufsize) { - try { - - path = SYSCALLS.getStr(path); - path = SYSCALLS.calculateAt(dirfd, path); - if (bufsize <= 0) return -28; - var ret = FS.readlink(path); - - var len = Math.min(bufsize, lengthBytesUTF8(ret)); - var endChar = HEAP8[buf+len]; - stringToUTF8(ret, buf, bufsize+1); - // readlink is one of the rare functions that write out a C string, but does never append a null to the output buffer(!) - // stringToUTF8() always appends a null byte, so restore the character under the null byte after the write. 
- HEAP8[buf+len] = endChar; - return len; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - - - - var zeroMemory = (ptr, size) => HEAPU8.fill(0, ptr, ptr + size); - - /** @param {number=} addrlen */ - var writeSockaddr = (sa, family, addr, port, addrlen) => { - switch (family) { - case 2: - addr = inetPton4(addr); - zeroMemory(sa, 16); - if (addrlen) { - HEAP32[((addrlen)>>2)] = 16; - } - HEAP16[((sa)>>1)] = family; - HEAP32[(((sa)+(4))>>2)] = addr; - HEAP16[(((sa)+(2))>>1)] = _htons(port); - break; - case 10: - addr = inetPton6(addr); - zeroMemory(sa, 28); - if (addrlen) { - HEAP32[((addrlen)>>2)] = 28; - } - HEAP32[((sa)>>2)] = family; - HEAP32[(((sa)+(8))>>2)] = addr[0]; - HEAP32[(((sa)+(12))>>2)] = addr[1]; - HEAP32[(((sa)+(16))>>2)] = addr[2]; - HEAP32[(((sa)+(20))>>2)] = addr[3]; - HEAP16[(((sa)+(2))>>1)] = _htons(port); - break; - default: - return 5; - } - return 0; - }; - - function ___syscall_recvfrom(fd, buf, len, flags, addr, addrlen) { - try { - - var sock = getSocketFromFD(fd); - var msg = sock.sock_ops.recvmsg(sock, len); - if (!msg) return 0; // socket is closed - if (addr) { - var errno = writeSockaddr(addr, sock.family, DNS.lookup_name(msg.addr), msg.port, addrlen); - assert(!errno); - } - HEAPU8.set(msg.buffer, buf); - return msg.buffer.byteLength; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - function ___syscall_rmdir(path) { - try { - - path = SYSCALLS.getStr(path); - FS.rmdir(path); - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - - function ___syscall_sendto(fd, message, length, flags, addr, addr_len) { - try { - - var sock = getSocketFromFD(fd); - if (!addr) { - // send, no address provided - return FS.write(sock.stream, HEAP8, message, length); - } - var dest = getSocketAddress(addr, addr_len); - // sendto an address - return sock.sock_ops.sendmsg(sock, HEAP8, message, length, dest.addr, dest.port); - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - function ___syscall_socket(domain, type, protocol) { - try { - - var sock = SOCKFS.createSocket(domain, type, protocol); - assert(sock.stream.fd < 64); // XXX ? 
select() assumes socket fd values are in 0..63 - return sock.stream.fd; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - function ___syscall_stat64(path, buf) { - try { - - path = SYSCALLS.getStr(path); - return SYSCALLS.writeStat(buf, FS.stat(path)); - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - function ___syscall_unlinkat(dirfd, path, flags) { - try { - - path = SYSCALLS.getStr(path); - path = SYSCALLS.calculateAt(dirfd, path); - if (!flags) { - FS.unlink(path); - } else if (flags === 512) { - FS.rmdir(path); - } else { - return -28; - } - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return -e.errno; - } - } - - var __abort_js = () => - abort('native code called abort()'); - - var AsciiToString = (ptr) => { - var str = ''; - while (1) { - var ch = HEAPU8[ptr++]; - if (!ch) return str; - str += String.fromCharCode(ch); - } - }; - - var awaitingDependencies = { - }; - - var registeredTypes = { - }; - - var typeDependencies = { - }; - - var BindingError = class BindingError extends Error { constructor(message) { super(message); this.name = 'BindingError'; }}; - var throwBindingError = (message) => { throw new BindingError(message); }; - /** @param {Object=} options */ - function sharedRegisterType(rawType, registeredInstance, options = {}) { - var name = registeredInstance.name; - if (!rawType) { - throwBindingError(`type "${name}" must have a positive integer typeid pointer`); - } - if (registeredTypes.hasOwnProperty(rawType)) { - if (options.ignoreDuplicateRegistrations) { - return; - } else { - throwBindingError(`Cannot register type '${name}' twice`); - } - } - - registeredTypes[rawType] = registeredInstance; - delete typeDependencies[rawType]; - - if (awaitingDependencies.hasOwnProperty(rawType)) { - var callbacks = awaitingDependencies[rawType]; - delete awaitingDependencies[rawType]; - callbacks.forEach((cb) => cb()); - } - } - /** @param {Object=} options */ - function registerType(rawType, registeredInstance, options = {}) { - return sharedRegisterType(rawType, registeredInstance, options); - } - - var integerReadValueFromPointer = (name, width, signed) => { - // integers are quite common, so generate very specialized functions - switch (width) { - case 1: return signed ? - (pointer) => HEAP8[pointer] : - (pointer) => HEAPU8[pointer]; - case 2: return signed ? - (pointer) => HEAP16[((pointer)>>1)] : - (pointer) => HEAPU16[((pointer)>>1)] - case 4: return signed ? - (pointer) => HEAP32[((pointer)>>2)] : - (pointer) => HEAPU32[((pointer)>>2)] - case 8: return signed ? 
- (pointer) => HEAP64[((pointer)>>3)] : - (pointer) => HEAPU64[((pointer)>>3)] - default: - throw new TypeError(`invalid integer width (${width}): ${name}`); - } - }; - - var embindRepr = (v) => { - if (v === null) { - return 'null'; - } - var t = typeof v; - if (t === 'object' || t === 'array' || t === 'function') { - return v.toString(); - } else { - return '' + v; - } - }; - - var assertIntegerRange = (typeName, value, minRange, maxRange) => { - if (value < minRange || value > maxRange) { - throw new TypeError(`Passing a number "${embindRepr(value)}" from JS side to C/C++ side to an argument of type "${typeName}", which is outside the valid range [${minRange}, ${maxRange}]!`); - } - }; - /** @suppress {globalThis} */ - var __embind_register_bigint = (primitiveType, name, size, minRange, maxRange) => { - name = AsciiToString(name); - - const isUnsignedType = minRange === 0n; - - let fromWireType = (value) => value; - if (isUnsignedType) { - // uint64 get converted to int64 in ABI, fix them up like we do for 32-bit integers. - const bitSize = size * 8; - fromWireType = (value) => { - return BigInt.asUintN(bitSize, value); - } - maxRange = fromWireType(maxRange); - } - - registerType(primitiveType, { - name, - fromWireType: fromWireType, - toWireType: (destructors, value) => { - if (typeof value == "number") { - value = BigInt(value); - } - else if (typeof value != "bigint") { - throw new TypeError(`Cannot convert "${embindRepr(value)}" to ${this.name}`); - } - assertIntegerRange(name, value, minRange, maxRange); - return value; - }, - readValueFromPointer: integerReadValueFromPointer(name, size, !isUnsignedType), - destructorFunction: null, // This type does not need a destructor - }); - }; - - - /** @suppress {globalThis} */ - var __embind_register_bool = (rawType, name, trueValue, falseValue) => { - name = AsciiToString(name); - registerType(rawType, { - name, - fromWireType: function(wt) { - // ambiguous emscripten ABI: sometimes return values are - // true or false, and sometimes integers (0 or 1) - return !!wt; - }, - toWireType: function(destructors, o) { - return o ? 
trueValue : falseValue; - }, - readValueFromPointer: function(pointer) { - return this.fromWireType(HEAPU8[pointer]); - }, - destructorFunction: null, // This type does not need a destructor - }); - }; - - - - var shallowCopyInternalPointer = (o) => { - return { - count: o.count, - deleteScheduled: o.deleteScheduled, - preservePointerOnDelete: o.preservePointerOnDelete, - ptr: o.ptr, - ptrType: o.ptrType, - smartPtr: o.smartPtr, - smartPtrType: o.smartPtrType, - }; - }; - - var throwInstanceAlreadyDeleted = (obj) => { - function getInstanceTypeName(handle) { - return handle.$$.ptrType.registeredClass.name; - } - throwBindingError(getInstanceTypeName(obj) + ' instance already deleted'); - }; - - var finalizationRegistry = false; - - var detachFinalizer = (handle) => {}; - - var runDestructor = ($$) => { - if ($$.smartPtr) { - $$.smartPtrType.rawDestructor($$.smartPtr); - } else { - $$.ptrType.registeredClass.rawDestructor($$.ptr); - } - }; - var releaseClassHandle = ($$) => { - $$.count.value -= 1; - var toDelete = 0 === $$.count.value; - if (toDelete) { - runDestructor($$); - } - }; - - var downcastPointer = (ptr, ptrClass, desiredClass) => { - if (ptrClass === desiredClass) { - return ptr; - } - if (undefined === desiredClass.baseClass) { - return null; // no conversion - } - - var rv = downcastPointer(ptr, ptrClass, desiredClass.baseClass); - if (rv === null) { - return null; - } - return desiredClass.downcast(rv); - }; - - var registeredPointers = { - }; - - var registeredInstances = { - }; - - var getBasestPointer = (class_, ptr) => { - if (ptr === undefined) { - throwBindingError('ptr should not be undefined'); - } - while (class_.baseClass) { - ptr = class_.upcast(ptr); - class_ = class_.baseClass; - } - return ptr; - }; - var getInheritedInstance = (class_, ptr) => { - ptr = getBasestPointer(class_, ptr); - return registeredInstances[ptr]; - }; - - var InternalError = class InternalError extends Error { constructor(message) { super(message); this.name = 'InternalError'; }}; - var throwInternalError = (message) => { throw new InternalError(message); }; - - var makeClassHandle = (prototype, record) => { - if (!record.ptrType || !record.ptr) { - throwInternalError('makeClassHandle requires ptr and ptrType'); - } - var hasSmartPtrType = !!record.smartPtrType; - var hasSmartPtr = !!record.smartPtr; - if (hasSmartPtrType !== hasSmartPtr) { - throwInternalError('Both smartPtrType and smartPtr must be specified'); - } - record.count = { value: 1 }; - return attachFinalizer(Object.create(prototype, { - $$: { - value: record, - writable: true, - }, - })); - }; - /** @suppress {globalThis} */ - function RegisteredPointer_fromWireType(ptr) { - // ptr is a raw pointer (or a raw smartpointer) - - // rawPointer is a maybe-null raw pointer - var rawPointer = this.getPointee(ptr); - if (!rawPointer) { - this.destructor(ptr); - return null; - } - - var registeredInstance = getInheritedInstance(this.registeredClass, rawPointer); - if (undefined !== registeredInstance) { - // JS object has been neutered, time to repopulate it - if (0 === registeredInstance.$$.count.value) { - registeredInstance.$$.ptr = rawPointer; - registeredInstance.$$.smartPtr = ptr; - return registeredInstance['clone'](); - } else { - // else, just increment reference count on existing object - // it already has a reference to the smart pointer - var rv = registeredInstance['clone'](); - this.destructor(ptr); - return rv; - } - } - - function makeDefaultHandle() { - if (this.isSmartPointer) { - return 
makeClassHandle(this.registeredClass.instancePrototype, { - ptrType: this.pointeeType, - ptr: rawPointer, - smartPtrType: this, - smartPtr: ptr, - }); - } else { - return makeClassHandle(this.registeredClass.instancePrototype, { - ptrType: this, - ptr, - }); - } - } - - var actualType = this.registeredClass.getActualType(rawPointer); - var registeredPointerRecord = registeredPointers[actualType]; - if (!registeredPointerRecord) { - return makeDefaultHandle.call(this); - } - - var toType; - if (this.isConst) { - toType = registeredPointerRecord.constPointerType; - } else { - toType = registeredPointerRecord.pointerType; - } - var dp = downcastPointer( - rawPointer, - this.registeredClass, - toType.registeredClass); - if (dp === null) { - return makeDefaultHandle.call(this); - } - if (this.isSmartPointer) { - return makeClassHandle(toType.registeredClass.instancePrototype, { - ptrType: toType, - ptr: dp, - smartPtrType: this, - smartPtr: ptr, - }); - } else { - return makeClassHandle(toType.registeredClass.instancePrototype, { - ptrType: toType, - ptr: dp, - }); - } - } - var attachFinalizer = (handle) => { - if ('undefined' === typeof FinalizationRegistry) { - attachFinalizer = (handle) => handle; - return handle; - } - // If the running environment has a FinalizationRegistry (see - // https://github.com/tc39/proposal-weakrefs), then attach finalizers - // for class handles. We check for the presence of FinalizationRegistry - // at run-time, not build-time. - finalizationRegistry = new FinalizationRegistry((info) => { - console.warn(info.leakWarning); - releaseClassHandle(info.$$); - }); - attachFinalizer = (handle) => { - var $$ = handle.$$; - var hasSmartPtr = !!$$.smartPtr; - if (hasSmartPtr) { - // We should not call the destructor on raw pointers in case other code expects the pointee to live - var info = { $$: $$ }; - // Create a warning as an Error instance in advance so that we can store - // the current stacktrace and point to it when / if a leak is detected. - // This is more useful than the empty stacktrace of `FinalizationRegistry` - // callback. - var cls = $$.ptrType.registeredClass; - var err = new Error(`Embind found a leaked C++ instance ${cls.name} <${ptrToString($$.ptr)}>.\n` + - "We'll free it automatically in this case, but this functionality is not reliable across various environments.\n" + - "Make sure to invoke .delete() manually once you're done with the instance instead.\n" + - "Originally allocated"); // `.stack` will add "at ..." 
after this sentence - if ('captureStackTrace' in Error) { - Error.captureStackTrace(err, RegisteredPointer_fromWireType); - } - info.leakWarning = err.stack.replace(/^Error: /, ''); - finalizationRegistry.register(handle, info, handle); - } - return handle; - }; - detachFinalizer = (handle) => finalizationRegistry.unregister(handle); - return attachFinalizer(handle); - }; - - - - - var deletionQueue = []; - var flushPendingDeletes = () => { - while (deletionQueue.length) { - var obj = deletionQueue.pop(); - obj.$$.deleteScheduled = false; - obj['delete'](); - } - }; - - var delayFunction; - var init_ClassHandle = () => { - let proto = ClassHandle.prototype; - - Object.assign(proto, { - "isAliasOf"(other) { - if (!(this instanceof ClassHandle)) { - return false; - } - if (!(other instanceof ClassHandle)) { - return false; - } - - var leftClass = this.$$.ptrType.registeredClass; - var left = this.$$.ptr; - other.$$ = /** @type {Object} */ (other.$$); - var rightClass = other.$$.ptrType.registeredClass; - var right = other.$$.ptr; - - while (leftClass.baseClass) { - left = leftClass.upcast(left); - leftClass = leftClass.baseClass; - } - - while (rightClass.baseClass) { - right = rightClass.upcast(right); - rightClass = rightClass.baseClass; - } - - return leftClass === rightClass && left === right; - }, - - "clone"() { - if (!this.$$.ptr) { - throwInstanceAlreadyDeleted(this); - } - - if (this.$$.preservePointerOnDelete) { - this.$$.count.value += 1; - return this; - } else { - var clone = attachFinalizer(Object.create(Object.getPrototypeOf(this), { - $$: { - value: shallowCopyInternalPointer(this.$$), - } - })); - - clone.$$.count.value += 1; - clone.$$.deleteScheduled = false; - return clone; - } - }, - - "delete"() { - if (!this.$$.ptr) { - throwInstanceAlreadyDeleted(this); - } - - if (this.$$.deleteScheduled && !this.$$.preservePointerOnDelete) { - throwBindingError('Object already scheduled for deletion'); - } - - detachFinalizer(this); - releaseClassHandle(this.$$); - - if (!this.$$.preservePointerOnDelete) { - this.$$.smartPtr = undefined; - this.$$.ptr = undefined; - } - }, - - "isDeleted"() { - return !this.$$.ptr; - }, - - "deleteLater"() { - if (!this.$$.ptr) { - throwInstanceAlreadyDeleted(this); - } - if (this.$$.deleteScheduled && !this.$$.preservePointerOnDelete) { - throwBindingError('Object already scheduled for deletion'); - } - deletionQueue.push(this); - if (deletionQueue.length === 1 && delayFunction) { - delayFunction(flushPendingDeletes); - } - this.$$.deleteScheduled = true; - return this; - }, - }); - - // Support `using ...` from https://github.com/tc39/proposal-explicit-resource-management. - const symbolDispose = Symbol.dispose; - if (symbolDispose) { - proto[symbolDispose] = proto['delete']; - } - }; - /** @constructor */ - function ClassHandle() { - } - - var createNamedFunction = (name, func) => Object.defineProperty(func, 'name', { value: name }); - - - var ensureOverloadTable = (proto, methodName, humanName) => { - if (undefined === proto[methodName].overloadTable) { - var prevFunc = proto[methodName]; - // Inject an overload resolver function that routes to the appropriate overload based on the number of arguments. - proto[methodName] = function(...args) { - // TODO This check can be removed in -O3 level "unsafe" optimizations. 
- if (!proto[methodName].overloadTable.hasOwnProperty(args.length)) { - throwBindingError(`Function '${humanName}' called with an invalid number of arguments (${args.length}) - expects one of (${proto[methodName].overloadTable})!`); - } - return proto[methodName].overloadTable[args.length].apply(this, args); - }; - // Move the previous function into the overload table. - proto[methodName].overloadTable = []; - proto[methodName].overloadTable[prevFunc.argCount] = prevFunc; - } - }; - - /** @param {number=} numArguments */ - var exposePublicSymbol = (name, value, numArguments) => { - if (Module.hasOwnProperty(name)) { - if (undefined === numArguments || (undefined !== Module[name].overloadTable && undefined !== Module[name].overloadTable[numArguments])) { - throwBindingError(`Cannot register public name '${name}' twice`); - } - - // We are exposing a function with the same name as an existing function. Create an overload table and a function selector - // that routes between the two. - ensureOverloadTable(Module, name, name); - if (Module[name].overloadTable.hasOwnProperty(numArguments)) { - throwBindingError(`Cannot register multiple overloads of a function with the same number of arguments (${numArguments})!`); - } - // Add the new function into the overload table. - Module[name].overloadTable[numArguments] = value; - } else { - Module[name] = value; - Module[name].argCount = numArguments; - } - }; - - var char_0 = 48; - - var char_9 = 57; - var makeLegalFunctionName = (name) => { - assert(typeof name === 'string'); - name = name.replace(/[^a-zA-Z0-9_]/g, '$'); - var f = name.charCodeAt(0); - if (f >= char_0 && f <= char_9) { - return `_${name}`; - } - return name; - }; - - - /** @constructor */ - function RegisteredClass(name, - constructor, - instancePrototype, - rawDestructor, - baseClass, - getActualType, - upcast, - downcast) { - this.name = name; - this.constructor = constructor; - this.instancePrototype = instancePrototype; - this.rawDestructor = rawDestructor; - this.baseClass = baseClass; - this.getActualType = getActualType; - this.upcast = upcast; - this.downcast = downcast; - this.pureVirtualFunctions = []; - } - - - var upcastPointer = (ptr, ptrClass, desiredClass) => { - while (ptrClass !== desiredClass) { - if (!ptrClass.upcast) { - throwBindingError(`Expected null or instance of ${desiredClass.name}, got an instance of ${ptrClass.name}`); - } - ptr = ptrClass.upcast(ptr); - ptrClass = ptrClass.baseClass; - } - return ptr; - }; - - /** @suppress {globalThis} */ - function constNoSmartPtrRawPointerToWireType(destructors, handle) { - if (handle === null) { - if (this.isReference) { - throwBindingError(`null is not a valid ${this.name}`); - } - return 0; - } - - if (!handle.$$) { - throwBindingError(`Cannot pass "${embindRepr(handle)}" as a ${this.name}`); - } - if (!handle.$$.ptr) { - throwBindingError(`Cannot pass deleted object as a pointer of type ${this.name}`); - } - var handleClass = handle.$$.ptrType.registeredClass; - var ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass); - return ptr; - } - - - /** @suppress {globalThis} */ - function genericPointerToWireType(destructors, handle) { - var ptr; - if (handle === null) { - if (this.isReference) { - throwBindingError(`null is not a valid ${this.name}`); - } - - if (this.isSmartPointer) { - ptr = this.rawConstructor(); - if (destructors !== null) { - destructors.push(this.rawDestructor, ptr); - } - return ptr; - } else { - return 0; - } - } - - if (!handle || !handle.$$) { - throwBindingError(`Cannot 
pass "${embindRepr(handle)}" as a ${this.name}`); - } - if (!handle.$$.ptr) { - throwBindingError(`Cannot pass deleted object as a pointer of type ${this.name}`); - } - if (!this.isConst && handle.$$.ptrType.isConst) { - throwBindingError(`Cannot convert argument of type ${(handle.$$.smartPtrType ? handle.$$.smartPtrType.name : handle.$$.ptrType.name)} to parameter type ${this.name}`); - } - var handleClass = handle.$$.ptrType.registeredClass; - ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass); - - if (this.isSmartPointer) { - // TODO: this is not strictly true - // We could support BY_EMVAL conversions from raw pointers to smart pointers - // because the smart pointer can hold a reference to the handle - if (undefined === handle.$$.smartPtr) { - throwBindingError('Passing raw pointer to smart pointer is illegal'); - } - - switch (this.sharingPolicy) { - case 0: // NONE - // no upcasting - if (handle.$$.smartPtrType === this) { - ptr = handle.$$.smartPtr; - } else { - throwBindingError(`Cannot convert argument of type ${(handle.$$.smartPtrType ? handle.$$.smartPtrType.name : handle.$$.ptrType.name)} to parameter type ${this.name}`); - } - break; - - case 1: // INTRUSIVE - ptr = handle.$$.smartPtr; - break; - - case 2: // BY_EMVAL - if (handle.$$.smartPtrType === this) { - ptr = handle.$$.smartPtr; - } else { - var clonedHandle = handle['clone'](); - ptr = this.rawShare( - ptr, - Emval.toHandle(() => clonedHandle['delete']()) - ); - if (destructors !== null) { - destructors.push(this.rawDestructor, ptr); - } - } - break; - - default: - throwBindingError('Unsupporting sharing policy'); - } - } - return ptr; - } - - - - /** @suppress {globalThis} */ - function nonConstNoSmartPtrRawPointerToWireType(destructors, handle) { - if (handle === null) { - if (this.isReference) { - throwBindingError(`null is not a valid ${this.name}`); - } - return 0; - } - - if (!handle.$$) { - throwBindingError(`Cannot pass "${embindRepr(handle)}" as a ${this.name}`); - } - if (!handle.$$.ptr) { - throwBindingError(`Cannot pass deleted object as a pointer of type ${this.name}`); - } - if (handle.$$.ptrType.isConst) { - throwBindingError(`Cannot convert argument of type ${handle.$$.ptrType.name} to parameter type ${this.name}`); - } - var handleClass = handle.$$.ptrType.registeredClass; - var ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass); - return ptr; - } - - - /** @suppress {globalThis} */ - function readPointer(pointer) { - return this.fromWireType(HEAPU32[((pointer)>>2)]); - } - - var init_RegisteredPointer = () => { - Object.assign(RegisteredPointer.prototype, { - getPointee(ptr) { - if (this.rawGetPointee) { - ptr = this.rawGetPointee(ptr); - } - return ptr; - }, - destructor(ptr) { - this.rawDestructor?.(ptr); - }, - readValueFromPointer: readPointer, - fromWireType: RegisteredPointer_fromWireType, - }); - }; - /** @constructor - @param {*=} pointeeType, - @param {*=} sharingPolicy, - @param {*=} rawGetPointee, - @param {*=} rawConstructor, - @param {*=} rawShare, - @param {*=} rawDestructor, - */ - function RegisteredPointer( - name, - registeredClass, - isReference, - isConst, - - // smart pointer properties - isSmartPointer, - pointeeType, - sharingPolicy, - rawGetPointee, - rawConstructor, - rawShare, - rawDestructor - ) { - this.name = name; - this.registeredClass = registeredClass; - this.isReference = isReference; - this.isConst = isConst; - - // smart pointer properties - this.isSmartPointer = isSmartPointer; - this.pointeeType = pointeeType; - 
this.sharingPolicy = sharingPolicy; - this.rawGetPointee = rawGetPointee; - this.rawConstructor = rawConstructor; - this.rawShare = rawShare; - this.rawDestructor = rawDestructor; - - if (!isSmartPointer && registeredClass.baseClass === undefined) { - if (isConst) { - this.toWireType = constNoSmartPtrRawPointerToWireType; - this.destructorFunction = null; - } else { - this.toWireType = nonConstNoSmartPtrRawPointerToWireType; - this.destructorFunction = null; - } - } else { - this.toWireType = genericPointerToWireType; - // Here we must leave this.destructorFunction undefined, since whether genericPointerToWireType returns - // a pointer that needs to be freed up is runtime-dependent, and cannot be evaluated at registration time. - // TODO: Create an alternative mechanism that allows removing the use of var destructors = []; array in - // craftInvokerFunction altogether. - } - } - - /** @param {number=} numArguments */ - var replacePublicSymbol = (name, value, numArguments) => { - if (!Module.hasOwnProperty(name)) { - throwInternalError('Replacing nonexistent public symbol'); - } - // If there's an overload table for this symbol, replace the symbol in the overload table instead. - if (undefined !== Module[name].overloadTable && undefined !== numArguments) { - Module[name].overloadTable[numArguments] = value; - } else { - Module[name] = value; - Module[name].argCount = numArguments; - } - }; - - - - var wasmTableMirror = []; - - /** @type {WebAssembly.Table} */ - var wasmTable; - var getWasmTableEntry = (funcPtr) => { - var func = wasmTableMirror[funcPtr]; - if (!func) { - /** @suppress {checkTypes} */ - wasmTableMirror[funcPtr] = func = wasmTable.get(funcPtr); - } - /** @suppress {checkTypes} */ - assert(wasmTable.get(funcPtr) == func, 'JavaScript-side Wasm function table mirror is out of date!'); - return func; - }; - var embind__requireFunction = (signature, rawFunction, isAsync = false) => { - assert(!isAsync, 'Async bindings are only supported with JSPI.'); - - signature = AsciiToString(signature); - - function makeDynCaller() { - var rtn = getWasmTableEntry(rawFunction); - return rtn; - } - - var fp = makeDynCaller(); - if (typeof fp != 'function') { - throwBindingError(`unknown function pointer with signature ${signature}: ${rawFunction}`); - } - return fp; - }; - - - - class UnboundTypeError extends Error {} - - - - var getTypeName = (type) => { - var ptr = ___getTypeName(type); - var rv = AsciiToString(ptr); - _free(ptr); - return rv; - }; - var throwUnboundTypeError = (message, types) => { - var unboundTypes = []; - var seen = {}; - function visit(type) { - if (seen[type]) { - return; - } - if (registeredTypes[type]) { - return; - } - if (typeDependencies[type]) { - typeDependencies[type].forEach(visit); - return; - } - unboundTypes.push(type); - seen[type] = true; - } - types.forEach(visit); - - throw new UnboundTypeError(`${message}: ` + unboundTypes.map(getTypeName).join([', '])); - }; - - - - - var whenDependentTypesAreResolved = (myTypes, dependentTypes, getTypeConverters) => { - myTypes.forEach((type) => typeDependencies[type] = dependentTypes); - - function onComplete(typeConverters) { - var myTypeConverters = getTypeConverters(typeConverters); - if (myTypeConverters.length !== myTypes.length) { - throwInternalError('Mismatched type converter count'); - } - for (var i = 0; i < myTypes.length; ++i) { - registerType(myTypes[i], myTypeConverters[i]); - } - } - - var typeConverters = new Array(dependentTypes.length); - var unregisteredTypes = []; - var registered = 0; - 
dependentTypes.forEach((dt, i) => { - if (registeredTypes.hasOwnProperty(dt)) { - typeConverters[i] = registeredTypes[dt]; - } else { - unregisteredTypes.push(dt); - if (!awaitingDependencies.hasOwnProperty(dt)) { - awaitingDependencies[dt] = []; - } - awaitingDependencies[dt].push(() => { - typeConverters[i] = registeredTypes[dt]; - ++registered; - if (registered === unregisteredTypes.length) { - onComplete(typeConverters); - } - }); - } - }); - if (0 === unregisteredTypes.length) { - onComplete(typeConverters); - } - }; - var __embind_register_class = (rawType, - rawPointerType, - rawConstPointerType, - baseClassRawType, - getActualTypeSignature, - getActualType, - upcastSignature, - upcast, - downcastSignature, - downcast, - name, - destructorSignature, - rawDestructor) => { - name = AsciiToString(name); - getActualType = embind__requireFunction(getActualTypeSignature, getActualType); - upcast &&= embind__requireFunction(upcastSignature, upcast); - downcast &&= embind__requireFunction(downcastSignature, downcast); - rawDestructor = embind__requireFunction(destructorSignature, rawDestructor); - var legalFunctionName = makeLegalFunctionName(name); - - exposePublicSymbol(legalFunctionName, function() { - // this code cannot run if baseClassRawType is zero - throwUnboundTypeError(`Cannot construct ${name} due to unbound types`, [baseClassRawType]); - }); - - whenDependentTypesAreResolved( - [rawType, rawPointerType, rawConstPointerType], - baseClassRawType ? [baseClassRawType] : [], - (base) => { - base = base[0]; - - var baseClass; - var basePrototype; - if (baseClassRawType) { - baseClass = base.registeredClass; - basePrototype = baseClass.instancePrototype; - } else { - basePrototype = ClassHandle.prototype; - } - - var constructor = createNamedFunction(name, function(...args) { - if (Object.getPrototypeOf(this) !== instancePrototype) { - throw new BindingError(`Use 'new' to construct ${name}`); - } - if (undefined === registeredClass.constructor_body) { - throw new BindingError(`${name} has no accessible constructor`); - } - var body = registeredClass.constructor_body[args.length]; - if (undefined === body) { - throw new BindingError(`Tried to invoke ctor of ${name} with invalid number of parameters (${args.length}) - expected (${Object.keys(registeredClass.constructor_body).toString()}) parameters instead!`); - } - return body.apply(this, args); - }); - - var instancePrototype = Object.create(basePrototype, { - constructor: { value: constructor }, - }); - - constructor.prototype = instancePrototype; - - var registeredClass = new RegisteredClass(name, - constructor, - instancePrototype, - rawDestructor, - baseClass, - getActualType, - upcast, - downcast); - - if (registeredClass.baseClass) { - // Keep track of class hierarchy. Used to allow sub-classes to inherit class functions. 
- registeredClass.baseClass.__derivedClasses ??= []; - - registeredClass.baseClass.__derivedClasses.push(registeredClass); - } - - var referenceConverter = new RegisteredPointer(name, - registeredClass, - true, - false, - false); - - var pointerConverter = new RegisteredPointer(name + '*', - registeredClass, - false, - false, - false); - - var constPointerConverter = new RegisteredPointer(name + ' const*', - registeredClass, - false, - true, - false); - - registeredPointers[rawType] = { - pointerType: pointerConverter, - constPointerType: constPointerConverter - }; - - replacePublicSymbol(legalFunctionName, constructor); - - return [referenceConverter, pointerConverter, constPointerConverter]; - } - ); - }; - - var heap32VectorToArray = (count, firstElement) => { - var array = []; - for (var i = 0; i < count; i++) { - // TODO(https://github.com/emscripten-core/emscripten/issues/17310): - // Find a way to hoist the `>> 2` or `>> 3` out of this loop. - array.push(HEAPU32[(((firstElement)+(i * 4))>>2)]); - } - return array; - }; - - - - - var runDestructors = (destructors) => { - while (destructors.length) { - var ptr = destructors.pop(); - var del = destructors.pop(); - del(ptr); - } - }; - - - function usesDestructorStack(argTypes) { - // Skip return value at index 0 - it's not deleted here. - for (var i = 1; i < argTypes.length; ++i) { - // The type does not define a destructor function - must use dynamic stack - if (argTypes[i] !== null && argTypes[i].destructorFunction === undefined) { - return true; - } - } - return false; - } - - - function checkArgCount(numArgs, minArgs, maxArgs, humanName, throwBindingError) { - if (numArgs < minArgs || numArgs > maxArgs) { - var argCountMessage = minArgs == maxArgs ? minArgs : `${minArgs} to ${maxArgs}`; - throwBindingError(`function ${humanName} called with ${numArgs} arguments, expected ${argCountMessage}`); - } - } - function createJsInvoker(argTypes, isClassMethodFunc, returns, isAsync) { - var needsDestructorStack = usesDestructorStack(argTypes); - var argCount = argTypes.length - 2; - var argsList = []; - var argsListWired = ['fn']; - if (isClassMethodFunc) { - argsListWired.push('thisWired'); - } - for (var i = 0; i < argCount; ++i) { - argsList.push(`arg${i}`) - argsListWired.push(`arg${i}Wired`) - } - argsList = argsList.join(',') - argsListWired = argsListWired.join(',') - - var invokerFnBody = `return function (${argsList}) {\n`; - - invokerFnBody += "checkArgCount(arguments.length, minArgs, maxArgs, humanName, throwBindingError);\n"; - - if (needsDestructorStack) { - invokerFnBody += "var destructors = [];\n"; - } - - var dtorStack = needsDestructorStack ? "destructors" : "null"; - var args1 = ["humanName", "throwBindingError", "invoker", "fn", "runDestructors", "fromRetWire", "toClassParamWire"]; - - if (isClassMethodFunc) { - invokerFnBody += `var thisWired = toClassParamWire(${dtorStack}, this);\n`; - } - - for (var i = 0; i < argCount; ++i) { - var argName = `toArg${i}Wire`; - invokerFnBody += `var arg${i}Wired = ${argName}(${dtorStack}, arg${i});\n`; - args1.push(argName); - } - - invokerFnBody += (returns || isAsync ? "var rv = ":"") + `invoker(${argsListWired});\n`; - - var returnVal = returns ? "rv" : ""; - - if (needsDestructorStack) { - invokerFnBody += "runDestructors(destructors);\n"; - } else { - for (var i = isClassMethodFunc?1:2; i < argTypes.length; ++i) { // Skip return value at index 0 - it's not deleted here. Also skip class type if not a method. - var paramName = (i === 1 ? 
"thisWired" : ("arg"+(i - 2)+"Wired")); - if (argTypes[i].destructorFunction !== null) { - invokerFnBody += `${paramName}_dtor(${paramName});\n`; - args1.push(`${paramName}_dtor`); - } - } - } - - if (returns) { - invokerFnBody += "var ret = fromRetWire(rv);\n" + - "return ret;\n"; - } else { - } - - invokerFnBody += "}\n"; - - args1.push('checkArgCount', 'minArgs', 'maxArgs'); - invokerFnBody = `if (arguments.length !== ${args1.length}){ throw new Error(humanName + "Expected ${args1.length} closure arguments " + arguments.length + " given."); }\n${invokerFnBody}`; - return new Function(args1, invokerFnBody); - } - - function getRequiredArgCount(argTypes) { - var requiredArgCount = argTypes.length - 2; - for (var i = argTypes.length - 1; i >= 2; --i) { - if (!argTypes[i].optional) { - break; - } - requiredArgCount--; - } - return requiredArgCount; - } - - function craftInvokerFunction(humanName, argTypes, classType, cppInvokerFunc, cppTargetFunc, /** boolean= */ isAsync) { - // humanName: a human-readable string name for the function to be generated. - // argTypes: An array that contains the embind type objects for all types in the function signature. - // argTypes[0] is the type object for the function return value. - // argTypes[1] is the type object for function this object/class type, or null if not crafting an invoker for a class method. - // argTypes[2...] are the actual function parameters. - // classType: The embind type object for the class to be bound, or null if this is not a method of a class. - // cppInvokerFunc: JS Function object to the C++-side function that interops into C++ code. - // cppTargetFunc: Function pointer (an integer to FUNCTION_TABLE) to the target C++ function the cppInvokerFunc will end up calling. - // isAsync: Optional. If true, returns an async function. Async bindings are only supported with JSPI. - var argCount = argTypes.length; - - if (argCount < 2) { - throwBindingError("argTypes array size mismatch! Must at least get return value and 'this' types!"); - } - - assert(!isAsync, 'Async bindings are only supported with JSPI.'); - var isClassMethodFunc = (argTypes[1] !== null && classType !== null); - - // Free functions with signature "void function()" do not need an invoker that marshalls between wire types. - // TODO: This omits argument count check - enable only at -O3 or similar. - // if (ENABLE_UNSAFE_OPTS && argCount == 2 && argTypes[0].name == "void" && !isClassMethodFunc) { - // return FUNCTION_TABLE[fn]; - // } - - // Determine if we need to use a dynamic stack to store the destructors for the function parameters. - // TODO: Remove this completely once all function invokers are being dynamically generated. - var needsDestructorStack = usesDestructorStack(argTypes); - - var returns = !argTypes[0].isVoid; - - var expectedArgCount = argCount - 2; - var minArgs = getRequiredArgCount(argTypes); - // Builld the arguments that will be passed into the closure around the invoker - // function. - var retType = argTypes[0]; - var instType = argTypes[1]; - var closureArgs = [humanName, throwBindingError, cppInvokerFunc, cppTargetFunc, runDestructors, retType.fromWireType.bind(retType), instType?.toWireType.bind(instType)]; - for (var i = 2; i < argCount; ++i) { - var argType = argTypes[i]; - closureArgs.push(argType.toWireType.bind(argType)); - } - if (!needsDestructorStack) { - // Skip return value at index 0 - it's not deleted here. Also skip class type if not a method. 
- for (var i = isClassMethodFunc?1:2; i < argTypes.length; ++i) { - if (argTypes[i].destructorFunction !== null) { - closureArgs.push(argTypes[i].destructorFunction); - } - } - } - closureArgs.push(checkArgCount, minArgs, expectedArgCount); - - let invokerFactory = createJsInvoker(argTypes, isClassMethodFunc, returns, isAsync); - var invokerFn = invokerFactory(...closureArgs); - return createNamedFunction(humanName, invokerFn); - } - var __embind_register_class_constructor = ( - rawClassType, - argCount, - rawArgTypesAddr, - invokerSignature, - invoker, - rawConstructor - ) => { - assert(argCount > 0); - var rawArgTypes = heap32VectorToArray(argCount, rawArgTypesAddr); - invoker = embind__requireFunction(invokerSignature, invoker); - var args = [rawConstructor]; - var destructors = []; - - whenDependentTypesAreResolved([], [rawClassType], (classType) => { - classType = classType[0]; - var humanName = `constructor ${classType.name}`; - - if (undefined === classType.registeredClass.constructor_body) { - classType.registeredClass.constructor_body = []; - } - if (undefined !== classType.registeredClass.constructor_body[argCount - 1]) { - throw new BindingError(`Cannot register multiple constructors with identical number of parameters (${argCount-1}) for class '${classType.name}'! Overload resolution is currently only performed using the parameter count, not actual type info!`); - } - classType.registeredClass.constructor_body[argCount - 1] = () => { - throwUnboundTypeError(`Cannot construct ${classType.name} due to unbound types`, rawArgTypes); - }; - - whenDependentTypesAreResolved([], rawArgTypes, (argTypes) => { - // Insert empty slot for context type (argTypes[1]). - argTypes.splice(1, 0, null); - classType.registeredClass.constructor_body[argCount - 1] = craftInvokerFunction(humanName, argTypes, null, invoker, rawConstructor); - return []; - }); - return []; - }); - }; - - - - - - - - var getFunctionName = (signature) => { - signature = signature.trim(); - const argsIndex = signature.indexOf("("); - if (argsIndex === -1) return signature; - assert(signature.endsWith(")"), "Parentheses for argument names should match."); - return signature.slice(0, argsIndex); - }; - var __embind_register_class_function = (rawClassType, - methodName, - argCount, - rawArgTypesAddr, // [ReturnType, ThisType, Args...] - invokerSignature, - rawInvoker, - context, - isPureVirtual, - isAsync, - isNonnullReturn) => { - var rawArgTypes = heap32VectorToArray(argCount, rawArgTypesAddr); - methodName = AsciiToString(methodName); - methodName = getFunctionName(methodName); - rawInvoker = embind__requireFunction(invokerSignature, rawInvoker, isAsync); - - whenDependentTypesAreResolved([], [rawClassType], (classType) => { - classType = classType[0]; - var humanName = `${classType.name}.${methodName}`; - - if (methodName.startsWith("@@")) { - methodName = Symbol[methodName.substring(2)]; - } - - if (isPureVirtual) { - classType.registeredClass.pureVirtualFunctions.push(methodName); - } - - function unboundTypesHandler() { - throwUnboundTypeError(`Cannot call ${humanName} due to unbound types`, rawArgTypes); - } - - var proto = classType.registeredClass.instancePrototype; - var method = proto[methodName]; - if (undefined === method || (undefined === method.overloadTable && method.className !== classType.name && method.argCount === argCount - 2)) { - // This is the first overload to be registered, OR we are replacing a - // function in the base class with a function in the derived class. 
- unboundTypesHandler.argCount = argCount - 2; - unboundTypesHandler.className = classType.name; - proto[methodName] = unboundTypesHandler; - } else { - // There was an existing function with the same name registered. Set up - // a function overload routing table. - ensureOverloadTable(proto, methodName, humanName); - proto[methodName].overloadTable[argCount - 2] = unboundTypesHandler; - } - - whenDependentTypesAreResolved([], rawArgTypes, (argTypes) => { - var memberFunction = craftInvokerFunction(humanName, argTypes, classType, rawInvoker, context, isAsync); - - // Replace the initial unbound-handler-stub function with the - // appropriate member function, now that all types are resolved. If - // multiple overloads are registered for this function, the function - // goes into an overload table. - if (undefined === proto[methodName].overloadTable) { - // Set argCount in case an overload is registered later - memberFunction.argCount = argCount - 2; - proto[methodName] = memberFunction; - } else { - proto[methodName].overloadTable[argCount - 2] = memberFunction; - } - - return []; - }); - return []; - }); - }; - - - var emval_freelist = []; - - var emval_handles = [0,1,,1,null,1,true,1,false,1]; - var __emval_decref = (handle) => { - if (handle > 9 && 0 === --emval_handles[handle + 1]) { - assert(emval_handles[handle] !== undefined, `Decref for unallocated handle.`); - emval_handles[handle] = undefined; - emval_freelist.push(handle); - } - }; - - - - var Emval = { - toValue:(handle) => { - if (!handle) { - throwBindingError(`Cannot use deleted val. handle = ${handle}`); - } - // handle 2 is supposed to be `undefined`. - assert(handle === 2 || emval_handles[handle] !== undefined && handle % 2 === 0, `invalid handle: ${handle}`); - return emval_handles[handle]; - }, - toHandle:(value) => { - switch (value) { - case undefined: return 2; - case null: return 4; - case true: return 6; - case false: return 8; - default:{ - const handle = emval_freelist.pop() || emval_handles.length; - emval_handles[handle] = value; - emval_handles[handle + 1] = 1; - return handle; - } - } - }, - }; - - var EmValType = { - name: 'emscripten::val', - fromWireType: (handle) => { - var rv = Emval.toValue(handle); - __emval_decref(handle); - return rv; - }, - toWireType: (destructors, value) => Emval.toHandle(value), - readValueFromPointer: readPointer, - destructorFunction: null, // This type does not need a destructor - - // TODO: do we need a deleteObject here? 
write a test where - // emval is passed into JS via an interface - }; - var __embind_register_emval = (rawType) => registerType(rawType, EmValType); - - var floatReadValueFromPointer = (name, width) => { - switch (width) { - case 4: return function(pointer) { - return this.fromWireType(HEAPF32[((pointer)>>2)]); - }; - case 8: return function(pointer) { - return this.fromWireType(HEAPF64[((pointer)>>3)]); - }; - default: - throw new TypeError(`invalid float width (${width}): ${name}`); - } - }; - - - - var __embind_register_float = (rawType, name, size) => { - name = AsciiToString(name); - registerType(rawType, { - name, - fromWireType: (value) => value, - toWireType: (destructors, value) => { - if (typeof value != "number" && typeof value != "boolean") { - throw new TypeError(`Cannot convert ${embindRepr(value)} to ${this.name}`); - } - // The VM will perform JS to Wasm value conversion, according to the spec: - // https://www.w3.org/TR/wasm-js-api-1/#towebassemblyvalue - return value; - }, - readValueFromPointer: floatReadValueFromPointer(name, size), - destructorFunction: null, // This type does not need a destructor - }); - }; - - - - - - - - - - var __embind_register_function = (name, argCount, rawArgTypesAddr, signature, rawInvoker, fn, isAsync, isNonnullReturn) => { - var argTypes = heap32VectorToArray(argCount, rawArgTypesAddr); - name = AsciiToString(name); - name = getFunctionName(name); - - rawInvoker = embind__requireFunction(signature, rawInvoker, isAsync); - - exposePublicSymbol(name, function() { - throwUnboundTypeError(`Cannot call ${name} due to unbound types`, argTypes); - }, argCount - 1); - - whenDependentTypesAreResolved([], argTypes, (argTypes) => { - var invokerArgsArray = [argTypes[0] /* return value */, null /* no class 'this'*/].concat(argTypes.slice(1) /* actual params */); - replacePublicSymbol(name, craftInvokerFunction(name, invokerArgsArray, null /* no class 'this'*/, rawInvoker, fn, isAsync), argCount - 1); - return []; - }); - }; - - - - - - /** @suppress {globalThis} */ - var __embind_register_integer = (primitiveType, name, size, minRange, maxRange) => { - name = AsciiToString(name); - - const isUnsignedType = minRange === 0; - - let fromWireType = (value) => value; - if (isUnsignedType) { - var bitshift = 32 - 8*size; - fromWireType = (value) => (value << bitshift) >>> bitshift; - maxRange = fromWireType(maxRange); - } - - registerType(primitiveType, { - name, - fromWireType: fromWireType, - toWireType: (destructors, value) => { - if (typeof value != "number" && typeof value != "boolean") { - throw new TypeError(`Cannot convert "${embindRepr(value)}" to ${name}`); - } - assertIntegerRange(name, value, minRange, maxRange); - // The VM will perform JS to Wasm value conversion, according to the spec: - // https://www.w3.org/TR/wasm-js-api-1/#towebassemblyvalue - return value; - }, - readValueFromPointer: integerReadValueFromPointer(name, size, minRange !== 0), - destructorFunction: null, // This type does not need a destructor - }); - }; - - - var __embind_register_memory_view = (rawType, dataTypeIndex, name) => { - var typeMapping = [ - Int8Array, - Uint8Array, - Int16Array, - Uint16Array, - Int32Array, - Uint32Array, - Float32Array, - Float64Array, - BigInt64Array, - BigUint64Array, - ]; - - var TA = typeMapping[dataTypeIndex]; - - function decodeMemoryView(handle) { - var size = HEAPU32[((handle)>>2)]; - var data = HEAPU32[(((handle)+(4))>>2)]; - return new TA(HEAP8.buffer, data, size); - } - - name = AsciiToString(name); - registerType(rawType, { - name, 
- fromWireType: decodeMemoryView, - readValueFromPointer: decodeMemoryView, - }, { - ignoreDuplicateRegistrations: true, - }); - }; - - - var EmValOptionalType = Object.assign({optional: true}, EmValType);; - var __embind_register_optional = (rawOptionalType, rawType) => { - registerType(rawOptionalType, EmValOptionalType); - }; - - - - var __embind_register_smart_ptr = (rawType, - rawPointeeType, - name, - sharingPolicy, - getPointeeSignature, - rawGetPointee, - constructorSignature, - rawConstructor, - shareSignature, - rawShare, - destructorSignature, - rawDestructor) => { - name = AsciiToString(name); - rawGetPointee = embind__requireFunction(getPointeeSignature, rawGetPointee); - rawConstructor = embind__requireFunction(constructorSignature, rawConstructor); - rawShare = embind__requireFunction(shareSignature, rawShare); - rawDestructor = embind__requireFunction(destructorSignature, rawDestructor); - - whenDependentTypesAreResolved([rawType], [rawPointeeType], (pointeeType) => { - pointeeType = pointeeType[0]; - - var registeredPointer = new RegisteredPointer(name, - pointeeType.registeredClass, - false, - false, - // smart pointer properties - true, - pointeeType, - sharingPolicy, - rawGetPointee, - rawConstructor, - rawShare, - rawDestructor); - return [registeredPointer]; - }); - }; - - - - - - - - - - var __embind_register_std_string = (rawType, name) => { - name = AsciiToString(name); - var stdStringIsUTF8 = true; - - registerType(rawType, { - name, - // For some method names we use string keys here since they are part of - // the public/external API and/or used by the runtime-generated code. - fromWireType(value) { - var length = HEAPU32[((value)>>2)]; - var payload = value + 4; - - var str; - if (stdStringIsUTF8) { - str = UTF8ToString(payload, length, true); - } else { - str = ''; - for (var i = 0; i < length; ++i) { - str += String.fromCharCode(HEAPU8[payload + i]); - } - } - - _free(value); - - return str; - }, - toWireType(destructors, value) { - if (value instanceof ArrayBuffer) { - value = new Uint8Array(value); - } - - var length; - var valueIsOfTypeString = (typeof value == 'string'); - - // We accept `string` or array views with single byte elements - if (!(valueIsOfTypeString || (ArrayBuffer.isView(value) && value.BYTES_PER_ELEMENT == 1))) { - throwBindingError('Cannot pass non-string to std::string'); - } - if (stdStringIsUTF8 && valueIsOfTypeString) { - length = lengthBytesUTF8(value); - } else { - length = value.length; - } - - // assumes POINTER_SIZE alignment - var base = _malloc(4 + length + 1); - var ptr = base + 4; - HEAPU32[((base)>>2)] = length; - if (valueIsOfTypeString) { - if (stdStringIsUTF8) { - stringToUTF8(value, ptr, length + 1); - } else { - for (var i = 0; i < length; ++i) { - var charCode = value.charCodeAt(i); - if (charCode > 255) { - _free(base); - throwBindingError('String has UTF-16 code units that do not fit in 8 bits'); - } - HEAPU8[ptr + i] = charCode; - } - } - } else { - HEAPU8.set(value, ptr); - } - - if (destructors !== null) { - destructors.push(_free, base); - } - return base; - }, - readValueFromPointer: readPointer, - destructorFunction(ptr) { - _free(ptr); - }, - }); - }; - - - - - var UTF16Decoder = typeof TextDecoder != 'undefined' ? 
new TextDecoder('utf-16le') : undefined;; - - var UTF16ToString = (ptr, maxBytesToRead, ignoreNul) => { - assert(ptr % 2 == 0, 'Pointer passed to UTF16ToString must be aligned to two bytes!'); - var idx = ((ptr)>>1); - var endIdx = findStringEnd(HEAPU16, idx, maxBytesToRead / 2, ignoreNul); - - // When using conditional TextDecoder, skip it for short strings as the overhead of the native call is not worth it. - if (endIdx - idx > 16 && UTF16Decoder) - return UTF16Decoder.decode(HEAPU16.subarray(idx, endIdx)); - - // Fallback: decode without UTF16Decoder - var str = ''; - - // If maxBytesToRead is not passed explicitly, it will be undefined, and the - // for-loop's condition will always evaluate to true. The loop is then - // terminated on the first null char. - for (var i = idx; i < endIdx; ++i) { - var codeUnit = HEAPU16[i]; - // fromCharCode constructs a character from a UTF-16 code unit, so we can - // pass the UTF16 string right through. - str += String.fromCharCode(codeUnit); - } - - return str; - }; - - var stringToUTF16 = (str, outPtr, maxBytesToWrite) => { - assert(outPtr % 2 == 0, 'Pointer passed to stringToUTF16 must be aligned to two bytes!'); - assert(typeof maxBytesToWrite == 'number', 'stringToUTF16(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!'); - // Backwards compatibility: if max bytes is not specified, assume unsafe unbounded write is allowed. - maxBytesToWrite ??= 0x7FFFFFFF; - if (maxBytesToWrite < 2) return 0; - maxBytesToWrite -= 2; // Null terminator. - var startPtr = outPtr; - var numCharsToWrite = (maxBytesToWrite < str.length*2) ? (maxBytesToWrite / 2) : str.length; - for (var i = 0; i < numCharsToWrite; ++i) { - // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP. - var codeUnit = str.charCodeAt(i); // possibly a lead surrogate - HEAP16[((outPtr)>>1)] = codeUnit; - outPtr += 2; - } - // Null-terminate the pointer to the HEAP. - HEAP16[((outPtr)>>1)] = 0; - return outPtr - startPtr; - }; - - var lengthBytesUTF16 = (str) => str.length*2; - - var UTF32ToString = (ptr, maxBytesToRead, ignoreNul) => { - assert(ptr % 4 == 0, 'Pointer passed to UTF32ToString must be aligned to four bytes!'); - var str = ''; - var startIdx = ((ptr)>>2); - // If maxBytesToRead is not passed explicitly, it will be undefined, and this - // will always evaluate to true. This saves on code size. - for (var i = 0; !(i >= maxBytesToRead / 4); i++) { - var utf32 = HEAPU32[startIdx + i]; - if (!utf32 && !ignoreNul) break; - str += String.fromCodePoint(utf32); - } - return str; - }; - - var stringToUTF32 = (str, outPtr, maxBytesToWrite) => { - assert(outPtr % 4 == 0, 'Pointer passed to stringToUTF32 must be aligned to four bytes!'); - assert(typeof maxBytesToWrite == 'number', 'stringToUTF32(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!'); - // Backwards compatibility: if max bytes is not specified, assume unsafe unbounded write is allowed. - maxBytesToWrite ??= 0x7FFFFFFF; - if (maxBytesToWrite < 4) return 0; - var startPtr = outPtr; - var endPtr = startPtr + maxBytesToWrite - 4; - for (var i = 0; i < str.length; ++i) { - var codePoint = str.codePointAt(i); - // Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16. - // We need to manually skip over the second code unit for correct iteration. 
- if (codePoint > 0xFFFF) { - i++; - } - HEAP32[((outPtr)>>2)] = codePoint; - outPtr += 4; - if (outPtr + 4 > endPtr) break; - } - // Null-terminate the pointer to the HEAP. - HEAP32[((outPtr)>>2)] = 0; - return outPtr - startPtr; - }; - - var lengthBytesUTF32 = (str) => { - var len = 0; - for (var i = 0; i < str.length; ++i) { - var codePoint = str.codePointAt(i); - // Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16. - // We need to manually skip over the second code unit for correct iteration. - if (codePoint > 0xFFFF) { - i++; - } - len += 4; - } - - return len; - }; - var __embind_register_std_wstring = (rawType, charSize, name) => { - name = AsciiToString(name); - var decodeString, encodeString, lengthBytesUTF; - if (charSize === 2) { - decodeString = UTF16ToString; - encodeString = stringToUTF16; - lengthBytesUTF = lengthBytesUTF16; - } else { - assert(charSize === 4, 'only 2-byte and 4-byte strings are currently supported'); - decodeString = UTF32ToString; - encodeString = stringToUTF32; - lengthBytesUTF = lengthBytesUTF32; - } - registerType(rawType, { - name, - fromWireType: (value) => { - // Code mostly taken from _embind_register_std_string fromWireType - var length = HEAPU32[((value)>>2)]; - var str = decodeString(value + 4, length * charSize, true); - - _free(value); - - return str; - }, - toWireType: (destructors, value) => { - if (!(typeof value == 'string')) { - throwBindingError(`Cannot pass non-string to C++ string type ${name}`); - } - - // assumes POINTER_SIZE alignment - var length = lengthBytesUTF(value); - var ptr = _malloc(4 + length + charSize); - HEAPU32[((ptr)>>2)] = length / charSize; - - encodeString(value, ptr + 4, length + charSize); - - if (destructors !== null) { - destructors.push(_free, ptr); - } - return ptr; - }, - readValueFromPointer: readPointer, - destructorFunction(ptr) { - _free(ptr); - } - }); - }; - - - var __embind_register_void = (rawType, name) => { - name = AsciiToString(name); - registerType(rawType, { - isVoid: true, // void return values can be optimized out sometimes - name, - fromWireType: () => undefined, - // TODO: assert if anything else is given? - toWireType: (destructors, o) => undefined, - }); - }; - - var __emscripten_system = (command) => { - // int system(const char *command); - // http://pubs.opengroup.org/onlinepubs/000095399/functions/system.html - // Can't call external programs. 
- if (!command) return 0; // no shell available - return -52; - }; - - var __emscripten_throw_longjmp = () => { - throw Infinity; - }; - - var emval_methodCallers = []; - var emval_addMethodCaller = (caller) => { - var id = emval_methodCallers.length; - emval_methodCallers.push(caller); - return id; - }; - - - - var requireRegisteredType = (rawType, humanName) => { - var impl = registeredTypes[rawType]; - if (undefined === impl) { - throwBindingError(`${humanName} has unknown type ${getTypeName(rawType)}`); - } - return impl; - }; - var emval_lookupTypes = (argCount, argTypes) => { - var a = new Array(argCount); - for (var i = 0; i < argCount; ++i) { - a[i] = requireRegisteredType(HEAPU32[(((argTypes)+(i*4))>>2)], - `parameter ${i}`); - } - return a; - }; - - - var emval_returnValue = (toReturnWire, destructorsRef, handle) => { - var destructors = []; - var result = toReturnWire(destructors, handle); - if (destructors.length) { - // void, primitives and any other types w/o destructors don't need to allocate a handle - HEAPU32[((destructorsRef)>>2)] = Emval.toHandle(destructors); - } - return result; - }; - - - var emval_symbols = { - }; - - var getStringOrSymbol = (address) => { - var symbol = emval_symbols[address]; - if (symbol === undefined) { - return AsciiToString(address); - } - return symbol; - }; - var __emval_create_invoker = (argCount, argTypesPtr, kind) => { - var GenericWireTypeSize = 8; - - var [retType, ...argTypes] = emval_lookupTypes(argCount, argTypesPtr); - var toReturnWire = retType.toWireType.bind(retType); - var argFromPtr = argTypes.map(type => type.readValueFromPointer.bind(type)); - argCount--; // remove the extracted return type - - var captures = {'toValue': Emval.toValue}; - var args = argFromPtr.map((argFromPtr, i) => { - var captureName = `argFromPtr${i}`; - captures[captureName] = argFromPtr; - return `${captureName}(args${i ? 
'+' + i * GenericWireTypeSize : ''})`; - }); - var functionBody; - switch (kind){ - case 0: - functionBody = 'toValue(handle)'; - break; - case 2: - functionBody = 'new (toValue(handle))'; - break; - case 3: - functionBody = ''; - break; - case 1: - captures['getStringOrSymbol'] = getStringOrSymbol; - functionBody = 'toValue(handle)[getStringOrSymbol(methodName)]'; - break; - } - functionBody += `(${args})`; - if (!retType.isVoid) { - captures['toReturnWire'] = toReturnWire; - captures['emval_returnValue'] = emval_returnValue; - functionBody = `return emval_returnValue(toReturnWire, destructorsRef, ${functionBody})`; - } - functionBody = `return function (handle, methodName, destructorsRef, args) { - ${functionBody} - }`; - - var invokerFunction = new Function(Object.keys(captures), functionBody)(...Object.values(captures)); - var functionName = `methodCaller<(${argTypes.map(t => t.name)}) => ${retType.name}>`; - return emval_addMethodCaller(createNamedFunction(functionName, invokerFunction)); - }; - - - var __emval_incref = (handle) => { - if (handle > 9) { - emval_handles[handle + 1] += 1; - } - }; - - - - var __emval_invoke = (caller, handle, methodName, destructorsRef, args) => { - return emval_methodCallers[caller](handle, methodName, destructorsRef, args); - }; - - - - var __emval_run_destructors = (handle) => { - var destructors = Emval.toValue(handle); - runDestructors(destructors); - __emval_decref(handle); - }; - - function __gmtime_js(time, tmPtr) { - time = bigintToI53Checked(time); - - - var date = new Date(time * 1000); - HEAP32[((tmPtr)>>2)] = date.getUTCSeconds(); - HEAP32[(((tmPtr)+(4))>>2)] = date.getUTCMinutes(); - HEAP32[(((tmPtr)+(8))>>2)] = date.getUTCHours(); - HEAP32[(((tmPtr)+(12))>>2)] = date.getUTCDate(); - HEAP32[(((tmPtr)+(16))>>2)] = date.getUTCMonth(); - HEAP32[(((tmPtr)+(20))>>2)] = date.getUTCFullYear()-1900; - HEAP32[(((tmPtr)+(24))>>2)] = date.getUTCDay(); - var start = Date.UTC(date.getUTCFullYear(), 0, 1, 0, 0, 0, 0); - var yday = ((date.getTime() - start) / (1000 * 60 * 60 * 24))|0; - HEAP32[(((tmPtr)+(28))>>2)] = yday; - ; - } - - var isLeapYear = (year) => year%4 === 0 && (year%100 !== 0 || year%400 === 0); - - var MONTH_DAYS_LEAP_CUMULATIVE = [0,31,60,91,121,152,182,213,244,274,305,335]; - - var MONTH_DAYS_REGULAR_CUMULATIVE = [0,31,59,90,120,151,181,212,243,273,304,334]; - var ydayFromDate = (date) => { - var leap = isLeapYear(date.getFullYear()); - var monthDaysCumulative = (leap ? MONTH_DAYS_LEAP_CUMULATIVE : MONTH_DAYS_REGULAR_CUMULATIVE); - var yday = monthDaysCumulative[date.getMonth()] + date.getDate() - 1; // -1 since it's days since Jan 1 - - return yday; - }; - - function __localtime_js(time, tmPtr) { - time = bigintToI53Checked(time); - - - var date = new Date(time*1000); - HEAP32[((tmPtr)>>2)] = date.getSeconds(); - HEAP32[(((tmPtr)+(4))>>2)] = date.getMinutes(); - HEAP32[(((tmPtr)+(8))>>2)] = date.getHours(); - HEAP32[(((tmPtr)+(12))>>2)] = date.getDate(); - HEAP32[(((tmPtr)+(16))>>2)] = date.getMonth(); - HEAP32[(((tmPtr)+(20))>>2)] = date.getFullYear()-1900; - HEAP32[(((tmPtr)+(24))>>2)] = date.getDay(); - - var yday = ydayFromDate(date)|0; - HEAP32[(((tmPtr)+(28))>>2)] = yday; - HEAP32[(((tmPtr)+(36))>>2)] = -(date.getTimezoneOffset() * 60); - - // Attention: DST is in December in South, and some regions don't have DST at all. 
- var start = new Date(date.getFullYear(), 0, 1); - var summerOffset = new Date(date.getFullYear(), 6, 1).getTimezoneOffset(); - var winterOffset = start.getTimezoneOffset(); - var dst = (summerOffset != winterOffset && date.getTimezoneOffset() == Math.min(winterOffset, summerOffset))|0; - HEAP32[(((tmPtr)+(32))>>2)] = dst; - ; - } - - - var __mktime_js = function(tmPtr) { - - var ret = (() => { - var date = new Date(HEAP32[(((tmPtr)+(20))>>2)] + 1900, - HEAP32[(((tmPtr)+(16))>>2)], - HEAP32[(((tmPtr)+(12))>>2)], - HEAP32[(((tmPtr)+(8))>>2)], - HEAP32[(((tmPtr)+(4))>>2)], - HEAP32[((tmPtr)>>2)], - 0); - - // There's an ambiguous hour when the time goes back; the tm_isdst field is - // used to disambiguate it. Date() basically guesses, so we fix it up if it - // guessed wrong, or fill in tm_isdst with the guess if it's -1. - var dst = HEAP32[(((tmPtr)+(32))>>2)]; - var guessedOffset = date.getTimezoneOffset(); - var start = new Date(date.getFullYear(), 0, 1); - var summerOffset = new Date(date.getFullYear(), 6, 1).getTimezoneOffset(); - var winterOffset = start.getTimezoneOffset(); - var dstOffset = Math.min(winterOffset, summerOffset); // DST is in December in South - if (dst < 0) { - // Attention: some regions don't have DST at all. - HEAP32[(((tmPtr)+(32))>>2)] = Number(summerOffset != winterOffset && dstOffset == guessedOffset); - } else if ((dst > 0) != (dstOffset == guessedOffset)) { - var nonDstOffset = Math.max(winterOffset, summerOffset); - var trueOffset = dst > 0 ? dstOffset : nonDstOffset; - // Don't try setMinutes(date.getMinutes() + ...) -- it's messed up. - date.setTime(date.getTime() + (trueOffset - guessedOffset)*60000); - } - - HEAP32[(((tmPtr)+(24))>>2)] = date.getDay(); - var yday = ydayFromDate(date)|0; - HEAP32[(((tmPtr)+(28))>>2)] = yday; - // To match expected behavior, update fields from date - HEAP32[((tmPtr)>>2)] = date.getSeconds(); - HEAP32[(((tmPtr)+(4))>>2)] = date.getMinutes(); - HEAP32[(((tmPtr)+(8))>>2)] = date.getHours(); - HEAP32[(((tmPtr)+(12))>>2)] = date.getDate(); - HEAP32[(((tmPtr)+(16))>>2)] = date.getMonth(); - HEAP32[(((tmPtr)+(20))>>2)] = date.getYear(); - - var timeMs = date.getTime(); - if (isNaN(timeMs)) { - return -1; - } - // Return time in microseconds - return timeMs / 1000; - })(); - return BigInt(ret); - }; - - - var __tzset_js = (timezone, daylight, std_name, dst_name) => { - // TODO: Use (malleable) environment variables instead of system settings. - var currentYear = new Date().getFullYear(); - var winter = new Date(currentYear, 0, 1); - var summer = new Date(currentYear, 6, 1); - var winterOffset = winter.getTimezoneOffset(); - var summerOffset = summer.getTimezoneOffset(); - - // Local standard timezone offset. Local standard time is not adjusted for - // daylight savings. This code uses the fact that getTimezoneOffset returns - // a greater value during Standard Time versus Daylight Saving Time (DST). - // Thus it determines the expected output during Standard Time, and it - // compares whether the output of the given date the same (Standard) or less - // (DST). - var stdTimezoneOffset = Math.max(winterOffset, summerOffset); - - // timezone is specified as seconds west of UTC ("The external variable - // `timezone` shall be set to the difference, in seconds, between - // Coordinated Universal Time (UTC) and local standard time."), the same - // as returned by stdTimezoneOffset. 
- // See http://pubs.opengroup.org/onlinepubs/009695399/functions/tzset.html - HEAPU32[((timezone)>>2)] = stdTimezoneOffset * 60; - - HEAP32[((daylight)>>2)] = Number(winterOffset != summerOffset); - - var extractZone = (timezoneOffset) => { - // Why inverse sign? - // Read here https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/getTimezoneOffset - var sign = timezoneOffset >= 0 ? "-" : "+"; - - var absOffset = Math.abs(timezoneOffset) - var hours = String(Math.floor(absOffset / 60)).padStart(2, "0"); - var minutes = String(absOffset % 60).padStart(2, "0"); - - return `UTC${sign}${hours}${minutes}`; - } - - var winterName = extractZone(winterOffset); - var summerName = extractZone(summerOffset); - assert(winterName); - assert(summerName); - assert(lengthBytesUTF8(winterName) <= 16, `timezone name truncated to fit in TZNAME_MAX (${winterName})`); - assert(lengthBytesUTF8(summerName) <= 16, `timezone name truncated to fit in TZNAME_MAX (${summerName})`); - if (summerOffset < winterOffset) { - // Northern hemisphere - stringToUTF8(winterName, std_name, 17); - stringToUTF8(summerName, dst_name, 17); - } else { - stringToUTF8(winterName, dst_name, 17); - stringToUTF8(summerName, std_name, 17); - } - }; - - var _emscripten_get_now = () => performance.now(); - - var _emscripten_date_now = () => Date.now(); - - var nowIsMonotonic = 1; - - var checkWasiClock = (clock_id) => clock_id >= 0 && clock_id <= 3; - - function _clock_time_get(clk_id, ignored_precision, ptime) { - ignored_precision = bigintToI53Checked(ignored_precision); - - - if (!checkWasiClock(clk_id)) { - return 28; - } - var now; - // all wasi clocks but realtime are monotonic - if (clk_id === 0) { - now = _emscripten_date_now(); - } else if (nowIsMonotonic) { - now = _emscripten_get_now(); - } else { - return 52; - } - // "now" is in ms, and wasi times are in ns. - var nsec = Math.round(now * 1000 * 1000); - HEAP64[((ptime)>>3)] = BigInt(nsec); - return 0; - ; - } - - - var _emscripten_err = (str) => err(UTF8ToString(str)); - - var getHeapMax = () => - HEAPU8.length; - var _emscripten_get_heap_max = () => getHeapMax(); - - - var abortOnCannotGrowMemory = (requestedSize) => { - abort(`Cannot enlarge memory arrays to size ${requestedSize} bytes (OOM). Either (1) compile with -sINITIAL_MEMORY=X with X higher than the current value ${HEAP8.length}, (2) compile with -sALLOW_MEMORY_GROWTH which allows increasing the size at runtime, or (3) if you want malloc to return NULL (0) instead of this abort, compile with -sABORTING_MALLOC=0`); - }; - var _emscripten_resize_heap = (requestedSize) => { - var oldSize = HEAPU8.length; - // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned. - requestedSize >>>= 0; - abortOnCannotGrowMemory(requestedSize); - }; - - var ENV = { - }; - - var getExecutableName = () => thisProgram || './this.program'; - var getEnvStrings = () => { - if (!getEnvStrings.strings) { - // Default values. - // Browser language detection #8751 - var lang = ((typeof navigator == 'object' && navigator.language) || 'C').replace('-', '_') + '.UTF-8'; - var env = { - 'USER': 'web_user', - 'LOGNAME': 'web_user', - 'PATH': '/', - 'PWD': '/', - 'HOME': '/home/web_user', - 'LANG': lang, - '_': getExecutableName() - }; - // Apply the user-provided values, if any. - for (var x in ENV) { - // x is a key in ENV; if ENV[x] is undefined, that means it was - // explicitly set to be so. We allow user code to do that to - // force variables with default values to remain unset. 
- if (ENV[x] === undefined) delete env[x]; - else env[x] = ENV[x]; - } - var strings = []; - for (var x in env) { - strings.push(`${x}=${env[x]}`); - } - getEnvStrings.strings = strings; - } - return getEnvStrings.strings; - }; - - var _environ_get = (__environ, environ_buf) => { - var bufSize = 0; - var envp = 0; - for (var string of getEnvStrings()) { - var ptr = environ_buf + bufSize; - HEAPU32[(((__environ)+(envp))>>2)] = ptr; - bufSize += stringToUTF8(string, ptr, Infinity) + 1; - envp += 4; - } - return 0; - }; - - - var _environ_sizes_get = (penviron_count, penviron_buf_size) => { - var strings = getEnvStrings(); - HEAPU32[((penviron_count)>>2)] = strings.length; - var bufSize = 0; - for (var string of strings) { - bufSize += lengthBytesUTF8(string) + 1; - } - HEAPU32[((penviron_buf_size)>>2)] = bufSize; - return 0; - }; - - - var runtimeKeepaliveCounter = 0; - var keepRuntimeAlive = () => noExitRuntime || runtimeKeepaliveCounter > 0; - var _proc_exit = (code) => { - EXITSTATUS = code; - if (!keepRuntimeAlive()) { - Module['onExit']?.(code); - ABORT = true; - } - quit_(code, new ExitStatus(code)); - }; - - - /** @suppress {duplicate } */ - /** @param {boolean|number=} implicit */ - var exitJS = (status, implicit) => { - EXITSTATUS = status; - - checkUnflushedContent(); - - // if exit() was called explicitly, warn the user if the runtime isn't actually being shut down - if (keepRuntimeAlive() && !implicit) { - var msg = `program exited (with status: ${status}), but keepRuntimeAlive() is set (counter=${runtimeKeepaliveCounter}) due to an async operation, so halting execution but not exiting the runtime or preventing further async execution (you can use emscripten_force_exit, if you want to force a true shutdown)`; - readyPromiseReject?.(msg); - err(msg); - } - - _proc_exit(status); - }; - var _exit = exitJS; - - function _fd_close(fd) { - try { - - var stream = SYSCALLS.getStreamFromFD(fd); - FS.close(stream); - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return e.errno; - } - } - - function _fd_fdstat_get(fd, pbuf) { - try { - - var rightsBase = 0; - var rightsInheriting = 0; - var flags = 0; - { - var stream = SYSCALLS.getStreamFromFD(fd); - // All character devices are terminals (other things a Linux system would - // assume is a character device, like the mouse, we have special APIs for). - var type = stream.tty ? 2 : - FS.isDir(stream.mode) ? 3 : - FS.isLink(stream.mode) ? 
7 : - 4; - } - HEAP8[pbuf] = type; - HEAP16[(((pbuf)+(2))>>1)] = flags; - HEAP64[(((pbuf)+(8))>>3)] = BigInt(rightsBase); - HEAP64[(((pbuf)+(16))>>3)] = BigInt(rightsInheriting); - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return e.errno; - } - } - - /** @param {number=} offset */ - var doReadv = (stream, iov, iovcnt, offset) => { - var ret = 0; - for (var i = 0; i < iovcnt; i++) { - var ptr = HEAPU32[((iov)>>2)]; - var len = HEAPU32[(((iov)+(4))>>2)]; - iov += 8; - var curr = FS.read(stream, HEAP8, ptr, len, offset); - if (curr < 0) return -1; - ret += curr; - if (curr < len) break; // nothing more to read - if (typeof offset != 'undefined') { - offset += curr; - } - } - return ret; - }; - - function _fd_read(fd, iov, iovcnt, pnum) { - try { - - var stream = SYSCALLS.getStreamFromFD(fd); - var num = doReadv(stream, iov, iovcnt); - HEAPU32[((pnum)>>2)] = num; - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return e.errno; - } - } - - - function _fd_seek(fd, offset, whence, newOffset) { - offset = bigintToI53Checked(offset); - - - try { - - if (isNaN(offset)) return 61; - var stream = SYSCALLS.getStreamFromFD(fd); - FS.llseek(stream, offset, whence); - HEAP64[((newOffset)>>3)] = BigInt(stream.position); - if (stream.getdents && offset === 0 && whence === 0) stream.getdents = null; // reset readdir state - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return e.errno; - } - ; - } - - function _fd_sync(fd) { - try { - - var stream = SYSCALLS.getStreamFromFD(fd); - if (stream.stream_ops?.fsync) { - return stream.stream_ops.fsync(stream); - } - return 0; // we can't do anything synchronously; the in-memory FS is already synced to - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return e.errno; - } - } - - /** @param {number=} offset */ - var doWritev = (stream, iov, iovcnt, offset) => { - var ret = 0; - for (var i = 0; i < iovcnt; i++) { - var ptr = HEAPU32[((iov)>>2)]; - var len = HEAPU32[(((iov)+(4))>>2)]; - iov += 8; - var curr = FS.write(stream, HEAP8, ptr, len, offset); - if (curr < 0) return -1; - ret += curr; - if (curr < len) { - // No more space to write. - break; - } - if (typeof offset != 'undefined') { - offset += curr; - } - } - return ret; - }; - - function _fd_write(fd, iov, iovcnt, pnum) { - try { - - var stream = SYSCALLS.getStreamFromFD(fd); - var num = doWritev(stream, iov, iovcnt); - HEAPU32[((pnum)>>2)] = num; - return 0; - } catch (e) { - if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; - return e.errno; - } - } - - - - - - - - - - var _getaddrinfo = (node, service, hint, out) => { - // Note getaddrinfo currently only returns a single addrinfo with ai_next defaulting to NULL. When NULL - // hints are specified or ai_family set to AF_UNSPEC or ai_socktype or ai_protocol set to 0 then we - // really should provide a linked list of suitable addrinfo values. - var addrs = []; - var canon = null; - var addr = 0; - var port = 0; - var flags = 0; - var family = 0; - var type = 0; - var proto = 0; - var ai, last; - - function allocaddrinfo(family, type, proto, canon, addr, port) { - var sa, salen, ai; - var errno; - - salen = family === 10 ? - 28 : - 16; - addr = family === 10 ? 
- inetNtop6(addr) : - inetNtop4(addr); - sa = _malloc(salen); - errno = writeSockaddr(sa, family, addr, port); - assert(!errno); - - ai = _malloc(32); - HEAP32[(((ai)+(4))>>2)] = family; - HEAP32[(((ai)+(8))>>2)] = type; - HEAP32[(((ai)+(12))>>2)] = proto; - HEAPU32[(((ai)+(24))>>2)] = canon; - HEAPU32[(((ai)+(20))>>2)] = sa; - if (family === 10) { - HEAP32[(((ai)+(16))>>2)] = 28; - } else { - HEAP32[(((ai)+(16))>>2)] = 16; - } - HEAP32[(((ai)+(28))>>2)] = 0; - - return ai; - } - - if (hint) { - flags = HEAP32[((hint)>>2)]; - family = HEAP32[(((hint)+(4))>>2)]; - type = HEAP32[(((hint)+(8))>>2)]; - proto = HEAP32[(((hint)+(12))>>2)]; - } - if (type && !proto) { - proto = type === 2 ? 17 : 6; - } - if (!type && proto) { - type = proto === 17 ? 2 : 1; - } - - // If type or proto are set to zero in hints we should really be returning multiple addrinfo values, but for - // now default to a TCP STREAM socket so we can at least return a sensible addrinfo given NULL hints. - if (proto === 0) { - proto = 6; - } - if (type === 0) { - type = 1; - } - - if (!node && !service) { - return -2; - } - if (flags & ~(1|2|4| - 1024|8|16|32)) { - return -1; - } - if (hint !== 0 && (HEAP32[((hint)>>2)] & 2) && !node) { - return -1; - } - if (flags & 32) { - // TODO - return -2; - } - if (type !== 0 && type !== 1 && type !== 2) { - return -7; - } - if (family !== 0 && family !== 2 && family !== 10) { - return -6; - } - - if (service) { - service = UTF8ToString(service); - port = parseInt(service, 10); - - if (isNaN(port)) { - if (flags & 1024) { - return -2; - } - // TODO support resolving well-known service names from: - // http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt - return -8; - } - } - - if (!node) { - if (family === 0) { - family = 2; - } - if ((flags & 1) === 0) { - if (family === 2) { - addr = _htonl(2130706433); - } else { - addr = [0, 0, 0, _htonl(1)]; - } - } - ai = allocaddrinfo(family, type, proto, null, addr, port); - HEAPU32[((out)>>2)] = ai; - return 0; - } - - // - // try as a numeric address - // - node = UTF8ToString(node); - addr = inetPton4(node); - if (addr !== null) { - // incoming node is a valid ipv4 address - if (family === 0 || family === 2) { - family = 2; - } - else if (family === 10 && (flags & 8)) { - addr = [0, 0, _htonl(0xffff), addr]; - family = 10; - } else { - return -2; - } - } else { - addr = inetPton6(node); - if (addr !== null) { - // incoming node is a valid ipv6 address - if (family === 0 || family === 10) { - family = 10; - } else { - return -2; - } - } - } - if (addr != null) { - ai = allocaddrinfo(family, type, proto, node, addr, port); - HEAPU32[((out)>>2)] = ai; - return 0; - } - if (flags & 4) { - return -2; - } - - // - // try as a hostname - // - // resolve the hostname to a temporary fake address - node = DNS.lookup_name(node); - addr = inetPton4(node); - if (family === 0) { - family = 2; - } else if (family === 10) { - addr = [0, 0, _htonl(0xffff), addr]; - } - ai = allocaddrinfo(family, type, proto, null, addr, port); - HEAPU32[((out)>>2)] = ai; - return 0; - }; - - - - - var FS_createPath = (...args) => FS.createPath(...args); - - - - var FS_unlink = (...args) => FS.unlink(...args); - - var FS_createLazyFile = (...args) => FS.createLazyFile(...args); - - var FS_createDevice = (...args) => FS.createDevice(...args); - - FS.createPreloadedFile = FS_createPreloadedFile; - FS.preloadFile = FS_preloadFile; - FS.staticInit();; -init_ClassHandle(); -init_RegisteredPointer(); -assert(emval_handles.length === 5 * 2); -// 
End JS library code - -// include: postlibrary.js -// This file is included after the automatically-generated JS library code -// but before the wasm module is created. - -{ - - // Begin ATMODULES hooks - if (Module['noExitRuntime']) noExitRuntime = Module['noExitRuntime']; -if (Module['preloadPlugins']) preloadPlugins = Module['preloadPlugins']; -if (Module['print']) out = Module['print']; -if (Module['printErr']) err = Module['printErr']; -if (Module['wasmBinary']) wasmBinary = Module['wasmBinary']; - // End ATMODULES hooks - - checkIncomingModuleAPI(); - - if (Module['arguments']) arguments_ = Module['arguments']; - if (Module['thisProgram']) thisProgram = Module['thisProgram']; - - // Assertions on removed incoming Module JS APIs. - assert(typeof Module['memoryInitializerPrefixURL'] == 'undefined', 'Module.memoryInitializerPrefixURL option was removed, use Module.locateFile instead'); - assert(typeof Module['pthreadMainPrefixURL'] == 'undefined', 'Module.pthreadMainPrefixURL option was removed, use Module.locateFile instead'); - assert(typeof Module['cdInitializerPrefixURL'] == 'undefined', 'Module.cdInitializerPrefixURL option was removed, use Module.locateFile instead'); - assert(typeof Module['filePackagePrefixURL'] == 'undefined', 'Module.filePackagePrefixURL option was removed, use Module.locateFile instead'); - assert(typeof Module['read'] == 'undefined', 'Module.read option was removed'); - assert(typeof Module['readAsync'] == 'undefined', 'Module.readAsync option was removed (modify readAsync in JS)'); - assert(typeof Module['readBinary'] == 'undefined', 'Module.readBinary option was removed (modify readBinary in JS)'); - assert(typeof Module['setWindowTitle'] == 'undefined', 'Module.setWindowTitle option was removed (modify emscripten_set_window_title in JS)'); - assert(typeof Module['TOTAL_MEMORY'] == 'undefined', 'Module.TOTAL_MEMORY has been renamed Module.INITIAL_MEMORY'); - assert(typeof Module['ENVIRONMENT'] == 'undefined', 'Module.ENVIRONMENT has been deprecated. To force the environment, use the ENVIRONMENT compile-time option (for example, -sENVIRONMENT=web or -sENVIRONMENT=node)'); - assert(typeof Module['STACK_SIZE'] == 'undefined', 'STACK_SIZE can no longer be set at runtime. Use -sSTACK_SIZE at link time') - // If memory is defined in wasm, the user can't provide it, or set INITIAL_MEMORY - assert(typeof Module['wasmMemory'] == 'undefined', 'Use of `wasmMemory` detected. Use -sIMPORTED_MEMORY to define wasmMemory externally'); - assert(typeof Module['INITIAL_MEMORY'] == 'undefined', 'Detected runtime INITIAL_MEMORY setting. 
Use -sIMPORTED_MEMORY to define wasmMemory dynamically'); - -} - -// Begin runtime exports - Module['addRunDependency'] = addRunDependency; - Module['removeRunDependency'] = removeRunDependency; - Module['FS_preloadFile'] = FS_preloadFile; - Module['FS_unlink'] = FS_unlink; - Module['FS_createPath'] = FS_createPath; - Module['FS_createDevice'] = FS_createDevice; - Module['FS'] = FS; - Module['FS_createDataFile'] = FS_createDataFile; - Module['FS_createLazyFile'] = FS_createLazyFile; - var missingLibrarySymbols = [ - 'writeI53ToI64', - 'writeI53ToI64Clamped', - 'writeI53ToI64Signaling', - 'writeI53ToU64Clamped', - 'writeI53ToU64Signaling', - 'readI53FromI64', - 'readI53FromU64', - 'convertI32PairToI53', - 'convertI32PairToI53Checked', - 'convertU32PairToI53', - 'stackAlloc', - 'getTempRet0', - 'setTempRet0', - 'growMemory', - 'withStackSave', - 'readEmAsmArgs', - 'jstoi_q', - 'autoResumeAudioContext', - 'getDynCaller', - 'dynCall', - 'handleException', - 'runtimeKeepalivePush', - 'runtimeKeepalivePop', - 'callUserCallback', - 'maybeExit', - 'asmjsMangle', - 'alignMemory', - 'HandleAllocator', - 'getNativeTypeSize', - 'addOnInit', - 'addOnPostCtor', - 'addOnPreMain', - 'addOnExit', - 'STACK_SIZE', - 'STACK_ALIGN', - 'POINTER_SIZE', - 'ASSERTIONS', - 'ccall', - 'cwrap', - 'convertJsFunctionToWasm', - 'getEmptyTableSlot', - 'updateTableMap', - 'getFunctionAddress', - 'addFunction', - 'removeFunction', - 'intArrayToString', - 'stringToAscii', - 'stringToNewUTF8', - 'stringToUTF8OnStack', - 'writeArrayToMemory', - 'registerKeyEventCallback', - 'maybeCStringToJsString', - 'findEventTarget', - 'getBoundingClientRect', - 'fillMouseEventData', - 'registerMouseEventCallback', - 'registerWheelEventCallback', - 'registerUiEventCallback', - 'registerFocusEventCallback', - 'fillDeviceOrientationEventData', - 'registerDeviceOrientationEventCallback', - 'fillDeviceMotionEventData', - 'registerDeviceMotionEventCallback', - 'screenOrientation', - 'fillOrientationChangeEventData', - 'registerOrientationChangeEventCallback', - 'fillFullscreenChangeEventData', - 'registerFullscreenChangeEventCallback', - 'JSEvents_requestFullscreen', - 'JSEvents_resizeCanvasForFullscreen', - 'registerRestoreOldStyle', - 'hideEverythingExceptGivenElement', - 'restoreHiddenElements', - 'setLetterbox', - 'softFullscreenResizeWebGLRenderTarget', - 'doRequestFullscreen', - 'fillPointerlockChangeEventData', - 'registerPointerlockChangeEventCallback', - 'registerPointerlockErrorEventCallback', - 'requestPointerLock', - 'fillVisibilityChangeEventData', - 'registerVisibilityChangeEventCallback', - 'registerTouchEventCallback', - 'fillGamepadEventData', - 'registerGamepadEventCallback', - 'registerBeforeUnloadEventCallback', - 'fillBatteryEventData', - 'registerBatteryEventCallback', - 'setCanvasElementSize', - 'getCanvasElementSize', - 'jsStackTrace', - 'getCallstack', - 'convertPCtoSourceLocation', - 'wasiRightsToMuslOFlags', - 'wasiOFlagsToMuslOFlags', - 'safeSetTimeout', - 'setImmediateWrapped', - 'safeRequestAnimationFrame', - 'clearImmediateWrapped', - 'registerPostMainLoop', - 'registerPreMainLoop', - 'getPromise', - 'makePromise', - 'idsToPromises', - 'makePromiseCallback', - 'findMatchingCatch', - 'Browser_asyncPrepareDataCounter', - 'arraySum', - 'addDays', - 'FS_mkdirTree', - '_setNetworkCallback', - 'heapObjectForWebGLType', - 'toTypedArrayIndex', - 'webgl_enable_ANGLE_instanced_arrays', - 'webgl_enable_OES_vertex_array_object', - 'webgl_enable_WEBGL_draw_buffers', - 'webgl_enable_WEBGL_multi_draw', - 
'webgl_enable_EXT_polygon_offset_clamp', - 'webgl_enable_EXT_clip_control', - 'webgl_enable_WEBGL_polygon_mode', - 'emscriptenWebGLGet', - 'computeUnpackAlignedImageSize', - 'colorChannelsInGlTextureFormat', - 'emscriptenWebGLGetTexPixelData', - 'emscriptenWebGLGetUniform', - 'webglGetUniformLocation', - 'webglPrepareUniformLocationsBeforeFirstUse', - 'webglGetLeftBracePos', - 'emscriptenWebGLGetVertexAttrib', - '__glGetActiveAttribOrUniform', - 'writeGLArray', - 'registerWebGlEventCallback', - 'runAndAbortIfError', - 'ALLOC_NORMAL', - 'ALLOC_STACK', - 'allocate', - 'writeStringToMemory', - 'writeAsciiToMemory', - 'demangle', - 'stackTrace', - 'getFunctionArgsName', - 'createJsInvokerSignature', - 'PureVirtualError', - 'registerInheritedInstance', - 'unregisterInheritedInstance', - 'getInheritedInstanceCount', - 'getLiveInheritedInstances', - 'enumReadValueFromPointer', - 'setDelayFunction', - 'validateThis', - 'count_emval_handles', - 'emval_get_global', -]; -missingLibrarySymbols.forEach(missingLibrarySymbol) - - var unexportedSymbols = [ - 'run', - 'out', - 'err', - 'callMain', - 'abort', - 'wasmMemory', - 'wasmExports', - 'HEAPF32', - 'HEAPF64', - 'HEAP8', - 'HEAPU8', - 'HEAP16', - 'HEAPU16', - 'HEAP32', - 'HEAPU32', - 'HEAP64', - 'HEAPU64', - 'writeStackCookie', - 'checkStackCookie', - 'INT53_MAX', - 'INT53_MIN', - 'bigintToI53Checked', - 'stackSave', - 'stackRestore', - 'ptrToString', - 'zeroMemory', - 'exitJS', - 'getHeapMax', - 'abortOnCannotGrowMemory', - 'ENV', - 'ERRNO_CODES', - 'strError', - 'inetPton4', - 'inetNtop4', - 'inetPton6', - 'inetNtop6', - 'readSockaddr', - 'writeSockaddr', - 'DNS', - 'Protocols', - 'Sockets', - 'timers', - 'warnOnce', - 'readEmAsmArgsArray', - 'getExecutableName', - 'keepRuntimeAlive', - 'asyncLoad', - 'mmapAlloc', - 'wasmTable', - 'getUniqueRunDependency', - 'noExitRuntime', - 'addOnPreRun', - 'addOnPostRun', - 'freeTableIndexes', - 'functionsInTableMap', - 'setValue', - 'getValue', - 'PATH', - 'PATH_FS', - 'UTF8Decoder', - 'UTF8ArrayToString', - 'UTF8ToString', - 'stringToUTF8Array', - 'stringToUTF8', - 'lengthBytesUTF8', - 'intArrayFromString', - 'AsciiToString', - 'UTF16Decoder', - 'UTF16ToString', - 'stringToUTF16', - 'lengthBytesUTF16', - 'UTF32ToString', - 'stringToUTF32', - 'lengthBytesUTF32', - 'JSEvents', - 'specialHTMLTargets', - 'findCanvasEventTarget', - 'currentFullscreenStrategy', - 'restoreOldWindowedStyle', - 'UNWIND_CACHE', - 'ExitStatus', - 'getEnvStrings', - 'checkWasiClock', - 'doReadv', - 'doWritev', - 'initRandomFill', - 'randomFill', - 'emSetImmediate', - 'emClearImmediate_deps', - 'emClearImmediate', - 'promiseMap', - 'uncaughtExceptionCount', - 'exceptionLast', - 'exceptionCaught', - 'ExceptionInfo', - 'Browser', - 'requestFullscreen', - 'requestFullScreen', - 'setCanvasSize', - 'getUserMedia', - 'createContext', - 'getPreloadedImageData__data', - 'wget', - 'MONTH_DAYS_REGULAR', - 'MONTH_DAYS_LEAP', - 'MONTH_DAYS_REGULAR_CUMULATIVE', - 'MONTH_DAYS_LEAP_CUMULATIVE', - 'isLeapYear', - 'ydayFromDate', - 'SYSCALLS', - 'getSocketFromFD', - 'getSocketAddress', - 'preloadPlugins', - 'FS_createPreloadedFile', - 'FS_modeStringToFlags', - 'FS_getMode', - 'FS_stdin_getChar_buffer', - 'FS_stdin_getChar', - 'FS_readFile', - 'FS_root', - 'FS_mounts', - 'FS_devices', - 'FS_streams', - 'FS_nextInode', - 'FS_nameTable', - 'FS_currentPath', - 'FS_initialized', - 'FS_ignorePermissions', - 'FS_filesystems', - 'FS_syncFSRequests', - 'FS_readFiles', - 'FS_lookupPath', - 'FS_getPath', - 'FS_hashName', - 'FS_hashAddNode', - 'FS_hashRemoveNode', - 
'FS_lookupNode', - 'FS_createNode', - 'FS_destroyNode', - 'FS_isRoot', - 'FS_isMountpoint', - 'FS_isFile', - 'FS_isDir', - 'FS_isLink', - 'FS_isChrdev', - 'FS_isBlkdev', - 'FS_isFIFO', - 'FS_isSocket', - 'FS_flagsToPermissionString', - 'FS_nodePermissions', - 'FS_mayLookup', - 'FS_mayCreate', - 'FS_mayDelete', - 'FS_mayOpen', - 'FS_checkOpExists', - 'FS_nextfd', - 'FS_getStreamChecked', - 'FS_getStream', - 'FS_createStream', - 'FS_closeStream', - 'FS_dupStream', - 'FS_doSetAttr', - 'FS_chrdev_stream_ops', - 'FS_major', - 'FS_minor', - 'FS_makedev', - 'FS_registerDevice', - 'FS_getDevice', - 'FS_getMounts', - 'FS_syncfs', - 'FS_mount', - 'FS_unmount', - 'FS_lookup', - 'FS_mknod', - 'FS_statfs', - 'FS_statfsStream', - 'FS_statfsNode', - 'FS_create', - 'FS_mkdir', - 'FS_mkdev', - 'FS_symlink', - 'FS_rename', - 'FS_rmdir', - 'FS_readdir', - 'FS_readlink', - 'FS_stat', - 'FS_fstat', - 'FS_lstat', - 'FS_doChmod', - 'FS_chmod', - 'FS_lchmod', - 'FS_fchmod', - 'FS_doChown', - 'FS_chown', - 'FS_lchown', - 'FS_fchown', - 'FS_doTruncate', - 'FS_truncate', - 'FS_ftruncate', - 'FS_utime', - 'FS_open', - 'FS_close', - 'FS_isClosed', - 'FS_llseek', - 'FS_read', - 'FS_write', - 'FS_mmap', - 'FS_msync', - 'FS_ioctl', - 'FS_writeFile', - 'FS_cwd', - 'FS_chdir', - 'FS_createDefaultDirectories', - 'FS_createDefaultDevices', - 'FS_createSpecialDirectories', - 'FS_createStandardStreams', - 'FS_staticInit', - 'FS_init', - 'FS_quit', - 'FS_findObject', - 'FS_analyzePath', - 'FS_createFile', - 'FS_forceLoadFile', - 'FS_absolutePath', - 'FS_createFolder', - 'FS_createLink', - 'FS_joinPath', - 'FS_mmapAlloc', - 'FS_standardizePath', - 'MEMFS', - 'TTY', - 'PIPEFS', - 'SOCKFS', - 'tempFixedLengthArray', - 'miniTempWebGLFloatBuffers', - 'miniTempWebGLIntBuffers', - 'GL', - 'AL', - 'GLUT', - 'EGL', - 'GLEW', - 'IDBStore', - 'SDL', - 'SDL_gfx', - 'allocateUTF8', - 'allocateUTF8OnStack', - 'print', - 'printErr', - 'jstoi_s', - 'InternalError', - 'BindingError', - 'throwInternalError', - 'throwBindingError', - 'registeredTypes', - 'awaitingDependencies', - 'typeDependencies', - 'tupleRegistrations', - 'structRegistrations', - 'sharedRegisterType', - 'whenDependentTypesAreResolved', - 'getTypeName', - 'getFunctionName', - 'heap32VectorToArray', - 'requireRegisteredType', - 'usesDestructorStack', - 'checkArgCount', - 'getRequiredArgCount', - 'createJsInvoker', - 'UnboundTypeError', - 'EmValType', - 'EmValOptionalType', - 'throwUnboundTypeError', - 'ensureOverloadTable', - 'exposePublicSymbol', - 'replacePublicSymbol', - 'createNamedFunction', - 'embindRepr', - 'registeredInstances', - 'getBasestPointer', - 'getInheritedInstance', - 'registeredPointers', - 'registerType', - 'integerReadValueFromPointer', - 'floatReadValueFromPointer', - 'assertIntegerRange', - 'readPointer', - 'runDestructors', - 'craftInvokerFunction', - 'embind__requireFunction', - 'genericPointerToWireType', - 'constNoSmartPtrRawPointerToWireType', - 'nonConstNoSmartPtrRawPointerToWireType', - 'init_RegisteredPointer', - 'RegisteredPointer', - 'RegisteredPointer_fromWireType', - 'runDestructor', - 'releaseClassHandle', - 'finalizationRegistry', - 'detachFinalizer_deps', - 'detachFinalizer', - 'attachFinalizer', - 'makeClassHandle', - 'init_ClassHandle', - 'ClassHandle', - 'throwInstanceAlreadyDeleted', - 'deletionQueue', - 'flushPendingDeletes', - 'delayFunction', - 'RegisteredClass', - 'shallowCopyInternalPointer', - 'downcastPointer', - 'upcastPointer', - 'char_0', - 'char_9', - 'makeLegalFunctionName', - 'emval_freelist', - 'emval_handles', - 
'emval_symbols', - 'getStringOrSymbol', - 'Emval', - 'emval_returnValue', - 'emval_lookupTypes', - 'emval_methodCallers', - 'emval_addMethodCaller', -]; -unexportedSymbols.forEach(unexportedRuntimeSymbol); - - // End runtime exports - // Begin JS library exports - // End JS library exports - -// end include: postlibrary.js - -function checkIncomingModuleAPI() { - ignoredModuleProp('fetchSettings'); -} - -// Imports from the Wasm binary. -var ___getTypeName = makeInvalidEarlyAccess('___getTypeName'); -var _free = makeInvalidEarlyAccess('_free'); -var _malloc = makeInvalidEarlyAccess('_malloc'); -var _fflush = makeInvalidEarlyAccess('_fflush'); -var _emscripten_stack_get_end = makeInvalidEarlyAccess('_emscripten_stack_get_end'); -var _emscripten_stack_get_base = makeInvalidEarlyAccess('_emscripten_stack_get_base'); -var _htonl = makeInvalidEarlyAccess('_htonl'); -var _htons = makeInvalidEarlyAccess('_htons'); -var _ntohs = makeInvalidEarlyAccess('_ntohs'); -var _strerror = makeInvalidEarlyAccess('_strerror'); -var _setThrew = makeInvalidEarlyAccess('_setThrew'); -var _emscripten_stack_init = makeInvalidEarlyAccess('_emscripten_stack_init'); -var _emscripten_stack_get_free = makeInvalidEarlyAccess('_emscripten_stack_get_free'); -var __emscripten_stack_restore = makeInvalidEarlyAccess('__emscripten_stack_restore'); -var __emscripten_stack_alloc = makeInvalidEarlyAccess('__emscripten_stack_alloc'); -var _emscripten_stack_get_current = makeInvalidEarlyAccess('_emscripten_stack_get_current'); - -function assignWasmExports(wasmExports) { - ___getTypeName = createExportWrapper('__getTypeName', 1); - _free = createExportWrapper('free', 1); - _malloc = createExportWrapper('malloc', 1); - _fflush = createExportWrapper('fflush', 1); - _emscripten_stack_get_end = wasmExports['emscripten_stack_get_end']; - _emscripten_stack_get_base = wasmExports['emscripten_stack_get_base']; - _htonl = createExportWrapper('htonl', 1); - _htons = createExportWrapper('htons', 1); - _ntohs = createExportWrapper('ntohs', 1); - _strerror = createExportWrapper('strerror', 1); - _setThrew = createExportWrapper('setThrew', 2); - _emscripten_stack_init = wasmExports['emscripten_stack_init']; - _emscripten_stack_get_free = wasmExports['emscripten_stack_get_free']; - __emscripten_stack_restore = wasmExports['_emscripten_stack_restore']; - __emscripten_stack_alloc = wasmExports['_emscripten_stack_alloc']; - _emscripten_stack_get_current = wasmExports['emscripten_stack_get_current']; -} -var wasmImports = { - /** @export */ - __assert_fail: ___assert_fail, - /** @export */ - __cxa_throw: ___cxa_throw, - /** @export */ - __syscall_chmod: ___syscall_chmod, - /** @export */ - __syscall_connect: ___syscall_connect, - /** @export */ - __syscall_dup: ___syscall_dup, - /** @export */ - __syscall_faccessat: ___syscall_faccessat, - /** @export */ - __syscall_fchmod: ___syscall_fchmod, - /** @export */ - __syscall_fcntl64: ___syscall_fcntl64, - /** @export */ - __syscall_fstat64: ___syscall_fstat64, - /** @export */ - __syscall_ftruncate64: ___syscall_ftruncate64, - /** @export */ - __syscall_getcwd: ___syscall_getcwd, - /** @export */ - __syscall_ioctl: ___syscall_ioctl, - /** @export */ - __syscall_lstat64: ___syscall_lstat64, - /** @export */ - __syscall_newfstatat: ___syscall_newfstatat, - /** @export */ - __syscall_openat: ___syscall_openat, - /** @export */ - __syscall_readlinkat: ___syscall_readlinkat, - /** @export */ - __syscall_recvfrom: ___syscall_recvfrom, - /** @export */ - __syscall_rmdir: ___syscall_rmdir, - /** @export */ - 
__syscall_sendto: ___syscall_sendto, - /** @export */ - __syscall_socket: ___syscall_socket, - /** @export */ - __syscall_stat64: ___syscall_stat64, - /** @export */ - __syscall_unlinkat: ___syscall_unlinkat, - /** @export */ - _abort_js: __abort_js, - /** @export */ - _embind_register_bigint: __embind_register_bigint, - /** @export */ - _embind_register_bool: __embind_register_bool, - /** @export */ - _embind_register_class: __embind_register_class, - /** @export */ - _embind_register_class_constructor: __embind_register_class_constructor, - /** @export */ - _embind_register_class_function: __embind_register_class_function, - /** @export */ - _embind_register_emval: __embind_register_emval, - /** @export */ - _embind_register_float: __embind_register_float, - /** @export */ - _embind_register_function: __embind_register_function, - /** @export */ - _embind_register_integer: __embind_register_integer, - /** @export */ - _embind_register_memory_view: __embind_register_memory_view, - /** @export */ - _embind_register_optional: __embind_register_optional, - /** @export */ - _embind_register_smart_ptr: __embind_register_smart_ptr, - /** @export */ - _embind_register_std_string: __embind_register_std_string, - /** @export */ - _embind_register_std_wstring: __embind_register_std_wstring, - /** @export */ - _embind_register_void: __embind_register_void, - /** @export */ - _emscripten_system: __emscripten_system, - /** @export */ - _emscripten_throw_longjmp: __emscripten_throw_longjmp, - /** @export */ - _emval_create_invoker: __emval_create_invoker, - /** @export */ - _emval_decref: __emval_decref, - /** @export */ - _emval_incref: __emval_incref, - /** @export */ - _emval_invoke: __emval_invoke, - /** @export */ - _emval_run_destructors: __emval_run_destructors, - /** @export */ - _gmtime_js: __gmtime_js, - /** @export */ - _localtime_js: __localtime_js, - /** @export */ - _mktime_js: __mktime_js, - /** @export */ - _tzset_js: __tzset_js, - /** @export */ - clock_time_get: _clock_time_get, - /** @export */ - emscripten_date_now: _emscripten_date_now, - /** @export */ - emscripten_err: _emscripten_err, - /** @export */ - emscripten_get_heap_max: _emscripten_get_heap_max, - /** @export */ - emscripten_get_now: _emscripten_get_now, - /** @export */ - emscripten_resize_heap: _emscripten_resize_heap, - /** @export */ - environ_get: _environ_get, - /** @export */ - environ_sizes_get: _environ_sizes_get, - /** @export */ - exit: _exit, - /** @export */ - fd_close: _fd_close, - /** @export */ - fd_fdstat_get: _fd_fdstat_get, - /** @export */ - fd_read: _fd_read, - /** @export */ - fd_seek: _fd_seek, - /** @export */ - fd_sync: _fd_sync, - /** @export */ - fd_write: _fd_write, - /** @export */ - getaddrinfo: _getaddrinfo, - /** @export */ - invoke_ii, - /** @export */ - invoke_v, - /** @export */ - invoke_vi, - /** @export */ - invoke_vii -}; -var wasmExports = await createWasm(); - -function invoke_vii(index,a1,a2) { - var sp = stackSave(); - try { - getWasmTableEntry(index)(a1,a2); - } catch(e) { - stackRestore(sp); - if (e !== e+0) throw e; - _setThrew(1, 0); - } -} - -function invoke_vi(index,a1) { - var sp = stackSave(); - try { - getWasmTableEntry(index)(a1); - } catch(e) { - stackRestore(sp); - if (e !== e+0) throw e; - _setThrew(1, 0); - } -} - -function invoke_ii(index,a1) { - var sp = stackSave(); - try { - return getWasmTableEntry(index)(a1); - } catch(e) { - stackRestore(sp); - if (e !== e+0) throw e; - _setThrew(1, 0); - } -} - -function invoke_v(index) { - var sp = stackSave(); - try { - 
getWasmTableEntry(index)(); - } catch(e) { - stackRestore(sp); - if (e !== e+0) throw e; - _setThrew(1, 0); - } -} - - -// include: postamble.js -// === Auto-generated postamble setup entry stuff === - -var calledRun; - -function stackCheckInit() { - // This is normally called automatically during __wasm_call_ctors but need to - // get these values before even running any of the ctors so we call it redundantly - // here. - _emscripten_stack_init(); - // TODO(sbc): Move writeStackCookie to native to to avoid this. - writeStackCookie(); -} - -function run() { - - if (runDependencies > 0) { - dependenciesFulfilled = run; - return; - } - - stackCheckInit(); - - preRun(); - - // a preRun added a dependency, run will be called later - if (runDependencies > 0) { - dependenciesFulfilled = run; - return; - } - - function doRun() { - // run may have just been called through dependencies being fulfilled just in this very frame, - // or while the async setStatus time below was happening - assert(!calledRun); - calledRun = true; - Module['calledRun'] = true; - - if (ABORT) return; - - initRuntime(); - - readyPromiseResolve?.(Module); - Module['onRuntimeInitialized']?.(); - consumedModuleProp('onRuntimeInitialized'); - - assert(!Module['_main'], 'compiled without a main, but one is present. if you added it from JS, use Module["onRuntimeInitialized"]'); - - postRun(); - } - - if (Module['setStatus']) { - Module['setStatus']('Running...'); - setTimeout(() => { - setTimeout(() => Module['setStatus'](''), 1); - doRun(); - }, 1); - } else - { - doRun(); - } - checkStackCookie(); -} - -function checkUnflushedContent() { - // Compiler settings do not allow exiting the runtime, so flushing - // the streams is not possible. but in ASSERTIONS mode we check - // if there was something to flush, and if so tell the user they - // should request that the runtime be exitable. - // Normally we would not even include flush() at all, but in ASSERTIONS - // builds we do so just for this check, and here we see if there is any - // content to flush, that is, we check if there would have been - // something a non-ASSERTIONS build would have not seen. - // How we flush the streams depends on whether we are in SYSCALLS_REQUIRE_FILESYSTEM=0 - // mode (which has its own special function for this; otherwise, all - // the code is inside libc) - var oldOut = out; - var oldErr = err; - var has = false; - out = err = (x) => { - has = true; - } - try { // it doesn't matter if it fails - _fflush(0); - // also flush in the JS FS layer - ['stdout', 'stderr'].forEach((name) => { - var info = FS.analyzePath('/dev/' + name); - if (!info) return; - var stream = info.object; - var rdev = stream.rdev; - var tty = TTY.ttys[rdev]; - if (tty?.output?.length) { - has = true; - } - }); - } catch(e) {} - out = oldOut; - err = oldErr; - if (has) { - warnOnce('stdio streams had content in them that was not flushed. you should set EXIT_RUNTIME to 1 (see the Emscripten FAQ), or make sure to emit a newline when you printf etc.'); - } -} - -function preInit() { - if (Module['preInit']) { - if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']]; - while (Module['preInit'].length > 0) { - Module['preInit'].shift()(); - } - } - consumedModuleProp('preInit'); -} - -preInit(); -run(); - -// end include: postamble.js - -// include: postamble_modularize.js -// In MODULARIZE mode we wrap the generated code in a factory function -// and return either the Module itself, or a promise of the module. 
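Concretely, the factory exported below (`createWasmModule`) is what calling code awaits to obtain the initialized Module. A minimal consumption sketch, assuming worker-side TypeScript; the import path, URL prefix and message shape are illustrative rather than confirmed project API, while `locateFile`, `print` and `printErr` are standard Module options that the generated code above reads:

import createWasmModule from './geant4_wasm/geant4_wasm';

// locateFile lets the Emscripten runtime resolve the .wasm and .data side
// files against a base URL of our choosing; print/printErr forward the
// simulation output to the main thread.
const geant4 = await createWasmModule({
	locateFile: (path: string) => `/geant4_wasm/${path}`,
	print: (line: string) => postMessage({ type: 'stdout', line }),
	printErr: (line: string) => postMessage({ type: 'stderr', line })
});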
-// -// We assign to the `moduleRtn` global here and configure closure to see -// this as and extern so it won't get minified. - -if (runtimeInitialized) { - moduleRtn = Module; -} else { - // Set up the promise that indicates the Module is initialized - moduleRtn = new Promise((resolve, reject) => { - readyPromiseResolve = resolve; - readyPromiseReject = reject; - }); -} - -// Assertion for attempting to access module properties on the incoming -// moduleArg. In the past we used this object as the prototype of the module -// and assigned properties to it, but now we return a distinct object. This -// keeps the instance private until it is ready (i.e the promise has been -// resolved). -for (const prop of Object.keys(Module)) { - if (!(prop in moduleArg)) { - Object.defineProperty(moduleArg, prop, { - configurable: true, - get() { - abort(`Access to module property ('${prop}') is no longer possible via the module constructor argument; Instead, use the result of the module constructor.`) - } - }); - } -} -// end include: postamble_modularize.js - - - - return moduleRtn; -} - -// Export using a UMD style export, or ES6 exports if selected -export default createWasmModule; - diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js deleted file mode 100644 index 0b92d5231..000000000 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_G4EMLOW8.6.1.js +++ /dev/null @@ -1,421 +0,0 @@ -export default async function loadDataFile(Module) { - - Module['expectedDataFileDownloads'] ??= 0; - Module['expectedDataFileDownloads']++; - // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. - var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; - var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; - if (isPthread || isWasmWorker) return; -return new Promise((loadDataResolve, loadDataReject) => { - async function loadPackage(metadata) { - - var PACKAGE_PATH = ''; - if (typeof window === 'object') { - PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); - } else if (typeof process === 'undefined' && typeof location !== 'undefined') { - // web worker - PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); - } - var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/G4EMLOW8.6.1.data'; - var REMOTE_PACKAGE_BASE = 'G4EMLOW8.6.1.data'; - var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; - var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; - - async function fetchRemotePackage(packageName, packageSize) { - - Module['dataFileDownloads'] ??= {}; - try { - var response = await fetch(packageName); - } catch (e) { - throw new Error(`Network Error: ${packageName}`, {e}); - } - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - - const chunks = []; - const headers = response.headers; - const total = Number(headers.get('Content-Length') ?? 
packageSize); - let loaded = 0; - - Module['setStatus']?.('Downloading data...'); - const reader = response.body.getReader(); - - while (1) { - var {done, value} = await reader.read(); - if (done) break; - chunks.push(value); - loaded += value.length; - Module['dataFileDownloads'][packageName] = {loaded, total}; - - let totalLoaded = 0; - let totalSize = 0; - - for (const download of Object.values(Module['dataFileDownloads'])) { - totalLoaded += download.loaded; - totalSize += download.total; - } - - Module['setStatus']?.(`DL (G4EMLOW) (${totalLoaded}/${totalSize})`); - } - Module['setStatus']?.(`END DL (G4EMLOW)`); - - const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); - let offset = 0; - for (const chunk of chunks) { - packageData.set(chunk, offset); - offset += chunk.length; - } - return packageData.buffer; - } - - async function runWithFS(Module) { - - function assert(check, msg) { - if (!check) throw new Error(msg); - } -Module['FS_createPath']("/", "data", true, true); -Module['FS_createPath']("/data", "G4EMLOW8.6.1", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "JAEAESData", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "XRayReflection_data", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "auger", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "brem", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "brem_SB", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/brem_SB", "SBTables", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "charge_transf", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "comp", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "dna", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "doppler", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "dpwa", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa", "dcss", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa/dcss", "el", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa/dcss", "pos", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa", "stables", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa/stables", "el", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/dpwa/stables", "pos", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "epics2017", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/epics2017", "comp", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/epics2017", "pair", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/epics2017", "phot", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/epics2017", "rayl", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "estar", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/estar", "estar_basic", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/estar/estar_basic", "elems", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/estar/estar_basic", "mater", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/estar", "estar_long", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/estar/estar_long", "elems", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/estar/estar_long", "mater", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "fluor", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "fluor_ANSTO", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "fluor_Bearden", true, true); 
-Module['FS_createPath']("/data/G4EMLOW8.6.1", "fluor_XDB_EADL", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "ion_stopping_data", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/ion_stopping_data", "icru73", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/ion_stopping_data", "icru90", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "ioni", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "livermore", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "brem", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "comp", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "pair", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "pairdata", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "phot_epics2014", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "rayl", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/livermore", "tripdata", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "microelec", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/microelec", "Elastic", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/microelec", "Inelastic", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/microelec", "Structure", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "msc_GS", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS", "GSGrid_1", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS", "GSGrid_2", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS", "MottCor", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS/MottCor", "el", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS/MottCor", "pos", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS", "PWACor", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS/PWACor", "el", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/msc_GS/PWACor", "pos", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "mupair", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "penelope", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/penelope", "bremsstrahlung", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/penelope", "pairproduction", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/penelope", "photoelectric", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/penelope", "rayleigh", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/penelope/rayleigh", "MIFF", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "photoelectric_angular", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "pixe", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe", "ecpssr", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe/ecpssr", "alpha", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe/ecpssr", "proton", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe", "kacsPaul", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe", "kpcsPaul", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe", "uf", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1", "pixe_ANSTO", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe_ANSTO", "alpha", true, true); -Module['FS_createPath']("/data/G4EMLOW8.6.1/pixe_ANSTO", "proton", true, true); - - /** @constructor */ - function DataRequest(start, end, audio) { 
- this.start = start; - this.end = end; - this.audio = audio; - } - DataRequest.prototype = { - requests: {}, - open: function(mode, name) { - this.name = name; - this.requests[name] = this; - Module['addRunDependency'](`fp ${this.name}`); - }, - send: function() {}, - onload: function() { - var byteArray = this.byteArray.subarray(this.start, this.end); - this.finish(byteArray); - }, - finish: async function(byteArray) { - var that = this; - // canOwn this data in the filesystem, it is a slice into the heap that will never change - Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); - Module['removeRunDependency'](`fp ${that.name}`); -loadDataResolve(); - this.requests[this.name] = null; - } - }; - - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', files[i]['filename']); - } - - var PACKAGE_UUID = metadata['package_uuid']; - var IDB_RO = "readonly"; - var IDB_RW = "readwrite"; - var DB_NAME = "EM_PRELOAD_CACHE"; - var DB_VERSION = 1; - var METADATA_STORE_NAME = 'METADATA'; - var PACKAGE_STORE_NAME = 'PACKAGES'; - - async function openDatabase() { - if (typeof indexedDB == 'undefined') { - throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); - } - return new Promise((resolve, reject) => { - var openRequest = indexedDB.open(DB_NAME, DB_VERSION); - openRequest.onupgradeneeded = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - - if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { - db.deleteObjectStore(PACKAGE_STORE_NAME); - } - var packages = db.createObjectStore(PACKAGE_STORE_NAME); - - if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { - db.deleteObjectStore(METADATA_STORE_NAME); - } - var metadata = db.createObjectStore(METADATA_STORE_NAME); - }; - openRequest.onsuccess = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - resolve(db); - }; - openRequest.onerror = reject; - }); - } - - // This is needed as chromium has a limit on per-entry files in IndexedDB - // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 - // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 - // We set the chunk size to 64MB to stay well-below the limit - var CHUNK_SIZE = 64 * 1024 * 1024; - - async function cacheRemotePackage(db, packageName, packageData, packageMeta) { - var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); - var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); - var chunkSliceStart = 0; - var nextChunkSliceStart = 0; - var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); - var finishedChunks = 0; - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - nextChunkSliceStart += CHUNK_SIZE; - var putPackageRequest = packages.put( - packageData.slice(chunkSliceStart, nextChunkSliceStart), - `package/${packageName}/${chunkId}` - ); - chunkSliceStart = nextChunkSliceStart; - putPackageRequest.onsuccess = (event) => { - finishedChunks++; - if (finishedChunks == chunkCount) { - var transaction_metadata = db.transaction( - [METADATA_STORE_NAME], - IDB_RW - ); - var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); - var putMetadataRequest = metadata.put( - { - 'uuid': 
packageMeta.uuid, - 'chunkCount': chunkCount - }, - `metadata/${packageName}` - ); - putMetadataRequest.onsuccess = (event) => resolve(packageData); - putMetadataRequest.onerror = reject; - } - }; - putPackageRequest.onerror = reject; - } - }); - } - - /* - * Check if there's a cached package, and if so whether it's the latest available. - * Resolves to the cached metadata, or `null` if it is missing or out-of-date. - */ - async function checkCachedPackage(db, packageName) { - var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); - var metadata = transaction.objectStore(METADATA_STORE_NAME); - var getRequest = metadata.get(`metadata/${packageName}`); - return new Promise((resolve, reject) => { - getRequest.onsuccess = (event) => { - var result = event.target.result; - if (result && PACKAGE_UUID === result['uuid']) { - resolve(result); - } else { - resolve(null); - } - } - getRequest.onerror = reject; - }); - } - - async function fetchCachedPackage(db, packageName, metadata) { - var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); - var packages = transaction.objectStore(PACKAGE_STORE_NAME); - - var chunksDone = 0; - var totalSize = 0; - var chunkCount = metadata['chunkCount']; - var chunks = new Array(chunkCount); - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - var getRequest = packages.get(`package/${packageName}/${chunkId}`); - getRequest.onsuccess = (event) => { - if (!event.target.result) { - reject(`CachedPackageNotFound for: ${packageName}`); - return; - } - // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now - if (chunkCount == 1) { - resolve(event.target.result); - } else { - chunksDone++; - totalSize += event.target.result.byteLength; - chunks.push(event.target.result); - if (chunksDone == chunkCount) { - if (chunksDone == 1) { - resolve(event.target.result); - } else { - var tempTyped = new Uint8Array(totalSize); - var byteOffset = 0; - for (var chunkId in chunks) { - var buffer = chunks[chunkId]; - tempTyped.set(new Uint8Array(buffer), byteOffset); - byteOffset += buffer.byteLength; - buffer = undefined; - } - chunks = undefined; - resolve(tempTyped.buffer); - tempTyped = undefined; - } - } - } - }; - getRequest.onerror = reject; - } - }); - } - - function processPackageData(arrayBuffer) { - assert(arrayBuffer, 'Loading data file failed.'); - assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); - var byteArray = new Uint8Array(arrayBuffer); - var curr; - // Reuse the bytearray from the XHR as the source for file reads. 
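The caching path above stores each dataset package in IndexedDB under the database name EM_PRELOAD_CACHE, split into 64 MB chunks keyed as package/<name>/<chunkId>, with a metadata/<name> record holding the package UUID and chunk count; a stale UUID makes checkCachedPackage resolve to null and the loader falls back to a fresh download. When experimenting with dataset downloads it can be useful to drop that cache wholesale. A small sketch, assuming browser-side code; the helper name is illustrative:

function clearGeant4PreloadCache(): Promise<void> {
	// 'EM_PRELOAD_CACHE' is the DB_NAME used by the preload scripts above.
	return new Promise((resolve, reject) => {
		const request = indexedDB.deleteDatabase('EM_PRELOAD_CACHE');
		request.onsuccess = () => resolve();
		// Another tab or worker may still hold the database open; deletion then
		// completes once it is released, so treat "blocked" as good enough here.
		request.onblocked = () => resolve();
		request.onerror = () => reject(request.error);
	});
}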
- DataRequest.prototype.byteArray = byteArray; - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - DataRequest.prototype.requests[files[i].filename].onload(); - } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4EMLOW8.6.1.data'); - - } - Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4EMLOW8.6.1.data'); - - Module['preloadResults'] ??= {}; - - async function preloadFallback(error) { - console.error(error); - console.error('falling back to default preload behavior'); - processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); - } - - try { - var db = await openDatabase(); - var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); - var useCached = !!pkgMetadata; - Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; - if (useCached) { - processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); - Module['setStatus']?.(`END DL (G4EMLOW)`); - } else { - var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); - try { - processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) - } catch (error) { - console.error(error); - processPackageData(packageData); - } - } - } catch(e) { - await preloadFallback(e) - .catch((error) => { - loadDataReject(error); - }); - } - - Module['setStatus']?.('Downloading...'); - - } - if (Module['calledRun']) { - runWithFS(Module) - .catch((error) => { - loadDataReject(error); - }); - } else { - (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it - } - - Module['removeRunDependency']('preload_G4EMLOW8.6.1.js.metadata'); - } - - async function runMetaWithFS() { - Module['addRunDependency']('preload_G4EMLOW8.6.1.js.metadata'); - var metadataUrl = Module['locateFile']?.('preload_G4EMLOW8.6.1.js.metadata', '') ?? 'preload_G4EMLOW8.6.1.js.metadata'; - - var response = await fetch(metadataUrl); - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - var json = await response.json(); - return loadPackage(json); - } - - if (Module['calledRun']) { - runMetaWithFS(); - } else { - (Module['preRun'] ??= []).push(runMetaWithFS); - } - - }); -} -// END the loadDataFile function diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js deleted file mode 100644 index 258a5cc3b..000000000 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_G4ENSDFSTATE3.0.js +++ /dev/null @@ -1,345 +0,0 @@ -export default async function loadDataFile(Module) { - - Module['expectedDataFileDownloads'] ??= 0; - Module['expectedDataFileDownloads']++; - // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. 
- var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; - var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; - if (isPthread || isWasmWorker) return; -return new Promise((loadDataResolve, loadDataReject) => { - async function loadPackage(metadata) { - - var PACKAGE_PATH = ''; - if (typeof window === 'object') { - PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); - } else if (typeof process === 'undefined' && typeof location !== 'undefined') { - // web worker - PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); - } - var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/G4ENSDFSTATE3.0.data'; - var REMOTE_PACKAGE_BASE = 'G4ENSDFSTATE3.0.data'; - var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; - var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; - - async function fetchRemotePackage(packageName, packageSize) { - - Module['dataFileDownloads'] ??= {}; - try { - var response = await fetch(packageName); - } catch (e) { - throw new Error(`Network Error: ${packageName}`, {e}); - } - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - - const chunks = []; - const headers = response.headers; - const total = Number(headers.get('Content-Length') ?? packageSize); - let loaded = 0; - - Module['setStatus']?.('Downloading data...'); - const reader = response.body.getReader(); - - while (1) { - var {done, value} = await reader.read(); - if (done) break; - chunks.push(value); - loaded += value.length; - Module['dataFileDownloads'][packageName] = {loaded, total}; - - let totalLoaded = 0; - let totalSize = 0; - - for (const download of Object.values(Module['dataFileDownloads'])) { - totalLoaded += download.loaded; - totalSize += download.total; - } - - Module['setStatus']?.(`DL (G4ENSDFSTATE) (${totalLoaded}/${totalSize})`); - } - Module['setStatus']?.(`END DL (G4ENSDFSTATE)`); - - const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); - let offset = 0; - for (const chunk of chunks) { - packageData.set(chunk, offset); - offset += chunk.length; - } - return packageData.buffer; - } - - async function runWithFS(Module) { - - function assert(check, msg) { - if (!check) throw new Error(msg); - } -Module['FS_createPath']("/", "data", true, true); -Module['FS_createPath']("/data", "G4ENSDFSTATE3.0", true, true); - - /** @constructor */ - function DataRequest(start, end, audio) { - this.start = start; - this.end = end; - this.audio = audio; - } - DataRequest.prototype = { - requests: {}, - open: function(mode, name) { - this.name = name; - this.requests[name] = this; - Module['addRunDependency'](`fp ${this.name}`); - }, - send: function() {}, - onload: function() { - var byteArray = this.byteArray.subarray(this.start, this.end); - this.finish(byteArray); - }, - finish: async function(byteArray) { - var that = this; - // canOwn this data in the filesystem, it is a slice into the heap that will never change - Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); - Module['removeRunDependency'](`fp ${that.name}`); -loadDataResolve(); - this.requests[this.name] = null; - } - }; - - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', 
files[i]['filename']); - } - - var PACKAGE_UUID = metadata['package_uuid']; - var IDB_RO = "readonly"; - var IDB_RW = "readwrite"; - var DB_NAME = "EM_PRELOAD_CACHE"; - var DB_VERSION = 1; - var METADATA_STORE_NAME = 'METADATA'; - var PACKAGE_STORE_NAME = 'PACKAGES'; - - async function openDatabase() { - if (typeof indexedDB == 'undefined') { - throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); - } - return new Promise((resolve, reject) => { - var openRequest = indexedDB.open(DB_NAME, DB_VERSION); - openRequest.onupgradeneeded = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - - if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { - db.deleteObjectStore(PACKAGE_STORE_NAME); - } - var packages = db.createObjectStore(PACKAGE_STORE_NAME); - - if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { - db.deleteObjectStore(METADATA_STORE_NAME); - } - var metadata = db.createObjectStore(METADATA_STORE_NAME); - }; - openRequest.onsuccess = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - resolve(db); - }; - openRequest.onerror = reject; - }); - } - - // This is needed as chromium has a limit on per-entry files in IndexedDB - // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 - // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 - // We set the chunk size to 64MB to stay well-below the limit - var CHUNK_SIZE = 64 * 1024 * 1024; - - async function cacheRemotePackage(db, packageName, packageData, packageMeta) { - var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); - var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); - var chunkSliceStart = 0; - var nextChunkSliceStart = 0; - var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); - var finishedChunks = 0; - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - nextChunkSliceStart += CHUNK_SIZE; - var putPackageRequest = packages.put( - packageData.slice(chunkSliceStart, nextChunkSliceStart), - `package/${packageName}/${chunkId}` - ); - chunkSliceStart = nextChunkSliceStart; - putPackageRequest.onsuccess = (event) => { - finishedChunks++; - if (finishedChunks == chunkCount) { - var transaction_metadata = db.transaction( - [METADATA_STORE_NAME], - IDB_RW - ); - var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); - var putMetadataRequest = metadata.put( - { - 'uuid': packageMeta.uuid, - 'chunkCount': chunkCount - }, - `metadata/${packageName}` - ); - putMetadataRequest.onsuccess = (event) => resolve(packageData); - putMetadataRequest.onerror = reject; - } - }; - putPackageRequest.onerror = reject; - } - }); - } - - /* - * Check if there's a cached package, and if so whether it's the latest available. - * Resolves to the cached metadata, or `null` if it is missing or out-of-date. 
- */ - async function checkCachedPackage(db, packageName) { - var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); - var metadata = transaction.objectStore(METADATA_STORE_NAME); - var getRequest = metadata.get(`metadata/${packageName}`); - return new Promise((resolve, reject) => { - getRequest.onsuccess = (event) => { - var result = event.target.result; - if (result && PACKAGE_UUID === result['uuid']) { - resolve(result); - } else { - resolve(null); - } - } - getRequest.onerror = reject; - }); - } - - async function fetchCachedPackage(db, packageName, metadata) { - var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); - var packages = transaction.objectStore(PACKAGE_STORE_NAME); - - var chunksDone = 0; - var totalSize = 0; - var chunkCount = metadata['chunkCount']; - var chunks = new Array(chunkCount); - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - var getRequest = packages.get(`package/${packageName}/${chunkId}`); - getRequest.onsuccess = (event) => { - if (!event.target.result) { - reject(`CachedPackageNotFound for: ${packageName}`); - return; - } - // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now - if (chunkCount == 1) { - resolve(event.target.result); - } else { - chunksDone++; - totalSize += event.target.result.byteLength; - chunks.push(event.target.result); - if (chunksDone == chunkCount) { - if (chunksDone == 1) { - resolve(event.target.result); - } else { - var tempTyped = new Uint8Array(totalSize); - var byteOffset = 0; - for (var chunkId in chunks) { - var buffer = chunks[chunkId]; - tempTyped.set(new Uint8Array(buffer), byteOffset); - byteOffset += buffer.byteLength; - buffer = undefined; - } - chunks = undefined; - resolve(tempTyped.buffer); - tempTyped = undefined; - } - } - } - }; - getRequest.onerror = reject; - } - }); - } - - function processPackageData(arrayBuffer) { - assert(arrayBuffer, 'Loading data file failed.'); - assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); - var byteArray = new Uint8Array(arrayBuffer); - var curr; - // Reuse the bytearray from the XHR as the source for file reads. 
- DataRequest.prototype.byteArray = byteArray; - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - DataRequest.prototype.requests[files[i].filename].onload(); - } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4ENSDFSTATE3.0.data'); - - } - Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4ENSDFSTATE3.0.data'); - - Module['preloadResults'] ??= {}; - - async function preloadFallback(error) { - console.error(error); - console.error('falling back to default preload behavior'); - processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); - } - - try { - var db = await openDatabase(); - var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); - var useCached = !!pkgMetadata; - Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; - if (useCached) { - processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); - Module['setStatus']?.(`END DL (G4ENSDFSTATE)`); - } else { - var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); - try { - processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) - } catch (error) { - console.error(error); - processPackageData(packageData); - } - } - } catch(e) { - await preloadFallback(e) - .catch((error) => { - loadDataReject(error); - }); - } - - Module['setStatus']?.('Downloading...'); - - } - if (Module['calledRun']) { - runWithFS(Module) - .catch((error) => { - loadDataReject(error); - }); - } else { - (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it - } - - Module['removeRunDependency']('preload_G4ENSDFSTATE3.0.js.metadata'); - } - - async function runMetaWithFS() { - Module['addRunDependency']('preload_G4ENSDFSTATE3.0.js.metadata'); - var metadataUrl = Module['locateFile']?.('preload_G4ENSDFSTATE3.0.js.metadata', '') ?? 'preload_G4ENSDFSTATE3.0.js.metadata'; - - var response = await fetch(metadataUrl); - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - var json = await response.json(); - return loadPackage(json); - } - - if (Module['calledRun']) { - runMetaWithFS(); - } else { - (Module['preRun'] ??= []).push(runMetaWithFS); - } - - }); -} -// END the loadDataFile function diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js deleted file mode 100644 index 6cea7293c..000000000 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_G4NDL4.7.1.js +++ /dev/null @@ -1,416 +0,0 @@ -export default async function loadDataFile(Module) { - - Module['expectedDataFileDownloads'] ??= 0; - Module['expectedDataFileDownloads']++; - // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. 
- var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; - var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; - if (isPthread || isWasmWorker) return; -return new Promise((loadDataResolve, loadDataReject) => { - async function loadPackage(metadata) { - - var PACKAGE_PATH = ''; - if (typeof window === 'object') { - PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); - } else if (typeof process === 'undefined' && typeof location !== 'undefined') { - // web worker - PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); - } - var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/G4NDL4.7.1.data'; - var REMOTE_PACKAGE_BASE = 'G4NDL4.7.1.data'; - var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; - var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; - - async function fetchRemotePackage(packageName, packageSize) { - - Module['dataFileDownloads'] ??= {}; - try { - var response = await fetch(packageName); - } catch (e) { - throw new Error(`Network Error: ${packageName}`, {e}); - } - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - - const chunks = []; - const headers = response.headers; - const total = Number(headers.get('Content-Length') ?? packageSize); - let loaded = 0; - - Module['setStatus']?.('Downloading data...'); - const reader = response.body.getReader(); - - while (1) { - var {done, value} = await reader.read(); - if (done) break; - chunks.push(value); - loaded += value.length; - Module['dataFileDownloads'][packageName] = {loaded, total}; - - let totalLoaded = 0; - let totalSize = 0; - - for (const download of Object.values(Module['dataFileDownloads'])) { - totalLoaded += download.loaded; - totalSize += download.total; - } - - Module['setStatus']?.(`DL (G4NDL) (${totalLoaded}/${totalSize})`); - } - Module['setStatus']?.(`END DL (G4NDL)`); - - const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); - let offset = 0; - for (const chunk of chunks) { - packageData.set(chunk, offset); - offset += chunk.length; - } - return packageData.buffer; - } - - async function runWithFS(Module) { - - function assert(check, msg) { - if (!check) throw new Error(msg); - } -Module['FS_createPath']("/", "data", true, true); -Module['FS_createPath']("/data", "G4NDL4.7.1", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1", "Capture", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Capture", "CrossSection", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Capture", "FS", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Capture", "FSMF6", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1", "Elastic", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Elastic", "CrossSection", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Elastic", "FS", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1", "Fission", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "CrossSection", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "FC", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "FF", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "FS", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "LC", true, true); 
-Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "SC", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Fission", "TC", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1", "Inelastic", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "CrossSection", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F01", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F02", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F03", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F04", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F05", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F06", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F07", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F08", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F09", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F10", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F11", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F12", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F13", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F14", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F15", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F17", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F18", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F19", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F20", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F21", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F22", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F23", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F24", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F25", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F26", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F27", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F28", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F29", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F30", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F31", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F32", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F33", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F34", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F35", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "F36", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/Inelastic", "Gammas", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1", "IsotopeProduction", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/IsotopeProduction", "CrossSection", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1", "JENDL_HE", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/JENDL_HE", "neutron", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/JENDL_HE/neutron", "Elastic", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/JENDL_HE/neutron/Elastic", "CrossSection", true, true); 
-Module['FS_createPath']("/data/G4NDL4.7.1/JENDL_HE/neutron", "Inelastic", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/JENDL_HE/neutron/Inelastic", "CrossSection", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1", "ThermalScattering", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering", "Coherent", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Coherent", "CrossSection", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Coherent", "FS", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering", "Incoherent", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Incoherent", "CrossSection", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Incoherent", "FS", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering", "Inelastic", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Inelastic", "CrossSection", true, true); -Module['FS_createPath']("/data/G4NDL4.7.1/ThermalScattering/Inelastic", "FS", true, true); - - /** @constructor */ - function DataRequest(start, end, audio) { - this.start = start; - this.end = end; - this.audio = audio; - } - DataRequest.prototype = { - requests: {}, - open: function(mode, name) { - this.name = name; - this.requests[name] = this; - Module['addRunDependency'](`fp ${this.name}`); - }, - send: function() {}, - onload: function() { - var byteArray = this.byteArray.subarray(this.start, this.end); - this.finish(byteArray); - }, - finish: async function(byteArray) { - var that = this; - // canOwn this data in the filesystem, it is a slice into the heap that will never change - Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); - Module['removeRunDependency'](`fp ${that.name}`); -loadDataResolve(); - this.requests[this.name] = null; - } - }; - - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', files[i]['filename']); - } - - var PACKAGE_UUID = metadata['package_uuid']; - var IDB_RO = "readonly"; - var IDB_RW = "readwrite"; - var DB_NAME = "EM_PRELOAD_CACHE"; - var DB_VERSION = 1; - var METADATA_STORE_NAME = 'METADATA'; - var PACKAGE_STORE_NAME = 'PACKAGES'; - - async function openDatabase() { - if (typeof indexedDB == 'undefined') { - throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); - } - return new Promise((resolve, reject) => { - var openRequest = indexedDB.open(DB_NAME, DB_VERSION); - openRequest.onupgradeneeded = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - - if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { - db.deleteObjectStore(PACKAGE_STORE_NAME); - } - var packages = db.createObjectStore(PACKAGE_STORE_NAME); - - if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { - db.deleteObjectStore(METADATA_STORE_NAME); - } - var metadata = db.createObjectStore(METADATA_STORE_NAME); - }; - openRequest.onsuccess = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - resolve(db); - }; - openRequest.onerror = reject; - }); - } - - // This is needed as chromium has a limit on per-entry files in IndexedDB - // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 - // 
https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 - // We set the chunk size to 64MB to stay well-below the limit - var CHUNK_SIZE = 64 * 1024 * 1024; - - async function cacheRemotePackage(db, packageName, packageData, packageMeta) { - var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); - var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); - var chunkSliceStart = 0; - var nextChunkSliceStart = 0; - var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); - var finishedChunks = 0; - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - nextChunkSliceStart += CHUNK_SIZE; - var putPackageRequest = packages.put( - packageData.slice(chunkSliceStart, nextChunkSliceStart), - `package/${packageName}/${chunkId}` - ); - chunkSliceStart = nextChunkSliceStart; - putPackageRequest.onsuccess = (event) => { - finishedChunks++; - if (finishedChunks == chunkCount) { - var transaction_metadata = db.transaction( - [METADATA_STORE_NAME], - IDB_RW - ); - var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); - var putMetadataRequest = metadata.put( - { - 'uuid': packageMeta.uuid, - 'chunkCount': chunkCount - }, - `metadata/${packageName}` - ); - putMetadataRequest.onsuccess = (event) => resolve(packageData); - putMetadataRequest.onerror = reject; - } - }; - putPackageRequest.onerror = reject; - } - }); - } - - /* - * Check if there's a cached package, and if so whether it's the latest available. - * Resolves to the cached metadata, or `null` if it is missing or out-of-date. - */ - async function checkCachedPackage(db, packageName) { - var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); - var metadata = transaction.objectStore(METADATA_STORE_NAME); - var getRequest = metadata.get(`metadata/${packageName}`); - return new Promise((resolve, reject) => { - getRequest.onsuccess = (event) => { - var result = event.target.result; - if (result && PACKAGE_UUID === result['uuid']) { - resolve(result); - } else { - resolve(null); - } - } - getRequest.onerror = reject; - }); - } - - async function fetchCachedPackage(db, packageName, metadata) { - var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); - var packages = transaction.objectStore(PACKAGE_STORE_NAME); - - var chunksDone = 0; - var totalSize = 0; - var chunkCount = metadata['chunkCount']; - var chunks = new Array(chunkCount); - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - var getRequest = packages.get(`package/${packageName}/${chunkId}`); - getRequest.onsuccess = (event) => { - if (!event.target.result) { - reject(`CachedPackageNotFound for: ${packageName}`); - return; - } - // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now - if (chunkCount == 1) { - resolve(event.target.result); - } else { - chunksDone++; - totalSize += event.target.result.byteLength; - chunks.push(event.target.result); - if (chunksDone == chunkCount) { - if (chunksDone == 1) { - resolve(event.target.result); - } else { - var tempTyped = new Uint8Array(totalSize); - var byteOffset = 0; - for (var chunkId in chunks) { - var buffer = chunks[chunkId]; - tempTyped.set(new Uint8Array(buffer), byteOffset); - byteOffset += buffer.byteLength; - buffer = undefined; - } - chunks = undefined; - resolve(tempTyped.buffer); - tempTyped = undefined; - } - } - } - }; - 
getRequest.onerror = reject; - } - }); - } - - function processPackageData(arrayBuffer) { - assert(arrayBuffer, 'Loading data file failed.'); - assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); - var byteArray = new Uint8Array(arrayBuffer); - var curr; - // Reuse the bytearray from the XHR as the source for file reads. - DataRequest.prototype.byteArray = byteArray; - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - DataRequest.prototype.requests[files[i].filename].onload(); - } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4NDL4.7.1.data'); - - } - Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4NDL4.7.1.data'); - - Module['preloadResults'] ??= {}; - - async function preloadFallback(error) { - console.error(error); - console.error('falling back to default preload behavior'); - processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); - } - - try { - var db = await openDatabase(); - var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); - var useCached = !!pkgMetadata; - Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; - if (useCached) { - processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); - Module['setStatus']?.(`END DL (G4NDL)`); - } else { - var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); - try { - processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) - } catch (error) { - console.error(error); - processPackageData(packageData); - } - } - } catch(e) { - await preloadFallback(e) - .catch((error) => { - loadDataReject(error); - }); - } - - Module['setStatus']?.('Downloading...'); - - } - if (Module['calledRun']) { - runWithFS(Module) - .catch((error) => { - loadDataReject(error); - }); - } else { - (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it - } - - Module['removeRunDependency']('preload_G4NDL4.7.1.js.metadata'); - } - - async function runMetaWithFS() { - Module['addRunDependency']('preload_G4NDL4.7.1.js.metadata'); - var metadataUrl = Module['locateFile']?.('preload_G4NDL4.7.1.js.metadata', '') ?? 'preload_G4NDL4.7.1.js.metadata'; - - var response = await fetch(metadataUrl); - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - var json = await response.json(); - return loadPackage(json); - } - - if (Module['calledRun']) { - runMetaWithFS(); - } else { - (Module['preRun'] ??= []).push(runMetaWithFS); - } - - }); -} -// END the loadDataFile function diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js deleted file mode 100644 index e7a8b4c13..000000000 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_G4PARTICLEXS4.1.js +++ /dev/null @@ -1,354 +0,0 @@ -export default async function loadDataFile(Module) { - - Module['expectedDataFileDownloads'] ??= 0; - Module['expectedDataFileDownloads']++; - // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. 
- var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; - var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; - if (isPthread || isWasmWorker) return; -return new Promise((loadDataResolve, loadDataReject) => { - async function loadPackage(metadata) { - - var PACKAGE_PATH = ''; - if (typeof window === 'object') { - PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); - } else if (typeof process === 'undefined' && typeof location !== 'undefined') { - // web worker - PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); - } - var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/G4PARTICLEXS4.1.data'; - var REMOTE_PACKAGE_BASE = 'G4PARTICLEXS4.1.data'; - var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; - var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; - - async function fetchRemotePackage(packageName, packageSize) { - - Module['dataFileDownloads'] ??= {}; - try { - var response = await fetch(packageName); - } catch (e) { - throw new Error(`Network Error: ${packageName}`, {e}); - } - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - - const chunks = []; - const headers = response.headers; - const total = Number(headers.get('Content-Length') ?? packageSize); - let loaded = 0; - - Module['setStatus']?.('Downloading data...'); - const reader = response.body.getReader(); - - while (1) { - var {done, value} = await reader.read(); - if (done) break; - chunks.push(value); - loaded += value.length; - Module['dataFileDownloads'][packageName] = {loaded, total}; - - let totalLoaded = 0; - let totalSize = 0; - - for (const download of Object.values(Module['dataFileDownloads'])) { - totalLoaded += download.loaded; - totalSize += download.total; - } - - Module['setStatus']?.(`DL (G4PARTICLEXS) (${totalLoaded}/${totalSize})`); - } - Module['setStatus']?.(`END DL (G4PARTICLEXS)`); - - const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); - let offset = 0; - for (const chunk of chunks) { - packageData.set(chunk, offset); - offset += chunk.length; - } - return packageData.buffer; - } - - async function runWithFS(Module) { - - function assert(check, msg) { - if (!check) throw new Error(msg); - } -Module['FS_createPath']("/", "data", true, true); -Module['FS_createPath']("/data", "G4PARTICLEXS4.1", true, true); -Module['FS_createPath']("/data/G4PARTICLEXS4.1", "He3", true, true); -Module['FS_createPath']("/data/G4PARTICLEXS4.1", "alpha", true, true); -Module['FS_createPath']("/data/G4PARTICLEXS4.1", "deuteron", true, true); -Module['FS_createPath']("/data/G4PARTICLEXS4.1", "gamma", true, true); -Module['FS_createPath']("/data/G4PARTICLEXS4.1", "neutrino", true, true); -Module['FS_createPath']("/data/G4PARTICLEXS4.1/neutrino", "nu_mu", true, true); -Module['FS_createPath']("/data/G4PARTICLEXS4.1", "neutron", true, true); -Module['FS_createPath']("/data/G4PARTICLEXS4.1", "proton", true, true); -Module['FS_createPath']("/data/G4PARTICLEXS4.1", "triton", true, true); - - /** @constructor */ - function DataRequest(start, end, audio) { - this.start = start; - this.end = end; - this.audio = audio; - } - DataRequest.prototype = { - requests: {}, - open: function(mode, name) { - this.name = name; - this.requests[name] = this; - Module['addRunDependency'](`fp ${this.name}`); - 
}, - send: function() {}, - onload: function() { - var byteArray = this.byteArray.subarray(this.start, this.end); - this.finish(byteArray); - }, - finish: async function(byteArray) { - var that = this; - // canOwn this data in the filesystem, it is a slice into the heap that will never change - Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); - Module['removeRunDependency'](`fp ${that.name}`); -loadDataResolve(); - this.requests[this.name] = null; - } - }; - - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', files[i]['filename']); - } - - var PACKAGE_UUID = metadata['package_uuid']; - var IDB_RO = "readonly"; - var IDB_RW = "readwrite"; - var DB_NAME = "EM_PRELOAD_CACHE"; - var DB_VERSION = 1; - var METADATA_STORE_NAME = 'METADATA'; - var PACKAGE_STORE_NAME = 'PACKAGES'; - - async function openDatabase() { - if (typeof indexedDB == 'undefined') { - throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); - } - return new Promise((resolve, reject) => { - var openRequest = indexedDB.open(DB_NAME, DB_VERSION); - openRequest.onupgradeneeded = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - - if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { - db.deleteObjectStore(PACKAGE_STORE_NAME); - } - var packages = db.createObjectStore(PACKAGE_STORE_NAME); - - if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { - db.deleteObjectStore(METADATA_STORE_NAME); - } - var metadata = db.createObjectStore(METADATA_STORE_NAME); - }; - openRequest.onsuccess = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - resolve(db); - }; - openRequest.onerror = reject; - }); - } - - // This is needed as chromium has a limit on per-entry files in IndexedDB - // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 - // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 - // We set the chunk size to 64MB to stay well-below the limit - var CHUNK_SIZE = 64 * 1024 * 1024; - - async function cacheRemotePackage(db, packageName, packageData, packageMeta) { - var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); - var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); - var chunkSliceStart = 0; - var nextChunkSliceStart = 0; - var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); - var finishedChunks = 0; - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - nextChunkSliceStart += CHUNK_SIZE; - var putPackageRequest = packages.put( - packageData.slice(chunkSliceStart, nextChunkSliceStart), - `package/${packageName}/${chunkId}` - ); - chunkSliceStart = nextChunkSliceStart; - putPackageRequest.onsuccess = (event) => { - finishedChunks++; - if (finishedChunks == chunkCount) { - var transaction_metadata = db.transaction( - [METADATA_STORE_NAME], - IDB_RW - ); - var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); - var putMetadataRequest = metadata.put( - { - 'uuid': packageMeta.uuid, - 'chunkCount': chunkCount - }, - `metadata/${packageName}` - ); - putMetadataRequest.onsuccess = (event) => resolve(packageData); - putMetadataRequest.onerror = reject; - } - }; - putPackageRequest.onerror = reject; - } - }); - } - - /* - 
* Check if there's a cached package, and if so whether it's the latest available. - * Resolves to the cached metadata, or `null` if it is missing or out-of-date. - */ - async function checkCachedPackage(db, packageName) { - var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); - var metadata = transaction.objectStore(METADATA_STORE_NAME); - var getRequest = metadata.get(`metadata/${packageName}`); - return new Promise((resolve, reject) => { - getRequest.onsuccess = (event) => { - var result = event.target.result; - if (result && PACKAGE_UUID === result['uuid']) { - resolve(result); - } else { - resolve(null); - } - } - getRequest.onerror = reject; - }); - } - - async function fetchCachedPackage(db, packageName, metadata) { - var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); - var packages = transaction.objectStore(PACKAGE_STORE_NAME); - - var chunksDone = 0; - var totalSize = 0; - var chunkCount = metadata['chunkCount']; - var chunks = new Array(chunkCount); - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - var getRequest = packages.get(`package/${packageName}/${chunkId}`); - getRequest.onsuccess = (event) => { - if (!event.target.result) { - reject(`CachedPackageNotFound for: ${packageName}`); - return; - } - // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now - if (chunkCount == 1) { - resolve(event.target.result); - } else { - chunksDone++; - totalSize += event.target.result.byteLength; - chunks.push(event.target.result); - if (chunksDone == chunkCount) { - if (chunksDone == 1) { - resolve(event.target.result); - } else { - var tempTyped = new Uint8Array(totalSize); - var byteOffset = 0; - for (var chunkId in chunks) { - var buffer = chunks[chunkId]; - tempTyped.set(new Uint8Array(buffer), byteOffset); - byteOffset += buffer.byteLength; - buffer = undefined; - } - chunks = undefined; - resolve(tempTyped.buffer); - tempTyped = undefined; - } - } - } - }; - getRequest.onerror = reject; - } - }); - } - - function processPackageData(arrayBuffer) { - assert(arrayBuffer, 'Loading data file failed.'); - assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); - var byteArray = new Uint8Array(arrayBuffer); - var curr; - // Reuse the bytearray from the XHR as the source for file reads. 
- DataRequest.prototype.byteArray = byteArray; - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - DataRequest.prototype.requests[files[i].filename].onload(); - } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4PARTICLEXS4.1.data'); - - } - Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4PARTICLEXS4.1.data'); - - Module['preloadResults'] ??= {}; - - async function preloadFallback(error) { - console.error(error); - console.error('falling back to default preload behavior'); - processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); - } - - try { - var db = await openDatabase(); - var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); - var useCached = !!pkgMetadata; - Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; - if (useCached) { - processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); - Module['setStatus']?.(`END DL (G4PARTICLEXS)`); - } else { - var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); - try { - processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) - } catch (error) { - console.error(error); - processPackageData(packageData); - } - } - } catch(e) { - await preloadFallback(e) - .catch((error) => { - loadDataReject(error); - }); - } - - Module['setStatus']?.('Downloading...'); - - } - if (Module['calledRun']) { - runWithFS(Module) - .catch((error) => { - loadDataReject(error); - }); - } else { - (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it - } - - Module['removeRunDependency']('preload_G4PARTICLEXS4.1.js.metadata'); - } - - async function runMetaWithFS() { - Module['addRunDependency']('preload_G4PARTICLEXS4.1.js.metadata'); - var metadataUrl = Module['locateFile']?.('preload_G4PARTICLEXS4.1.js.metadata', '') ?? 'preload_G4PARTICLEXS4.1.js.metadata'; - - var response = await fetch(metadataUrl); - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - var json = await response.json(); - return loadPackage(json); - } - - if (Module['calledRun']) { - runMetaWithFS(); - } else { - (Module['preRun'] ??= []).push(runMetaWithFS); - } - - }); -} -// END the loadDataFile function diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js b/src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js deleted file mode 100644 index 65cfae365..000000000 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_G4SAIDDATA2.0.js +++ /dev/null @@ -1,345 +0,0 @@ -export default async function loadDataFile(Module) { - - Module['expectedDataFileDownloads'] ??= 0; - Module['expectedDataFileDownloads']++; - // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. 
- var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; - var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; - if (isPthread || isWasmWorker) return; -return new Promise((loadDataResolve, loadDataReject) => { - async function loadPackage(metadata) { - - var PACKAGE_PATH = ''; - if (typeof window === 'object') { - PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); - } else if (typeof process === 'undefined' && typeof location !== 'undefined') { - // web worker - PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); - } - var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/G4SAIDDATA2.0.data'; - var REMOTE_PACKAGE_BASE = 'G4SAIDDATA2.0.data'; - var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; - var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; - - async function fetchRemotePackage(packageName, packageSize) { - - Module['dataFileDownloads'] ??= {}; - try { - var response = await fetch(packageName); - } catch (e) { - throw new Error(`Network Error: ${packageName}`, {e}); - } - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - - const chunks = []; - const headers = response.headers; - const total = Number(headers.get('Content-Length') ?? packageSize); - let loaded = 0; - - Module['setStatus']?.('Downloading data...'); - const reader = response.body.getReader(); - - while (1) { - var {done, value} = await reader.read(); - if (done) break; - chunks.push(value); - loaded += value.length; - Module['dataFileDownloads'][packageName] = {loaded, total}; - - let totalLoaded = 0; - let totalSize = 0; - - for (const download of Object.values(Module['dataFileDownloads'])) { - totalLoaded += download.loaded; - totalSize += download.total; - } - - Module['setStatus']?.(`DL (G4SAIDDATA) (${totalLoaded}/${totalSize})`); - } - Module['setStatus']?.(`END DL (G4SAIDDATA)`); - - const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); - let offset = 0; - for (const chunk of chunks) { - packageData.set(chunk, offset); - offset += chunk.length; - } - return packageData.buffer; - } - - async function runWithFS(Module) { - - function assert(check, msg) { - if (!check) throw new Error(msg); - } -Module['FS_createPath']("/", "data", true, true); -Module['FS_createPath']("/data", "G4SAIDDATA2.0", true, true); - - /** @constructor */ - function DataRequest(start, end, audio) { - this.start = start; - this.end = end; - this.audio = audio; - } - DataRequest.prototype = { - requests: {}, - open: function(mode, name) { - this.name = name; - this.requests[name] = this; - Module['addRunDependency'](`fp ${this.name}`); - }, - send: function() {}, - onload: function() { - var byteArray = this.byteArray.subarray(this.start, this.end); - this.finish(byteArray); - }, - finish: async function(byteArray) { - var that = this; - // canOwn this data in the filesystem, it is a slice into the heap that will never change - Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); - Module['removeRunDependency'](`fp ${that.name}`); -loadDataResolve(); - this.requests[this.name] = null; - } - }; - - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 0).open('GET', 
files[i]['filename']); - } - - var PACKAGE_UUID = metadata['package_uuid']; - var IDB_RO = "readonly"; - var IDB_RW = "readwrite"; - var DB_NAME = "EM_PRELOAD_CACHE"; - var DB_VERSION = 1; - var METADATA_STORE_NAME = 'METADATA'; - var PACKAGE_STORE_NAME = 'PACKAGES'; - - async function openDatabase() { - if (typeof indexedDB == 'undefined') { - throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); - } - return new Promise((resolve, reject) => { - var openRequest = indexedDB.open(DB_NAME, DB_VERSION); - openRequest.onupgradeneeded = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - - if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { - db.deleteObjectStore(PACKAGE_STORE_NAME); - } - var packages = db.createObjectStore(PACKAGE_STORE_NAME); - - if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { - db.deleteObjectStore(METADATA_STORE_NAME); - } - var metadata = db.createObjectStore(METADATA_STORE_NAME); - }; - openRequest.onsuccess = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - resolve(db); - }; - openRequest.onerror = reject; - }); - } - - // This is needed as chromium has a limit on per-entry files in IndexedDB - // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 - // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 - // We set the chunk size to 64MB to stay well-below the limit - var CHUNK_SIZE = 64 * 1024 * 1024; - - async function cacheRemotePackage(db, packageName, packageData, packageMeta) { - var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); - var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); - var chunkSliceStart = 0; - var nextChunkSliceStart = 0; - var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); - var finishedChunks = 0; - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - nextChunkSliceStart += CHUNK_SIZE; - var putPackageRequest = packages.put( - packageData.slice(chunkSliceStart, nextChunkSliceStart), - `package/${packageName}/${chunkId}` - ); - chunkSliceStart = nextChunkSliceStart; - putPackageRequest.onsuccess = (event) => { - finishedChunks++; - if (finishedChunks == chunkCount) { - var transaction_metadata = db.transaction( - [METADATA_STORE_NAME], - IDB_RW - ); - var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); - var putMetadataRequest = metadata.put( - { - 'uuid': packageMeta.uuid, - 'chunkCount': chunkCount - }, - `metadata/${packageName}` - ); - putMetadataRequest.onsuccess = (event) => resolve(packageData); - putMetadataRequest.onerror = reject; - } - }; - putPackageRequest.onerror = reject; - } - }); - } - - /* - * Check if there's a cached package, and if so whether it's the latest available. - * Resolves to the cached metadata, or `null` if it is missing or out-of-date. 
- */ - async function checkCachedPackage(db, packageName) { - var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); - var metadata = transaction.objectStore(METADATA_STORE_NAME); - var getRequest = metadata.get(`metadata/${packageName}`); - return new Promise((resolve, reject) => { - getRequest.onsuccess = (event) => { - var result = event.target.result; - if (result && PACKAGE_UUID === result['uuid']) { - resolve(result); - } else { - resolve(null); - } - } - getRequest.onerror = reject; - }); - } - - async function fetchCachedPackage(db, packageName, metadata) { - var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); - var packages = transaction.objectStore(PACKAGE_STORE_NAME); - - var chunksDone = 0; - var totalSize = 0; - var chunkCount = metadata['chunkCount']; - var chunks = new Array(chunkCount); - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - var getRequest = packages.get(`package/${packageName}/${chunkId}`); - getRequest.onsuccess = (event) => { - if (!event.target.result) { - reject(`CachedPackageNotFound for: ${packageName}`); - return; - } - // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now - if (chunkCount == 1) { - resolve(event.target.result); - } else { - chunksDone++; - totalSize += event.target.result.byteLength; - chunks.push(event.target.result); - if (chunksDone == chunkCount) { - if (chunksDone == 1) { - resolve(event.target.result); - } else { - var tempTyped = new Uint8Array(totalSize); - var byteOffset = 0; - for (var chunkId in chunks) { - var buffer = chunks[chunkId]; - tempTyped.set(new Uint8Array(buffer), byteOffset); - byteOffset += buffer.byteLength; - buffer = undefined; - } - chunks = undefined; - resolve(tempTyped.buffer); - tempTyped = undefined; - } - } - } - }; - getRequest.onerror = reject; - } - }); - } - - function processPackageData(arrayBuffer) { - assert(arrayBuffer, 'Loading data file failed.'); - assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); - var byteArray = new Uint8Array(arrayBuffer); - var curr; - // Reuse the bytearray from the XHR as the source for file reads. 
- DataRequest.prototype.byteArray = byteArray; - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - DataRequest.prototype.requests[files[i].filename].onload(); - } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4SAIDDATA2.0.data'); - - } - Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/G4SAIDDATA2.0.data'); - - Module['preloadResults'] ??= {}; - - async function preloadFallback(error) { - console.error(error); - console.error('falling back to default preload behavior'); - processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); - } - - try { - var db = await openDatabase(); - var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); - var useCached = !!pkgMetadata; - Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; - if (useCached) { - processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); - Module['setStatus']?.(`END DL (G4SAIDDATA)`); - } else { - var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); - try { - processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) - } catch (error) { - console.error(error); - processPackageData(packageData); - } - } - } catch(e) { - await preloadFallback(e) - .catch((error) => { - loadDataReject(error); - }); - } - - Module['setStatus']?.('Downloading...'); - - } - if (Module['calledRun']) { - runWithFS(Module) - .catch((error) => { - loadDataReject(error); - }); - } else { - (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it - } - - Module['removeRunDependency']('preload_G4SAIDDATA2.0.js.metadata'); - } - - async function runMetaWithFS() { - Module['addRunDependency']('preload_G4SAIDDATA2.0.js.metadata'); - var metadataUrl = Module['locateFile']?.('preload_G4SAIDDATA2.0.js.metadata', '') ?? 'preload_G4SAIDDATA2.0.js.metadata'; - - var response = await fetch(metadataUrl); - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - var json = await response.json(); - return loadPackage(json); - } - - if (Module['calledRun']) { - runMetaWithFS(); - } else { - (Module['preRun'] ??= []).push(runMetaWithFS); - } - - }); -} -// END the loadDataFile function diff --git a/src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js b/src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js deleted file mode 100644 index a6d8d24d0..000000000 --- a/src/libs/geant4_web/geant4_wasm/preload/preload_PhotonEvaporation6.1.js +++ /dev/null @@ -1,345 +0,0 @@ -export default async function loadDataFile(Module) { - - Module['expectedDataFileDownloads'] ??= 0; - Module['expectedDataFileDownloads']++; - // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. 
- var isPthread = typeof ENVIRONMENT_IS_PTHREAD != 'undefined' && ENVIRONMENT_IS_PTHREAD; - var isWasmWorker = typeof ENVIRONMENT_IS_WASM_WORKER != 'undefined' && ENVIRONMENT_IS_WASM_WORKER; - if (isPthread || isWasmWorker) return; -return new Promise((loadDataResolve, loadDataReject) => { - async function loadPackage(metadata) { - - var PACKAGE_PATH = ''; - if (typeof window === 'object') { - PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.substring(0, window.location.pathname.lastIndexOf('/')) + '/'); - } else if (typeof process === 'undefined' && typeof location !== 'undefined') { - // web worker - PACKAGE_PATH = encodeURIComponent(location.pathname.substring(0, location.pathname.lastIndexOf('/')) + '/'); - } - var PACKAGE_NAME = '/workspaces/geant-wasm/build_wasm/data/PhotonEvaporation6.1.data'; - var REMOTE_PACKAGE_BASE = 'PhotonEvaporation6.1.data'; - var REMOTE_PACKAGE_NAME = Module['locateFile']?.(REMOTE_PACKAGE_BASE, '') ?? REMOTE_PACKAGE_BASE; - var REMOTE_PACKAGE_SIZE = metadata['remote_package_size']; - - async function fetchRemotePackage(packageName, packageSize) { - - Module['dataFileDownloads'] ??= {}; - try { - var response = await fetch(packageName); - } catch (e) { - throw new Error(`Network Error: ${packageName}`, {e}); - } - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - - const chunks = []; - const headers = response.headers; - const total = Number(headers.get('Content-Length') ?? packageSize); - let loaded = 0; - - Module['setStatus']?.('Downloading data...'); - const reader = response.body.getReader(); - - while (1) { - var {done, value} = await reader.read(); - if (done) break; - chunks.push(value); - loaded += value.length; - Module['dataFileDownloads'][packageName] = {loaded, total}; - - let totalLoaded = 0; - let totalSize = 0; - - for (const download of Object.values(Module['dataFileDownloads'])) { - totalLoaded += download.loaded; - totalSize += download.total; - } - - Module['setStatus']?.(`DL (PhotonEvaporation) (${totalLoaded}/${totalSize})`); - } - Module['setStatus']?.(`END DL (PhotonEvaporation)`); - - const packageData = new Uint8Array(chunks.map((c) => c.length).reduce((a, b) => a + b, 0)); - let offset = 0; - for (const chunk of chunks) { - packageData.set(chunk, offset); - offset += chunk.length; - } - return packageData.buffer; - } - - async function runWithFS(Module) { - - function assert(check, msg) { - if (!check) throw new Error(msg); - } -Module['FS_createPath']("/", "data", true, true); -Module['FS_createPath']("/data", "PhotonEvaporation6.1", true, true); - - /** @constructor */ - function DataRequest(start, end, audio) { - this.start = start; - this.end = end; - this.audio = audio; - } - DataRequest.prototype = { - requests: {}, - open: function(mode, name) { - this.name = name; - this.requests[name] = this; - Module['addRunDependency'](`fp ${this.name}`); - }, - send: function() {}, - onload: function() { - var byteArray = this.byteArray.subarray(this.start, this.end); - this.finish(byteArray); - }, - finish: async function(byteArray) { - var that = this; - // canOwn this data in the filesystem, it is a slice into the heap that will never change - Module['FS_createDataFile'](this.name, null, byteArray, true, true, true); - Module['removeRunDependency'](`fp ${that.name}`); -loadDataResolve(); - this.requests[this.name] = null; - } - }; - - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - new DataRequest(files[i]['start'], files[i]['end'], files[i]['audio'] || 
0).open('GET', files[i]['filename']); - } - - var PACKAGE_UUID = metadata['package_uuid']; - var IDB_RO = "readonly"; - var IDB_RW = "readwrite"; - var DB_NAME = "EM_PRELOAD_CACHE"; - var DB_VERSION = 1; - var METADATA_STORE_NAME = 'METADATA'; - var PACKAGE_STORE_NAME = 'PACKAGES'; - - async function openDatabase() { - if (typeof indexedDB == 'undefined') { - throw new Error('using IndexedDB to cache data can only be done on a web page or in a web worker'); - } - return new Promise((resolve, reject) => { - var openRequest = indexedDB.open(DB_NAME, DB_VERSION); - openRequest.onupgradeneeded = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - - if (db.objectStoreNames.contains(PACKAGE_STORE_NAME)) { - db.deleteObjectStore(PACKAGE_STORE_NAME); - } - var packages = db.createObjectStore(PACKAGE_STORE_NAME); - - if (db.objectStoreNames.contains(METADATA_STORE_NAME)) { - db.deleteObjectStore(METADATA_STORE_NAME); - } - var metadata = db.createObjectStore(METADATA_STORE_NAME); - }; - openRequest.onsuccess = (event) => { - var db = /** @type {IDBDatabase} */ (event.target.result); - resolve(db); - }; - openRequest.onerror = reject; - }); - } - - // This is needed as chromium has a limit on per-entry files in IndexedDB - // https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&sq=package:chromium&g=0&l=177 - // https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60 - // We set the chunk size to 64MB to stay well-below the limit - var CHUNK_SIZE = 64 * 1024 * 1024; - - async function cacheRemotePackage(db, packageName, packageData, packageMeta) { - var transactionPackages = db.transaction([PACKAGE_STORE_NAME], IDB_RW); - var packages = transactionPackages.objectStore(PACKAGE_STORE_NAME); - var chunkSliceStart = 0; - var nextChunkSliceStart = 0; - var chunkCount = Math.ceil(packageData.byteLength / CHUNK_SIZE); - var finishedChunks = 0; - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - nextChunkSliceStart += CHUNK_SIZE; - var putPackageRequest = packages.put( - packageData.slice(chunkSliceStart, nextChunkSliceStart), - `package/${packageName}/${chunkId}` - ); - chunkSliceStart = nextChunkSliceStart; - putPackageRequest.onsuccess = (event) => { - finishedChunks++; - if (finishedChunks == chunkCount) { - var transaction_metadata = db.transaction( - [METADATA_STORE_NAME], - IDB_RW - ); - var metadata = transaction_metadata.objectStore(METADATA_STORE_NAME); - var putMetadataRequest = metadata.put( - { - 'uuid': packageMeta.uuid, - 'chunkCount': chunkCount - }, - `metadata/${packageName}` - ); - putMetadataRequest.onsuccess = (event) => resolve(packageData); - putMetadataRequest.onerror = reject; - } - }; - putPackageRequest.onerror = reject; - } - }); - } - - /* - * Check if there's a cached package, and if so whether it's the latest available. - * Resolves to the cached metadata, or `null` if it is missing or out-of-date. 
- */ - async function checkCachedPackage(db, packageName) { - var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO); - var metadata = transaction.objectStore(METADATA_STORE_NAME); - var getRequest = metadata.get(`metadata/${packageName}`); - return new Promise((resolve, reject) => { - getRequest.onsuccess = (event) => { - var result = event.target.result; - if (result && PACKAGE_UUID === result['uuid']) { - resolve(result); - } else { - resolve(null); - } - } - getRequest.onerror = reject; - }); - } - - async function fetchCachedPackage(db, packageName, metadata) { - var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO); - var packages = transaction.objectStore(PACKAGE_STORE_NAME); - - var chunksDone = 0; - var totalSize = 0; - var chunkCount = metadata['chunkCount']; - var chunks = new Array(chunkCount); - - return new Promise((resolve, reject) => { - for (var chunkId = 0; chunkId < chunkCount; chunkId++) { - var getRequest = packages.get(`package/${packageName}/${chunkId}`); - getRequest.onsuccess = (event) => { - if (!event.target.result) { - reject(`CachedPackageNotFound for: ${packageName}`); - return; - } - // If there's only 1 chunk, there's nothing to concatenate it with so we can just return it now - if (chunkCount == 1) { - resolve(event.target.result); - } else { - chunksDone++; - totalSize += event.target.result.byteLength; - chunks.push(event.target.result); - if (chunksDone == chunkCount) { - if (chunksDone == 1) { - resolve(event.target.result); - } else { - var tempTyped = new Uint8Array(totalSize); - var byteOffset = 0; - for (var chunkId in chunks) { - var buffer = chunks[chunkId]; - tempTyped.set(new Uint8Array(buffer), byteOffset); - byteOffset += buffer.byteLength; - buffer = undefined; - } - chunks = undefined; - resolve(tempTyped.buffer); - tempTyped = undefined; - } - } - } - }; - getRequest.onerror = reject; - } - }); - } - - function processPackageData(arrayBuffer) { - assert(arrayBuffer, 'Loading data file failed.'); - assert(arrayBuffer.constructor.name === ArrayBuffer.name, 'bad input to processPackageData'); - var byteArray = new Uint8Array(arrayBuffer); - var curr; - // Reuse the bytearray from the XHR as the source for file reads. 
- DataRequest.prototype.byteArray = byteArray; - var files = metadata['files']; - for (var i = 0; i < files.length; ++i) { - DataRequest.prototype.requests[files[i].filename].onload(); - } Module['removeRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/PhotonEvaporation6.1.data'); - - } - Module['addRunDependency']('datafile_/workspaces/geant-wasm/build_wasm/data/PhotonEvaporation6.1.data'); - - Module['preloadResults'] ??= {}; - - async function preloadFallback(error) { - console.error(error); - console.error('falling back to default preload behavior'); - processPackageData(await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE)); - } - - try { - var db = await openDatabase(); - var pkgMetadata = await checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME); - var useCached = !!pkgMetadata; - Module['preloadResults'][PACKAGE_NAME] = {fromCache: useCached}; - if (useCached) { - processPackageData(await fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, pkgMetadata)); - Module['setStatus']?.(`END DL (PhotonEvaporation)`); - } else { - var packageData = await fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE); - try { - processPackageData(await cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID})) - } catch (error) { - console.error(error); - processPackageData(packageData); - } - } - } catch(e) { - await preloadFallback(e) - .catch((error) => { - loadDataReject(error); - }); - } - - Module['setStatus']?.('Downloading...'); - - } - if (Module['calledRun']) { - runWithFS(Module) - .catch((error) => { - loadDataReject(error); - }); - } else { - (Module['preRun'] ??= []).push(runWithFS); // FS is not initialized yet, wait for it - } - - Module['removeRunDependency']('preload_PhotonEvaporation6.1.js.metadata'); - } - - async function runMetaWithFS() { - Module['addRunDependency']('preload_PhotonEvaporation6.1.js.metadata'); - var metadataUrl = Module['locateFile']?.('preload_PhotonEvaporation6.1.js.metadata', '') ?? 
'preload_PhotonEvaporation6.1.js.metadata'; - - var response = await fetch(metadataUrl); - if (!response.ok) { - throw new Error(`${response.status}: ${response.url}`); - } - var json = await response.json(); - return loadPackage(json); - } - - if (Module['calledRun']) { - runMetaWithFS(); - } else { - (Module['preRun'] ??= []).push(runMetaWithFS); - } - - }); -} -// END the loadDataFile function diff --git a/src/libs/geant4_web/geantWorker.worker.ts b/src/libs/geant4_web/geantWorker.worker.ts index dd439cf0b..73d4565f7 100644 --- a/src/libs/geant4_web/geantWorker.worker.ts +++ b/src/libs/geant4_web/geantWorker.worker.ts @@ -1,11 +1,19 @@ -import createMainModule from './geant4_wasm/geant4_wasm' +import createMainModule from './geant-web-stubs/geant4_wasm' -import { default as initG4EMLOW } from './geant4_wasm/preload/preload_G4EMLOW8.6.1'; -import { default as initG4ENSDFSTATE } from './geant4_wasm/preload/preload_G4ENSDFSTATE3.0'; -import { default as initG4NDL } from './geant4_wasm/preload/preload_G4NDL4.7.1'; -import { default as initG4PARTICLEXS } from './geant4_wasm/preload/preload_G4PARTICLEXS4.1'; -import { default as initG4SAIDDATA } from './geant4_wasm/preload/preload_G4SAIDDATA2.0'; -import { default as initPhotoEvaporation } from './geant4_wasm/preload/preload_PhotonEvaporation6.1'; +import { default as initG4EMLOW } from './geant-web-stubs/preload/preload_G4EMLOW8.6.1'; +import { default as initG4ENSDFSTATE } from './geant-web-stubs/preload/preload_G4ENSDFSTATE3.0'; +import { default as initG4NDL } from './geant-web-stubs/preload/preload_G4NDL4.7.1'; +import { default as initG4PARTICLEXS } from './geant-web-stubs/preload/preload_G4PARTICLEXS4.1'; +import { default as initG4SAIDDATA } from './geant-web-stubs/preload/preload_G4SAIDDATA2.0'; +import { default as initPhotoEvaporation } from './geant-web-stubs/preload/preload_PhotonEvaporation6.1'; + +import { + GeantWorkerMessage, + GeantWorkerMessageType, + GeantWorkerMessageFile +} from './GeantWorkerInterface'; + +import { TextDecoder } from 'util'; const s3_prefix_map: Record = { ".wasm": "https://s3p.cloud.cyfronet.pl/geant4-wasm/", @@ -75,21 +83,10 @@ var preModule = { }; var mod = createMainModule(preModule); -mod.then((module) => { - const tClass = new module.TestClass(1, 2); - - console.log(tClass.testMethod()); - const vec = new module.vector_int(); - vec.push_back(1); - vec.push_back(2); - vec.push_back(3); - - console.log(tClass.complicatedFunction(vec)); -}); - -ctx.onmessage = async (event: MessageEvent) => { + +ctx.onmessage = async (event: MessageEvent) => { switch (event.data.type) { - case "loadDepsData": { + case GeantWorkerMessageType.INIT_DATA_FILES: { const res = await mod.then(async (module) => { console.log("Initializing lazy files..."); @@ -107,7 +104,7 @@ ctx.onmessage = async (event: MessageEvent) => { }); break; } - case "loadDepsLazy": { + case GeantWorkerMessageType.INIT_LAZY_FILES: { const res = await mod.then(async (module) => { module.FS_createPath('/', 'data', true, true); module.FS_createPath('/data', 'G4EMLOW8.6.1', true, true); @@ -149,27 +146,31 @@ ctx.onmessage = async (event: MessageEvent) => { }); break; } - case "runSimulation": - try { - console.log("Running simulation..."); - const initResult = await mod.then((module) => { - module.Geant4_init() - }); - console.log("Initialization result:", initResult); - const result = await mod.then((module) => module.Geant4_run()); - ctx.postMessage({ - type: "result", - result: result - }); - } catch (error: unknown) { + case 
GeantWorkerMessageType.CREATE_FILE:
+			await mod.then((module) => {
+				const data = event.data.data as GeantWorkerMessageFile;
+
+				module.FS.createFile("/", data.name, null, true, true);
+				module.FS.writeFile(data.name, data.data);
+			});
+			break;
+		case GeantWorkerMessageType.READ_FILE:
+			await mod.then((module) => {
+				const fileName = event.data.data as string;
+
+				const fileContent = module.FS.readFile(fileName);
+				ctx.postMessage({
-					type: "error",
-					message: (error as Error).message
-				});
-			}
+					type: GeantWorkerMessageType.FILE_RESPONSE,
+					data: {
+						name: fileName,
+						data: new TextDecoder().decode(fileContent)
+					} as GeantWorkerMessageFile
+				} as GeantWorkerMessage);
+			});
 			break;
-		case "runGDML":
+		case GeantWorkerMessageType.RUN_SIMULATION:
 			try {
 				console.log("Running GDML simulation...");
 				const gdmlResult = await mod.then((module) => {
@@ -296,7 +297,7 @@ ctx.onmessage = async (event: MessageEvent) => {
 /score/dumpQuantityToFile Pr fluxdiff diff.txt
 `
 					);
-					return module.Geant4_GDML();
+					return module.Geant4GDMRun("geom.gdml", "init.mac");
 				});
 				console.log("GDML run result:", gdmlResult);
@@ -318,6 +319,6 @@ ctx.onmessage = async (event: MessageEvent) => {
 		}
 			break;
 		default:
-			console.warn("Unknown message type:", event.data.type);
+			console.warn("Unknown message type:", event.data);
 	}
 }
\ No newline at end of file
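
A minimal sketch of the main-thread side of this worker protocol, under stated assumptions: the enum members and message shapes below are reconstructed from their usage in geantWorker.worker.ts, since GeantWorkerInterface.ts is not part of this diff, and the worker instantiation is only illustrative.

// Hypothetical main-thread counterpart to geantWorker.worker.ts.
// All names and string values below are inferred from the worker code and
// may differ from the actual GeantWorkerInterface.ts definitions.
enum GeantWorkerMessageType {
	INIT_DATA_FILES = 'INIT_DATA_FILES',
	INIT_LAZY_FILES = 'INIT_LAZY_FILES',
	CREATE_FILE = 'CREATE_FILE',
	READ_FILE = 'READ_FILE',
	FILE_RESPONSE = 'FILE_RESPONSE',
	RUN_SIMULATION = 'RUN_SIMULATION'
}

interface GeantWorkerMessageFile {
	name: string;
	data: string;
}

interface GeantWorkerMessage {
	type: GeantWorkerMessageType;
	data?: GeantWorkerMessageFile | string;
}

// However the project actually instantiates the worker; shown here with the
// standard webpack 5 worker pattern.
const worker = new Worker(new URL('./geantWorker.worker.ts', import.meta.url));

worker.onmessage = (event: MessageEvent<GeantWorkerMessage>) => {
	if (event.data.type === GeantWorkerMessageType.FILE_RESPONSE) {
		const file = event.data.data as GeantWorkerMessageFile;
		console.log(`worker returned ${file.name}, ${file.data.length} characters`);
	}
};

// Make the Geant4 datasets visible to the WASM module, then stage the GDML
// geometry that Geant4GDMRun("geom.gdml", "init.mac") expects and start the run.
worker.postMessage({ type: GeantWorkerMessageType.INIT_DATA_FILES } as GeantWorkerMessage);
worker.postMessage({
	type: GeantWorkerMessageType.CREATE_FILE,
	data: { name: 'geom.gdml', data: '<gdml>…</gdml>' /* placeholder geometry */ }
} as GeantWorkerMessage);
worker.postMessage({ type: GeantWorkerMessageType.RUN_SIMULATION } as GeantWorkerMessage);

// Once the run has reported completion, request one of the scorer dumps that
// the macro above writes (here the "fluxdiff" dump, diff.txt).
worker.postMessage({
	type: GeantWorkerMessageType.READ_FILE,
	data: 'diff.txt'
} as GeantWorkerMessage);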