diff --git a/Makefile b/Makefile
index 6d074e578..9a7d1e601 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ WAMR_DIR = _build/wamr
GENESIS_WASM_BRANCH = feat/http-checkpoint
GENESIS_WASM_REPO = https://github.com/permaweb/ao.git
-GENESIS_WASM_SERVER_DIR = _build/genesis_wasm/genesis-wasm-server
+GENESIS_WASM_SERVER_DIR = _build/genesis-wasm/genesis-wasm-server
ifdef HB_DEBUG
WAMR_FLAGS = -DWAMR_ENABLE_LOG=1 -DWAMR_BUILD_DUMP_CALL_STACK=1 -DCMAKE_BUILD_TYPE=Debug
diff --git a/README.md b/README.md
index dc6bf80e1..802894d3a 100644
--- a/README.md
+++ b/README.md
@@ -85,7 +85,7 @@ all devices and sets up default stores on port 10000.
HyperBEAM supports several optional build profiles that enable additional features:
-- `genesis_wasm`: Enables Genesis WebAssembly support
+- `genesis-wasm`: Enables Genesis WebAssembly support
- `rocksdb`: Enables RocksDB storage backend (adds RocksDB v1.8.0 dependency)
- `http3`: Enables HTTP/3 support via QUIC protocol
@@ -99,14 +99,14 @@ To start a shell with profiles:
rebar3 as rocksdb shell
# Multiple profiles
-rebar3 as rocksdb, genesis_wasm shell
+rebar3 as rocksdb, genesis-wasm shell
```
To create a release with profiles:
```bash
# Create release with profiles
-rebar3 as rocksdb,genesis_wasm release
+rebar3 as rocksdb,genesis-wasm release
```
Note: Profiles modify compile-time options that get baked into the release. Choose the profiles you need before starting HyperBEAM.
diff --git a/docs/llms-full.txt b/docs/llms-full.txt
index 29c722720..cbb9a6d27 100644
--- a/docs/llms-full.txt
+++ b/docs/llms-full.txt
@@ -14175,7 +14175,7 @@ build system and the runtime execution environment.
## Function Index ##
-
+
@@ -14198,11 +14198,11 @@ Returns a list of all feature flags that the node supports.
Returns true if the feature flag is enabled.
-
+
-### genesis_wasm/0 ###
+### genesis-wasm/0 ###
-`genesis_wasm() -> any()`
+`genesis-wasm() -> any()`
@@ -21299,7 +21299,7 @@ HyperBEAM uses build profiles to enable optional features, often requiring extra
**Available Profiles (Examples):**
-* `genesis_wasm`: Enables Genesis WebAssembly support.
+* `genesis-wasm`: Enables Genesis WebAssembly support.
* `rocksdb`: Enables the RocksDB storage backend.
* `http3`: Enables HTTP/3 support.
@@ -21310,7 +21310,7 @@ HyperBEAM uses build profiles to enable optional features, often requiring extra
rebar3 as rocksdb shell
# Start with RocksDB and Genesis WASM profiles
-rebar3 as rocksdb, genesis_wasm shell
+rebar3 as rocksdb, genesis-wasm shell
```
*Note: Choose profiles **before** starting the shell, as they affect compile-time options.*
diff --git a/docs/misc/installation-core/hyperbeam-setup-config/setup.md b/docs/misc/installation-core/hyperbeam-setup-config/setup.md
index 5d497fe90..66c43293c 100644
--- a/docs/misc/installation-core/hyperbeam-setup-config/setup.md
+++ b/docs/misc/installation-core/hyperbeam-setup-config/setup.md
@@ -76,7 +76,7 @@ HyperBEAM uses a `config.flat` file for configuration when running as a release.
### **b. Build the Release (with Optional Profiles)**
-You can build a standard release or include specific profiles for additional features (like `genesis_wasm`, `rocksdb`, `http3`).
+You can build a standard release or include specific profiles for additional features (like `genesis-wasm`, `rocksdb`, `http3`).
To build a standard release:
```bash
diff --git a/docs/resources/source-code/hb_features.md b/docs/resources/source-code/hb_features.md
index 1bb652ad2..71ea7fc16 100644
--- a/docs/resources/source-code/hb_features.md
+++ b/docs/resources/source-code/hb_features.md
@@ -15,7 +15,7 @@ build system and the runtime execution environment.
## Function Index ##
-
+
@@ -38,11 +38,11 @@ Returns a list of all feature flags that the node supports.
Returns true if the feature flag is enabled.
-
+
-### genesis_wasm/0 ###
+### genesis-wasm/0 ###
-`genesis_wasm() -> any()`
+`genesis-wasm() -> any()`
diff --git a/docs/run/running-a-hyperbeam-node.md b/docs/run/running-a-hyperbeam-node.md
index 30f9df072..18d3b9669 100644
--- a/docs/run/running-a-hyperbeam-node.md
+++ b/docs/run/running-a-hyperbeam-node.md
@@ -162,7 +162,7 @@ HyperBEAM uses build profiles to enable optional features, often requiring extra
**Available Profiles (Examples):**
-* `genesis_wasm`: Enables Genesis WebAssembly support.
+* `genesis-wasm`: Enables Genesis WebAssembly support.
* `rocksdb`: Enables the RocksDB storage backend.
* `http3`: Enables HTTP/3 support.
@@ -173,7 +173,7 @@ HyperBEAM uses build profiles to enable optional features, often requiring extra
rebar3 as rocksdb shell
# Start with RocksDB and Genesis WASM profiles
-rebar3 as rocksdb, genesis_wasm shell
+rebar3 as rocksdb, genesis-wasm shell
```
*Note: Choose profiles **before** starting the shell, as they affect compile-time options.*
diff --git a/rebar.config b/rebar.config
index 0e5b04494..c59acb3a1 100644
--- a/rebar.config
+++ b/rebar.config
@@ -2,9 +2,9 @@
{plugins, [pc, rebar3_rustler, rebar_edown_plugin]}.
{profiles, [
- {no_events, [{erl_opts, [{d, 'NO_EVENTS', true}]}]},
- {store_events, [{erl_opts, [{d, 'STORE_EVENTS', true}]}]},
- {ao_profiling, [{erl_opts, [{d, 'AO_PROFILING', true}]}]},
+ {'no-events', [{erl_opts, [{d, 'NO_EVENTS', true}]}]},
+ {'store-events', [{erl_opts, [{d, 'STORE_EVENTS', true}]}]},
+ {'ao-profiling', [{erl_opts, [{d, 'AO_PROFILING', true}]}]},
{eflame,
[
{deps,
@@ -20,7 +20,7 @@
{erl_opts, [{d, 'ENABLE_EFLAME', true}]}
]
},
- {genesis_wasm, [
+ {'genesis-wasm', [
{erl_opts, [{d, 'ENABLE_GENESIS_WASM', true}]},
{pre_hooks, [
{compile, "make -C \"${REBAR_ROOT_DIR}\" setup-genesis-wasm"}
@@ -28,7 +28,7 @@
{relx, [
{overlay, [
{copy,
- "_build/genesis_wasm/genesis-wasm-server",
+ "_build/genesis-wasm/genesis-wasm-server",
"genesis-wasm-server"
}
]}
diff --git a/scripts/hyper-token-p4-client.lua b/scripts/hyper-token-p4-client.lua
index d6a14ce9e..3ff84b139 100644
--- a/scripts/hyper-token-p4-client.lua
+++ b/scripts/hyper-token-p4-client.lua
@@ -3,13 +3,14 @@
-- Find the user's balance in the current ledger state.
function balance(base, request)
+ ao.event({ "Client received balance request" })
local status, res = ao.resolve({
path =
base["ledger-path"]
.. "/now/balance/"
.. request["target"]
})
- ao.event({ "client received balance response",
+ ao.event({ "Client received balance response",
{ status = status, res = res, target = request["target"] } }
)
-- If the balance request fails (most likely because the user has no balance),
@@ -25,7 +26,7 @@ end
-- Charge the user's balance in the current ledger state.
function charge(base, request)
ao.event("debug_charge", {
- "client starting charge",
+ "Client starting charge",
{ request = request, base = base }
})
local status, res = ao.resolve({
diff --git a/scripts/hyper-token-p4.lua b/scripts/hyper-token-p4.lua
index ef7e323f0..83da6a997 100644
--- a/scripts/hyper-token-p4.lua
+++ b/scripts/hyper-token-p4.lua
@@ -47,7 +47,7 @@ function charge(base, assignment)
-- cost of an execution at lower than its actual cost. Subsequently, the
-- ledger should at least debit the source, even if the source may not
-- deposit to restore this balance.
- ao.event({ "Debit request validated: ", { assignment = assignment } })
+ ao.event({ "Debit request validated, balances before charge: ", { balances = base.balance } })
base.balance = base.balance or {}
base.balance[request.account] =
(base.balance[request.account] or 0) - request.quantity
@@ -55,7 +55,6 @@ function charge(base, assignment)
-- Increment the balance of the recipient account.
base.balance[request.recipient] =
(base.balance[request.recipient] or 0) + request.quantity
-
- ao.event("debug_charge", { "Charge processed: ", { balances = base.balance } })
+ ao.event({ "Debit request processed, balances after charge: ", { balances = base.balance } })
return "ok", base
end
diff --git a/scripts/hyper-token.lua b/scripts/hyper-token.lua
index 016d9713e..779cf77c1 100644
--- a/scripts/hyper-token.lua
+++ b/scripts/hyper-token.lua
@@ -294,7 +294,7 @@ end
-- be from our own root ledger, or from a sub-ledger that is precisely the same
-- as our own.
local function validate_new_peer_ledger(base, request)
- ao.event({ "Validating peer ledger: ", { request = request } })
+ ao.event({ "Validating peer ledger: ", { action = request.action } })
-- Check if the request is from the root ledger.
if is_root(base) or (base.token == request.from) then
@@ -308,7 +308,7 @@ local function validate_new_peer_ledger(base, request)
-- modified to remove the `authority' and `scheduler' fields.
-- This ensures that the process we are receiving the `credit-notice` from
-- has the same structure as our own process.
- ao.event({ "Calculating expected `base` from self", { base = base } })
+ ao.event({ "Calculating expected `base` from self" })
local status, proc, expected
status, proc = ao.resolve({"as", "message@1.0", base}, "process")
-- Reset the `authority' and `scheduler' fields to nil, to ensure that the
@@ -320,12 +320,10 @@ local function validate_new_peer_ledger(base, request)
proc,
{ path = "id", commitments = "none" }
)
- ao.event({ "Expected `from-base`", { status = status, expected = expected } })
+ ao.event("debug_base", { "Expected `from-base`", { status = status, expected = expected, proc = proc } })
-- Check if the `from-base' field is present in the assignment.
if not request["from-base"] then
- ao.event({ "`from-base` field not found in message", {
- request = request
- }})
+ ao.event({ "`from-base` field not found in message" })
return false
end
@@ -474,6 +472,10 @@ function validate_request(incoming_base, assignment)
})
end
+ ao.event({ "ensure_initialized", {
+ status = status,
+ base = base
+ }})
-- First, ensure that the message has not already been processed.
ao.event("Deduplicating message.", {
["history-length"] = #(base.dedup or {})
@@ -598,7 +600,8 @@ local function debit_balance(base, request)
if type(source_balance) ~= "number" then
return "error", log_result(base, "error", {
message = "Source balance is not a number.",
- balance = source_balance
+ balance = source_balance,
+ type = type(source_balance)
})
end
@@ -620,7 +623,7 @@ local function debit_balance(base, request)
})
end
- ao.event({ "Deducting funds:", { request = request } })
+ ao.event({ "Deducting funds" })
base.balance[source] = source_balance - request.quantity
ao.event({ "Balances after deduction:",
{ balances = base.balance, ledgers = base.ledgers } }
@@ -655,10 +658,20 @@ end
-- Xfer in: Sub-ledger = Dec User balance
-- C-N in: Root = Inc User balance, Dec Sub-ledger balance
function transfer(base, assignment)
- ao.event({ "Transfer request received", { assignment = assignment } })
+ ao.event({ "Transfer request received", {
+ balances = base.balance,
+ ledgers = base.ledgers,
+ path = assignment.path,
+ body = assignment.body
+ } })
-- Verify the security of the request.
local status, request
status, base, request = validate_request(base, assignment)
+ ao.event({ "Transfer request validated", {
+ status = status,
+ balances = base.balance,
+ ledgers = base.ledgers
+ } })
if status ~= "ok" or not request then
return "ok", base
end
@@ -691,6 +704,7 @@ function transfer(base, assignment)
-- another user. We credit the recipient's balance, or the sub-ledger's
-- balance if the request has a `route' key.
local direct_recipient = request.route or request.recipient
+ ao.event({ "Balances before credit", { balances = base.balance } })
base.balance[direct_recipient] =
(base.balance[direct_recipient] or 0) + quantity
base = send(base, {
@@ -700,12 +714,14 @@ function transfer(base, assignment)
quantity = quantity,
sender = request.from
})
+ ao.event({ "Balances after credit", { balances = base.balance } })
return log_result(base, "ok", {
message = "Direct or root transfer processed successfully.",
from_user = request.from,
to = direct_recipient,
explicit_recipient = request.recipient,
- quantity = quantity
+ quantity = quantity,
+ balances = base.balance
})
end
@@ -750,7 +766,7 @@ end
-- Process credit notices from other ledgers.
_G["credit-notice"] = function (base, assignment)
- ao.event({ "Credit-Notice received", { assignment = assignment } })
+ ao.event({ "Credit-Notice received" })
-- Verify the security of the request.
local status, request
@@ -801,7 +817,8 @@ _G["credit-notice"] = function (base, assignment)
to_ledger = request.sender,
to_user = request.recipient,
quantity = quantity,
- balance = base.balance[request.recipient]
+ balance = base.balance[request.recipient],
+ balances = base.balance
})
end
@@ -867,7 +884,7 @@ end
--- We route any `action' to the appropriate function based on the request path.
function compute(base, assignment)
ao.event({ "compute called",
- { balance = base.balance, ledgers = base.ledgers } })
+ { balance = base.balance, ledgers = base.ledgers, action = assignment.body.action or "unknown" } })
assignment.body.action = string.lower(assignment.body.action or "")
@@ -880,6 +897,7 @@ function compute(base, assignment)
elseif assignment.body.action == "register-remote" then
return _G["register-remote"](base, assignment)
else
+ ao.event({ "Unknown action received, initializing process." })
-- Handle unknown `action' values.
_, base = ensure_initialized(base, assignment)
base.results = {
diff --git a/src/dev_codec_httpsig.erl b/src/dev_codec_httpsig.erl
index c104ec2ac..8d3b12ae0 100644
--- a/src/dev_codec_httpsig.erl
+++ b/src/dev_codec_httpsig.erl
@@ -557,7 +557,7 @@ validate_large_message_from_http_test() ->
committed_id_test() ->
Msg = #{ <<"basic">> => <<"value">> },
- Signed = hb_message:commit(Msg, hb:wallet()),
+ Signed = hb_message:commit(Msg, #{ priv_wallet => hb:wallet() }),
?assert(hb_message:verify(Signed, all, #{})),
?event({signed_msg, Signed}),
UnsignedID = hb_message:id(Signed, none),
@@ -599,8 +599,14 @@ commit_secret_key_test() ->
multicommitted_id_test() ->
Msg = #{ <<"basic">> => <<"value">> },
- Signed1 = hb_message:commit(Msg, Wallet1 = ar_wallet:new()),
- Signed2 = hb_message:commit(Signed1, Wallet2 = ar_wallet:new()),
+ Signed1 = hb_message:commit(
+ Msg,
+ #{ priv_wallet => Wallet1 = ar_wallet:new() }
+ ),
+ Signed2 = hb_message:commit(
+ Signed1,
+ #{ priv_wallet => Wallet2 = ar_wallet:new() }
+ ),
Addr1 = hb_util:human_id(ar_wallet:to_address(Wallet1)),
Addr2 = hb_util:human_id(ar_wallet:to_address(Wallet2)),
?event({signed_msg, Signed2}),
@@ -624,6 +630,6 @@ sign_and_verify_link_test() ->
},
NormMsg = hb_message:convert(Msg, <<"structured@1.0">>, #{}),
?event({msg, NormMsg}),
- Signed = hb_message:commit(NormMsg, hb:wallet()),
+ Signed = hb_message:commit(NormMsg, #{ priv_wallet => hb:wallet() }),
?event({signed_msg, Signed}),
?assert(hb_message:verify(Signed)).
diff --git a/src/dev_codec_httpsig_conv.erl b/src/dev_codec_httpsig_conv.erl
index cc08b85d0..8aeb6a683 100644
--- a/src/dev_codec_httpsig_conv.erl
+++ b/src/dev_codec_httpsig_conv.erl
@@ -256,7 +256,7 @@ from_body_part(InlinedKey, Part, Opts) ->
hb_maps:without(
[
<<"content-disposition">>,
- <<"content-type">>,
+ % <<"content-type">>,
<<"ao-body-key">>,
<<"content-digest">>
],
@@ -652,7 +652,7 @@ group_maps(Map, Parent, Top, Opts) when is_map(Map) ->
end;
_ ->
?event({group_maps, {norm_key, NormKey}, {value, Value}}),
- case byte_size(Value) > ?MAX_HEADER_LENGTH of
+ case byte_size(hb_util:bin(Value)) > ?MAX_HEADER_LENGTH of
% the value is too large to be encoded as a header
% within a part, so instead lift it to be a top level
% part
diff --git a/src/dev_genesis_wasm.erl b/src/dev_genesis_wasm.erl
index 6aca4d1a2..468d2d154 100644
--- a/src/dev_genesis_wasm.erl
+++ b/src/dev_genesis_wasm.erl
@@ -141,6 +141,14 @@ ensure_started(Opts) ->
DatabaseUrl = filename:absname(DBDir ++ "/genesis-wasm-db"),
filelib:ensure_path(DBDir),
filelib:ensure_path(CheckpointDir),
+ GenesisWasmPort =
+ integer_to_list(
+ hb_opts:get(
+ genesis_wasm_port,
+ 6363,
+ Opts
+ )
+ ),
Port =
open_port(
{spawn_executable,
@@ -164,15 +172,7 @@ ensure_started(Opts) ->
Env = [
{"UNIT_MODE", "hbu"},
{"HB_URL", NodeURL},
- {"PORT",
- integer_to_list(
- hb_opts:get(
- genesis_wasm_port,
- 6363,
- Opts
- )
- )
- },
+ {"PORT", GenesisWasmPort},
{"DB_URL", DatabaseUrl},
{"NODE_CONFIG_ENV", "production"},
{"DEFAULT_LOG_LEVEL",
@@ -195,8 +195,14 @@ ensure_started(Opts) ->
)
)
},
- {"DISABLE_PROCESS_FILE_CHECKPOINT_CREATION", "false"},
- {"PROCESS_MEMORY_FILE_CHECKPOINTS_DIR", CheckpointDir}
+ {
+ "DISABLE_PROCESS_FILE_CHECKPOINT_CREATION",
+ "false"
+ },
+ {
+ "PROCESS_MEMORY_FILE_CHECKPOINTS_DIR",
+ CheckpointDir
+ }
]
}
]
diff --git a/src/dev_green_zone.erl b/src/dev_green_zone.erl
index 1669b23b2..4ecbe2c1b 100644
--- a/src/dev_green_zone.erl
+++ b/src/dev_green_zone.erl
@@ -447,7 +447,7 @@ join_peer(PeerLocation, PeerID, _M1, _M2, InitOpts) ->
),
% Create an committed join request using the wallet.
Req = hb_cache:ensure_all_loaded(
- hb_message:commit(MergedReq, Wallet),
+ hb_message:commit(MergedReq, InitOpts),
InitOpts
),
?event({join_req, {explicit, Req}}),
diff --git a/src/dev_json_iface.erl b/src/dev_json_iface.erl
index 3b8401cd8..f64f337a2 100644
--- a/src/dev_json_iface.erl
+++ b/src/dev_json_iface.erl
@@ -105,7 +105,7 @@ message_to_json_struct(RawMsg, Features, Opts) ->
Opts
),
MsgWithoutCommitments = hb_maps:without([<<"commitments">>], TABM, Opts),
- ID = hb_message:id(RawMsg, all),
+ ID = hb_message:id(RawMsg, all, Opts),
?event({encoding, {id, ID}, {msg, RawMsg}}),
Last = hb_ao:get(<<"anchor">>, {as, <<"message@1.0">>, MsgWithoutCommitments}, <<>>, Opts),
Owner =
@@ -430,7 +430,8 @@ generate_stack(File, Mode) ->
generate_stack(File, _Mode, RawOpts) ->
Opts = normalize_test_opts(RawOpts),
test_init(),
- Msg0 = dev_wasm:cache_wasm_image(File, Opts),
+    WalletOpts = Opts#{ priv_wallet => hb:wallet() },
+    Msg0 = dev_wasm:cache_wasm_image(File, WalletOpts),
Image = hb_ao:get(<<"image">>, Msg0, Opts),
Msg1 = Msg0#{
<<"device">> => <<"stack@1.0">>,
@@ -457,7 +458,8 @@ generate_stack(File, _Mode, RawOpts) ->
Msg2.
generate_aos_msg(ProcID, Code) ->
- generate_aos_msg(ProcID, Code, #{}).
+ Opts = #{ priv_wallet => hb:wallet() },
+ generate_aos_msg(ProcID, Code, Opts).
generate_aos_msg(ProcID, Code, RawOpts) ->
Opts = normalize_test_opts(RawOpts),
hb_message:commit(#{
diff --git a/src/dev_lookup.erl b/src/dev_lookup.erl
index bd6958be7..5ccf1e891 100644
--- a/src/dev_lookup.erl
+++ b/src/dev_lookup.erl
@@ -63,16 +63,15 @@ http_lookup_test() ->
<<"store-module">> => hb_store_fs,
<<"name">> => <<"cache-mainnet">>
},
- Opts = #{ store => [Store] },
+ Opts = #{ store => [Store], priv_wallet => hb:wallet() },
Msg = #{ <<"test-key">> => <<"test-value">>, <<"data">> => <<"test-data">> },
{ok, ID} = hb_cache:write(Msg, Opts),
Node = hb_http_server:start_node(Opts),
- Wallet = hb:wallet(),
Req = hb_message:commit(#{
<<"path">> => <<"/~lookup@1.0/read?target=", ID/binary>>,
<<"device">> => <<"lookup@1.0">>,
<<"accept">> => <<"application/aos-2">>
- }, Wallet),
+ }, Opts),
{ok, Res} = hb_http:post(Node, Req, Opts),
{ok, Decoded} = dev_json_iface:json_to_message(hb_ao:get(<<"body">>, Res, Opts), Opts),
?assertEqual(<<"test-data">>, hb_ao:get(<<"Data">>, Decoded, Opts)).
\ No newline at end of file
diff --git a/src/dev_lua.erl b/src/dev_lua.erl
index 8f26f2619..70386a790 100644
--- a/src/dev_lua.erl
+++ b/src/dev_lua.erl
@@ -222,7 +222,15 @@ sandbox(State, [Path | Rest], Opts) ->
%% @doc Call the Lua script with the given arguments.
compute(Key, RawBase, Req, Opts) ->
?event(debug_lua, compute_called),
- {ok, Base} = ensure_initialized(RawBase, Req, Opts),
+ LoadedReq =
+ hb_message:normalize_commitments(
+ hb_cache:read_all_commitments(
+ Req,
+ Opts
+ ),
+ Opts
+ ),
+ {ok, Base} = ensure_initialized(RawBase, LoadedReq, Opts),
?event(debug_lua, ensure_initialized_done),
% Get the state from the base message's private element.
OldPriv = #{ <<"state">> := State } = hb_private:from_message(Base),
@@ -232,8 +240,8 @@ compute(Key, RawBase, Req, Opts) ->
Function =
hb_ao:get_first(
[
- {Req, <<"body/function">>},
- {Req, <<"function">>},
+ {LoadedReq, <<"body/function">>},
+ {LoadedReq, <<"function">>},
{{as, <<"message@1.0">>, Base}, <<"function">>}
],
Key,
@@ -243,26 +251,33 @@ compute(Key, RawBase, Req, Opts) ->
Params =
hb_ao:get_first(
[
- {Req, <<"body/parameters">>},
- {Req, <<"parameters">>},
+ {LoadedReq, <<"body/parameters">>},
+ {LoadedReq, <<"parameters">>},
{{as, <<"message@1.0">>, Base}, <<"parameters">>}
],
[
hb_private:reset(Base),
- Req,
+ LoadedReq,
#{}
],
Opts#{ hashpath => ignore }
),
?event(debug_lua, parameters_found),
% Resolve all hyperstate links
- ResolvedParams = hb_cache:ensure_all_loaded(Params, Opts),
+ ResolvedParams =
+ hb_message:normalize_commitments(
+ hb_cache:ensure_all_loaded(
+ Params,
+ Opts
+ ),
+ Opts
+ ),
% Call the VM function with the given arguments.
?event(lua,
{calling_lua_func,
{function, Function},
{args, ResolvedParams},
- {req, Req}
+ {req, LoadedReq}
}
),
process_response(
@@ -272,7 +287,14 @@ compute(Key, RawBase, Req, Opts) ->
State
)
catch
- _:Reason:Stacktrace -> {error, Reason, Stacktrace}
+ _:Reason:Stacktrace ->
+ ?event(lua_error,
+ {error,
+ {reason, Reason},
+ {stacktrace, Stacktrace}
+ }
+ ),
+ {error, Reason, Stacktrace}
end,
OldPriv,
Opts
diff --git a/src/dev_lua_test_ledgers.erl b/src/dev_lua_test_ledgers.erl
index 5d7e79af9..02dca1458 100644
--- a/src/dev_lua_test_ledgers.erl
+++ b/src/dev_lua_test_ledgers.erl
@@ -113,7 +113,7 @@ subledger(Root, Extra, Opts) ->
},
Extra
),
- hb_opts:get(priv_wallet, hb:wallet(), Opts)
+ #{ priv_wallet => hb_opts:get(priv_wallet, hb:wallet(), Opts) }
),
hb_cache:write(Proc, Opts),
Proc.
@@ -132,20 +132,24 @@ transfer(ProcMsg, Sender, Recipient, Quantity, Route, Opts) ->
end
}
end,
+    XferTarget = hb_message:id(ProcMsg, all, Opts),
+ XferRecipient = hb_util:human_id(Recipient),
+ XferBody = hb_message:commit(
+ MaybeRoute#{
+ <<"action">> => <<"Transfer">>,
+ <<"target">> => XferTarget,
+ <<"recipient">> => XferRecipient,
+ <<"quantity">> => Quantity
+ },
+ #{ priv_wallet => Sender }
+ ),
Xfer =
- hb_message:commit(#{
- <<"path">> => <<"push">>,
- <<"body">> =>
- hb_message:commit(MaybeRoute#{
- <<"action">> => <<"Transfer">>,
- <<"target">> => hb_message:id(ProcMsg, all),
- <<"recipient">> => hb_util:human_id(Recipient),
- <<"quantity">> => Quantity
- },
- Sender
- )
+ hb_message:commit(
+ #{
+ <<"path">> => <<"push">>,
+ <<"body">> => XferBody
},
- Sender
+ #{ priv_wallet => Sender }
),
hb_ao:resolve(
ProcMsg,
@@ -207,13 +211,20 @@ balances(Mode, ProcMsg, Opts) when is_atom(Mode) ->
balances(hb_util:bin(Mode), ProcMsg, Opts);
balances(Prefix, ProcMsg, Opts) ->
Balances = hb_ao:get(<<Prefix/binary, "/balance">>, ProcMsg, #{}, Opts),
- hb_private:reset(hb_cache:ensure_all_loaded(Balances, Opts)).
+ LoadedBalances = hb_private:reset(
+ hb_cache:ensure_all_loaded(Balances, Opts)
+ ),
+    hb_maps:without(
+        [<<"commitments">>],
+        LoadedBalances, Opts
+    ).
%% @doc Get the supply of a ledger, either `now` or `initial`.
supply(ProcMsg, Opts) ->
supply(now, ProcMsg, Opts).
supply(Mode, ProcMsg, Opts) ->
- lists:sum(maps:values(balances(Mode, ProcMsg, Opts))).
+ Balances = balances(Mode, ProcMsg, Opts),
+ lists:sum(maps:values(Balances)).
%% @doc Calculate the supply of tokens in all sub-ledgers, from the balances of
%% the root ledger.
@@ -334,10 +345,13 @@ verify_net(RootProc, AllProcs, Opts) ->
%% as the current supply. This invariant will not hold for sub-ledgers, as they
%% 'mint' tokens in their local supply when they receive them from other ledgers.
verify_root_supply(RootProc, Opts) ->
+ InitialSupply = supply(initial, RootProc, Opts),
+ CurrentSupply = supply(now, RootProc, Opts),
+ LedgerValues = maps:values(ledgers(RootProc, Opts)),
+ LedgerSupply = lists:sum(LedgerValues),
?assert(
- supply(initial, RootProc, Opts) ==
- supply(now, RootProc, Opts) +
- lists:sum(maps:values(ledgers(RootProc, Opts)))
+ InitialSupply ==
+ CurrentSupply + LedgerSupply
).
%% @doc Verify that the sum of all spendable balances held by ledgers in a
@@ -428,7 +442,8 @@ transfer() ->
Opts
),
?assertEqual(100, supply(Proc, Opts)),
- transfer(Proc, Alice, Bob, 1, Opts),
+ TransferRes = transfer(Proc, Alice, Bob, 1, Opts),
+ ?event(transfer_test, {transfer_res, TransferRes}),
?assertEqual(99, balance(Proc, Alice, Opts)),
?assertEqual(1, balance(Proc, Bob, Opts)),
?assertEqual(100, supply(Proc, Opts)).
@@ -515,25 +530,26 @@ subledger_transfer() ->
RootLedger => root,
SubLedger => subledger
},
- % 1. Alice has tokens on the root ledger.
+ % 1. Alice has 100 tokens on the root ledger.
?assertEqual(100, balance(RootLedger, Alice, Opts)),
- ?event(token_log, {map, map([RootLedger], EnvNames, Opts)}),
- % 2. Alice sends tokens to the sub-ledger from the root ledger.
+ ?event(token_log, {map_1, map([RootLedger, SubLedger], EnvNames, Opts)}),
+
+ % 2. Alice sends 10 tokens to the sub-ledger from the root ledger.
transfer(RootLedger, Alice, Alice, 10, SubLedger, Opts),
+ ?event(token_log, {map_2, map([RootLedger, SubLedger], EnvNames, Opts)}),
?assertEqual(90, balance(RootLedger, Alice, Opts)),
?assertEqual(10, balance(SubLedger, Alice, Opts)),
- % 3. Alice sends tokens to Bob on the sub-ledger.
+
+ % 3. Alice sends 8 tokens to Bob on the sub-ledger.
transfer(SubLedger, Alice, Bob, 8, Opts),
- ?event(token_log,
- {state_after_subledger_user_xfer,
- {names, map([RootLedger, SubLedger], EnvNames, Opts)},
- {ids, map([RootLedger, SubLedger], Opts)}
- }),
- % 4. Bob sends tokens to Alice on the root ledger.
+ ?event(token_log, {map_3, map([RootLedger, SubLedger], EnvNames, Opts)}),
+
+ % 4. Bob sends 7 tokens to himself on the root ledger.
transfer(SubLedger, Bob, Bob, 7, RootLedger, Opts),
+ ?event(token_log, {map_4, map([RootLedger, SubLedger], EnvNames, Opts)}),
% Validate the balances of the root and sub-ledgers.
+
Map = map([RootLedger, SubLedger], EnvNames, Opts),
- ?event(token_log, {map, map([RootLedger, SubLedger], Opts)}),
?assertEqual(
#{
root => #{
@@ -615,21 +631,22 @@ single_subledger_to_subledger() ->
SubLedger1 => subledger1,
SubLedger2 => subledger2
},
- ?event(debug, {root_ledger, RootLedger}),
- ?event(debug, {sl1, SubLedger1}),
- ?event(debug, {sl2, SubLedger2}),
+ % 1. Alice starts with 100 tokens on root ledger.
?assertEqual(100, balance(RootLedger, Alice, Opts)),
- % 2. Alice sends 90 tokens to herself on SubLedger1.
- ?event(debug, {transfer_1}),
+ ?assertEqual(0, balance(SubLedger1, Alice, Opts)),
+ ?assertEqual(0, balance(SubLedger2, Alice, Opts)),
+
+ % 2. Alice sends 90 tokens to herself on SubLedger1 from the root ledger.
transfer(RootLedger, Alice, Alice, 90, SubLedger1, Opts),
?assertEqual(10, balance(RootLedger, Alice, Opts)),
?assertEqual(90, balance(SubLedger1, Alice, Opts)),
- ?event(debug, {transfer_2}),
+ ?assertEqual(0, balance(SubLedger2, Alice, Opts)),
+
+ % 3. Alice sends 80 tokens to herself on SubLedger2 from SubLedger1.
PushRes = transfer(SubLedger1, Alice, Alice, 80, SubLedger2, Opts),
- ?event(debug, {push_res, PushRes}),
- ?event(debug, {map, map([RootLedger, SubLedger1, SubLedger2], Opts)}),
- ?assertEqual(80, balance(SubLedger2, Alice, Opts)),
- ?assertEqual(10, balance(SubLedger1, Alice, Opts)).
+ ?assertEqual(10, balance(RootLedger, Alice, Opts)),
+ ?assertEqual(10, balance(SubLedger1, Alice, Opts)),
+ ?assertEqual(80, balance(SubLedger2, Alice, Opts)).
%% @doc Verify that registered sub-ledgers are able to send tokens to each other
%% without the need for messages on the root ledger.
@@ -653,21 +670,43 @@ subledger_to_subledger() ->
SubLedger1 => subledger1,
SubLedger2 => subledger2
},
- % 1. Alice has tokens on the root ledger.
+ ?event(token_log, {names,
+ {alice, hb_util:human_id(ar_wallet:to_address(Alice))},
+ {bob, hb_util:human_id(ar_wallet:to_address(Bob))},
+ {subledger1, hb_message:id(SubLedger1, signed, Opts)},
+ {subledger2, hb_message:id(SubLedger2, signed, Opts)}
+ }),
+ % 1. Alice starts with 100 tokens on the root ledger.
?assertEqual(100, balance(RootLedger, Alice, Opts)),
- % 2. Alice sends 90 tokens to herself on SubLedger1.
+ ?event(token_log,
+ {map_1, map([RootLedger, SubLedger1, SubLedger2], Names, Opts)}
+ ),
+ % 2. Alice sends 90 tokens to herself on SubLedger1 from the root ledger.
transfer(RootLedger, Alice, Alice, 90, SubLedger1, Opts),
- % 3. Alice sends 10 tokens to Bob on SubLedger2.
+ ?event(token_log,
+ {map_2, map([RootLedger, SubLedger1, SubLedger2], Names, Opts)}
+ ),
+ % 3. Alice sends 10 tokens to Bob on SubLedger2 from SubLedger1.
transfer(SubLedger1, Alice, Bob, 10, SubLedger2, Opts),
- ?event(debug, {map, map([RootLedger, SubLedger1, SubLedger2], Names, Opts)}),
+ ?event(token_log,
+ {map_3, map([RootLedger, SubLedger1, SubLedger2], Names, Opts)}
+ ),
?assertEqual(10, balance(RootLedger, Alice, Opts)),
?assertEqual(80, balance(SubLedger1, Alice, Opts)),
?assertEqual(10, balance(SubLedger2, Bob, Opts)),
- verify_net(RootLedger, [SubLedger1, SubLedger2], Opts),
- % 5. Bob sends 5 tokens to himself on SubLedger1.
+ % 4. Bob sends 5 tokens to himself on SubLedger1 from SubLedger2.
transfer(SubLedger2, Bob, Bob, 5, SubLedger1, Opts),
+ ?event(token_log,
+ {map_4, map([RootLedger, SubLedger1, SubLedger2], Names, Opts)}
+ ),
+ ?assertEqual(5, balance(SubLedger1, Bob, Opts)),
+ ?assertEqual(5, balance(SubLedger2, Bob, Opts)),
+ verify_net(RootLedger, [SubLedger1, SubLedger2], Opts),
+ % 5. Bob sends 4 tokens to Alice on SubLedger1 from SubLedger2.
transfer(SubLedger2, Bob, Alice, 4, SubLedger1, Opts),
- ?event(debug, {map, map([RootLedger, SubLedger1, SubLedger2], Names, Opts)}),
+ ?event(token_log,
+ {map_5, map([RootLedger, SubLedger1, SubLedger2], Names, Opts)}
+ ),
?assertEqual(10, balance(RootLedger, Alice, Opts)),
?assertEqual(5, balance(SubLedger1, Bob, Opts)),
?assertEqual(84, balance(SubLedger1, Alice, Opts)),
diff --git a/src/dev_message.erl b/src/dev_message.erl
index c2e9fab88..ed71be5e8 100644
--- a/src/dev_message.erl
+++ b/src/dev_message.erl
@@ -110,8 +110,11 @@ id(RawBase, Req, NodeOpts) ->
case hb_maps:keys(Commitments) of
[] ->
% If there are no commitments, we must (re)calculate the ID.
- ?event(id, no_commitments_found_in_id_call),
+ ?event(id_result, no_commitments_found_in_id_call),
calculate_id(hb_maps:without([<<"commitments">>], ModBase), Req, IDOpts);
+ [ID] ->
+ ?event(id_result, {single_commitment_found_in_id_call, {id, ID}}),
+ {ok, hb_util:human_id(ID)};
IDs ->
% Accumulate the relevant IDs into a single value. This is performed
% by module arithmetic of each of the IDs. The effect of this is that:
@@ -125,7 +128,7 @@ id(RawBase, Req, NodeOpts) ->
% accumulation function starts with a buffer of zero encoded as a
% 256-bit binary. Subsequently, a single ID on its own 'accumulates'
% to itself.
- ?event(id, {accumulating_existing_ids, IDs}),
+ ?event(id_result, {accumulating_existing_ids, IDs}),
{ok,
hb_util:human_id(
hb_crypto:accumulate(
@@ -252,9 +255,10 @@ commit(Self, Req, Opts) ->
AttMod = hb_ao:message_to_device(#{ <<"device">> => AttDev }, CommitOpts),
{ok, AttFun} = hb_ao:find_exported_function(Base, AttMod, commit, 3, CommitOpts),
% Encode to a TABM
+ Converted = hb_message:convert(Base, tabm, CommitOpts),
Loaded =
ensure_commitments_loaded(
- hb_message:convert(Base, tabm, CommitOpts),
+ Converted,
Opts
),
{ok, Committed} =
@@ -269,7 +273,9 @@ commit(Self, Req, Opts) ->
]
)
),
- {ok, hb_message:convert(Committed, <<"structured@1.0">>, tabm, CommitOpts)}.
+ ConvertBack =
+ hb_message:convert(Committed, <<"structured@1.0">>, tabm, CommitOpts),
+ {ok, ConvertBack}.
%% @doc Verify a message. By default, all commitments are verified. The
%% `committers' key in the request can be used to specify that only the
@@ -878,7 +884,7 @@ set_ignore_undefined_test() ->
verify_test() ->
Unsigned = #{ <<"a">> => <<"b">> },
- Signed = hb_message:commit(Unsigned, hb:wallet()),
+ Signed = hb_message:commit(Unsigned, #{ priv_wallet => hb:wallet() }),
?event({signed, Signed}),
BadSigned = Signed#{ <<"a">> => <<"c">> },
?event({bad_signed, BadSigned}),
diff --git a/src/dev_node_process.erl b/src/dev_node_process.erl
index 5366b6793..7e8c2cc8b 100644
--- a/src/dev_node_process.erl
+++ b/src/dev_node_process.erl
@@ -161,14 +161,24 @@ lookup_spawn_test() ->
{ok, #{ <<"device">> := <<"process@1.0">> }},
Res1
),
- {ok, Process2} = hb_ao:resolve(
- #{ <<"device">> => <<"node-process@1.0">> },
- ?TEST_NAME,
- Opts
- ),
+ {ok, Process2} =
+ hb_ao:resolve(
+ #{ <<"device">> => <<"node-process@1.0">> },
+ ?TEST_NAME,
+ Opts
+ ),
+ LoadedProcess1 = hb_cache:ensure_all_loaded(Process1, Opts),
+ LoadedProcess2 = hb_cache:ensure_all_loaded(Process2, Opts),
+ ?event(lookup_spawn, {match, { p1, LoadedProcess1 }, { p2, LoadedProcess2 }}),
?assertEqual(
- hb_cache:ensure_all_loaded(Process1, Opts),
- hb_cache:ensure_all_loaded(Process2, Opts)
+ hb_message:normalize_commitments(
+ LoadedProcess1,
+ Opts
+ ),
+ hb_message:normalize_commitments(
+ LoadedProcess2,
+ Opts
+ )
).
%% @doc Test that a process can be spawned, executed upon, and its result retrieved.
diff --git a/src/dev_p4.erl b/src/dev_p4.erl
index fdd77eccd..a2b6d27ab 100644
--- a/src/dev_p4.erl
+++ b/src/dev_p4.erl
@@ -350,9 +350,9 @@ faff_test() ->
<<"path">> => <<"/greeting">>,
<<"greeting">> => <<"Hello, world!">>
},
- GoodSignedReq = hb_message:commit(Req, GoodWallet),
+ GoodSignedReq = hb_message:commit(Req, #{ priv_wallet => GoodWallet }),
?event({req, GoodSignedReq}),
- BadSignedReq = hb_message:commit(Req, BadWallet),
+ BadSignedReq = hb_message:commit(Req, #{ priv_wallet => BadWallet }),
?event({req, BadSignedReq}),
{ok, Res} = hb_http:get(Node, GoodSignedReq, #{}),
?event(payment, {res, Res}),
@@ -385,18 +385,18 @@ non_chargable_route_test() ->
Req = #{
<<"path">> => <<"/~p4@1.0/balance">>
},
- GoodSignedReq = hb_message:commit(Req, Wallet),
+ GoodSignedReq = hb_message:commit(Req, #{ priv_wallet => Wallet }),
Res = hb_http:get(Node, GoodSignedReq, #{}),
?event({res1, Res}),
?assertMatch({ok, 0}, Res),
Req2 = #{ <<"path">> => <<"/~meta@1.0/info/operator">> },
- GoodSignedReq2 = hb_message:commit(Req2, Wallet),
+ GoodSignedReq2 = hb_message:commit(Req2, #{ priv_wallet => Wallet }),
Res2 = hb_http:get(Node, GoodSignedReq2, #{}),
?event({res2, Res2}),
OperatorAddress = hb_util:human_id(hb:address()),
?assertEqual({ok, OperatorAddress}, Res2),
Req3 = #{ <<"path">> => <<"/~scheduler@1.0">> },
- BadSignedReq3 = hb_message:commit(Req3, Wallet),
+ BadSignedReq3 = hb_message:commit(Req3, #{ priv_wallet => Wallet }),
Res3 = hb_http:get(Node, BadSignedReq3, #{}),
?event({res3, Res3}),
?assertMatch({error, _}, Res3).
@@ -422,6 +422,12 @@ hyper_token_ledger() ->
AliceAddress = hb_util:human_id(AliceWallet),
BobWallet = ar_wallet:new(),
BobAddress = hb_util:human_id(BobWallet),
+ ?event(debug_token, {name_addresses,
+ {host, HostAddress},
+ {operator, OperatorAddress},
+ {alice, AliceAddress},
+ {bob, BobAddress}
+ }),
{ok, TokenScript} = file:read_file("scripts/hyper-token.lua"),
{ok, ProcessScript} = file:read_file("scripts/hyper-token-p4.lua"),
{ok, ClientScript} = file:read_file("scripts/hyper-token-p4-client.lua"),
@@ -439,6 +445,12 @@ hyper_token_ledger() ->
},
<<"ledger-path">> => <<"/ledger~node-process@1.0">>
},
+ StoreOpts = [
+ #{
+ <<"name">> => <<"cache-mainnet">>,
+ <<"store-module">> => hb_store_lmdb
+ }
+ ],
% Start the node with the processor and the `local-process' ledger
% (component 2) running the `hyper-token.lua' and `hyper-token-p4.lua'
% scripts. `hyper-token.lua' implements the core token ledger, while
@@ -447,13 +459,8 @@ hyper_token_ledger() ->
% with 100 tokens for Alice.
Node =
hb_http_server:start_node(
- #{
- store => [
- #{
- <<"name">> => <<"cache-mainnet/lmdb">>,
- <<"store-module">> => hb_store_lmdb
- }
- ],
+ Opts = #{
+ store => StoreOpts,
priv_wallet => HostWallet,
p4_non_chargable_routes =>
[
@@ -485,8 +492,6 @@ hyper_token_ledger() ->
],
<<"balance">> => #{ AliceAddress => 100 },
<<"admin">> => HostAddress
- % <<"operator">> =>
- % hb_util:human_id(ar_wallet:to_address(HostWallet))
}
}
}
@@ -497,46 +502,72 @@ hyper_token_ledger() ->
<<"path">> => <<"/greeting">>,
<<"greeting">> => <<"Hello, world!">>
},
- SignedReq = hb_message:commit(Req, BobWallet),
- Res = hb_http:get(Node, SignedReq, #{}),
- ?event({expected_failure, Res}),
- ?assertMatch({error, _}, Res),
- % We then move 50 tokens from Alice to Bob.
+ SignedReq = hb_message:commit(Req, Opts#{ priv_wallet => BobWallet }),
+ ErrorRes = hb_http:get(Node, SignedReq, Opts),
+ ?event(debug_token, {expected_failure, ErrorRes}),
+ ?assertMatch({error, _}, ErrorRes),
+ % We then move 50 tokens from Alice to Bob.
+ Xfer = hb_message:commit(
+ #{
+ <<"path">> => <<"/ledger~node-process@1.0/schedule">>,
+ <<"body">> =>
+ hb_message:commit(
+ #{
+ <<"path">> => <<"transfer">>,
+ <<"quantity">> => 50,
+ <<"recipient">> => BobAddress
+ },
+ #{ priv_wallet => AliceWallet, store => StoreOpts }
+ )
+ },
+ Opts#{ priv_wallet => HostWallet, store => StoreOpts }
+ ),
{ok, TopupRes} =
hb_http:post(
Node,
- hb_message:commit(
- #{
- <<"path">> => <<"/ledger~node-process@1.0/schedule">>,
- <<"body">> =>
- hb_message:commit(
- #{
- <<"path">> => <<"transfer">>,
- <<"quantity">> => 50,
- <<"recipient">> => BobAddress
- },
- AliceWallet
- )
- },
- HostWallet
- ),
- #{}
+ Xfer,
+ Opts#{ store => StoreOpts }
),
+ ?event(debug_token, {topup_res, TopupRes}),
+ {ok, BalancesAfterTopup} =
+ hb_http:get(
+ Node,
+ <<"/ledger~node-process@1.0/now/balance">>,
+ Opts#{ store => StoreOpts }
+ ),
+ ?event(debug_token, {balances_after_topup, BalancesAfterTopup}),
+ % Alice and Bob should each have 50 tokens.
+ ?assertMatch(50, hb_ao:get(AliceAddress, BalancesAfterTopup, Opts)),
+ ?assertMatch(50, hb_ao:get(BobAddress, BalancesAfterTopup, Opts)),
% We now attempt Bob's request again, which should succeed.
- ?event({topup_res, TopupRes}),
- ResAfterTopup = hb_http:get(Node, SignedReq, #{}),
- ?event({res_after_topup, ResAfterTopup}),
- ?assertMatch({ok, <<"Hello, world!">>}, ResAfterTopup),
- % We now check the balance of Bob. It should have been charged 2 tokens from
- % the 50 Alice sent him.
+ Req2 = Req#{
+ <<"greeting">> => <<"Hello again, world!">>
+ },
+ SignedReq2 = hb_message:commit(Req2, Opts#{ priv_wallet => BobWallet }),
+ ResAfterTopup = hb_http:get(Node, SignedReq2, Opts#{ store => StoreOpts }),
+ ?event(debug_token, {res_after_topup, ResAfterTopup}),
+ ?assertMatch({ok, <<"Hello again, world!">>}, ResAfterTopup),
{ok, Balances} =
hb_http:get(
Node,
<<"/ledger~node-process@1.0/now/balance">>,
- #{}
+ Opts
),
- ?event(debug_charge, {balances, Balances}),
- ?assertMatch(48, hb_ao:get(BobAddress, Balances, #{})),
- % Finally, we check the balance of the operator. It should be 2 tokens,
- % the amount that was charged from Alice.
- ?assertMatch(2, hb_ao:get(OperatorAddress, Balances, #{})).
\ No newline at end of file
+ ?event(debug_token, {balances_after_request, Balances}),
+ % We now check the balance of Alice, Bob, and the operator.
+ AliceBalance = hb_ao:get(AliceAddress, Balances, Opts),
+ BobBalance = hb_ao:get(BobAddress, Balances, Opts),
+ OperatorBalance = hb_ao:get(OperatorAddress, Balances, Opts),
+ ?event(debug_token,
+ {
+ final_balances,
+ {alice_balance, AliceBalance},
+ {bob_balance, BobBalance},
+ {operator_balance, OperatorBalance}
+ }
+ ),
+ % The new balances should be 50 (100 - 50) for Alice,
+ % 48 (0 + 50 - 2) for Bob, and 2 (0 + 2) for the operator.
+ ?assertMatch(50, AliceBalance),
+ ?assertMatch(48, BobBalance),
+ ?assertMatch(2, OperatorBalance).
diff --git a/src/dev_patch.erl b/src/dev_patch.erl
index f7ef33e26..bfdc7e9f5 100644
--- a/src/dev_patch.erl
+++ b/src/dev_patch.erl
@@ -228,7 +228,7 @@ patch_to_submessage_test() ->
<<"banana">> => 200
}
},
- hb:wallet()
+ #{ priv_wallet => hb:wallet() }
)
}
},
diff --git a/src/dev_process.erl b/src/dev_process.erl
index df2b33b0c..5d4e19aa0 100644
--- a/src/dev_process.erl
+++ b/src/dev_process.erl
@@ -495,7 +495,11 @@ now(RawMsg1, Msg2, Opts) ->
?event(compute_short,
{serving_latest_cached_state,
{proc_id, ProcessID},
- {slot, LatestSlot}
+ {slot, LatestSlot},
+ {
+ loaded_latest_msg,
+ hb_cache:ensure_all_loaded(LatestMsg, Opts)
+ }
},
Opts
),
@@ -573,27 +577,12 @@ ensure_loaded(Msg1, Msg2, Opts) ->
MaybeLoadedSnapshotMsg,
Opts
),
- Process = hb_maps:get(<<"process">>, LoadedSnapshotMsg, Opts),
- #{ <<"commitments">> := HmacCommits} =
- hb_message:with_commitments(
- #{ <<"type">> => <<"hmac-sha256">>},
- Process,
- Opts),
- #{ <<"commitments">> := SignCommits } =
- hb_message:with_commitments(ProcID, Process, Opts),
- UpdateProcess = hb_maps:put(
- <<"commitments">>,
- hb_maps:merge(HmacCommits, SignCommits),
- Process,
- Opts
- ),
- LoadedSnapshotMsg2 = LoadedSnapshotMsg#{ <<"process">> => UpdateProcess },
LoadedSlot = hb_cache:ensure_all_loaded(MaybeLoadedSlot, Opts),
- ?event(compute, {found_state_checkpoint, ProcID, LoadedSnapshotMsg2}),
+ ?event(compute, {found_state_checkpoint, ProcID, LoadedSnapshotMsg}),
{ok, Normalized} =
run_as(
<<"execution">>,
- LoadedSnapshotMsg2,
+ LoadedSnapshotMsg,
normalize,
Opts#{ hashpath => ignore }
),
@@ -735,7 +724,9 @@ init() ->
%% @doc Generate a process message with a random number, and no
%% executor.
test_base_process() ->
- test_base_process(#{}).
+ Wallet = hb:wallet(),
+ WalletOpts = #{ priv_wallet => Wallet },
+ test_base_process(WalletOpts).
test_base_process(Opts) ->
Wallet = hb_opts:get(priv_wallet, hb:wallet(), Opts),
Address = hb_util:human_id(ar_wallet:to_address(Wallet)),
@@ -745,24 +736,30 @@ test_base_process(Opts) ->
<<"scheduler-location">> => hb_opts:get(scheduler, Address, Opts),
<<"type">> => <<"Process">>,
<<"test-random-seed">> => rand:uniform(1337)
- }, Wallet).
+ }, Opts).
test_wasm_process(WASMImage) ->
test_wasm_process(WASMImage, #{}).
test_wasm_process(WASMImage, Opts) ->
Wallet = hb_opts:get(priv_wallet, hb:wallet(), Opts),
- #{ <<"image">> := WASMImageID } = dev_wasm:cache_wasm_image(WASMImage, Opts),
+ WalletOpts = Opts#{ priv_wallet => Wallet },
+ #{ <<"image">> := WASMImageID } =
+ dev_wasm:cache_wasm_image(WASMImage, WalletOpts),
+ BaseProc = test_base_process(WalletOpts),
+ UncommittedBaseProc = hb_message:uncommitted(BaseProc, WalletOpts),
+ ?event({base_proc, BaseProc}),
+ Message = hb_maps:merge(
+ UncommittedBaseProc,
+ #{
+ <<"execution-device">> => <<"stack@1.0">>,
+ <<"device-stack">> => [<<"wasm-64@1.0">>],
+ <<"image">> => WASMImageID
+ },
+ WalletOpts
+ ),
hb_message:commit(
- hb_maps:merge(
- hb_message:uncommitted(test_base_process(Opts), Opts),
- #{
- <<"execution-device">> => <<"stack@1.0">>,
- <<"device-stack">> => [<<"wasm-64@1.0">>],
- <<"image">> => WASMImageID
- },
- Opts
- ),
- Opts#{ priv_wallet => Wallet}
+ Message,
+ WalletOpts
).
%% @doc Generate a process message with a random number, and the
@@ -779,10 +776,11 @@ test_aos_process(Opts) ->
test_aos_process(Opts, Stack) ->
Wallet = hb_opts:get(priv_wallet, hb:wallet(), Opts),
Address = hb_util:human_id(ar_wallet:to_address(Wallet)),
- WASMProc = test_wasm_process(<<"test/aos-2-pure-xs.wasm">>, Opts),
+ WalletOpts = Opts#{ priv_wallet => Wallet },
+ WASMProc = test_wasm_process(<<"test/aos-2-pure-xs.wasm">>, WalletOpts),
hb_message:commit(
hb_maps:merge(
- hb_message:uncommitted(WASMProc, Opts),
+ hb_message:uncommitted(WASMProc, WalletOpts),
#{
<<"device-stack">> => Stack,
<<"execution-device">> => <<"stack@1.0">>,
@@ -798,11 +796,13 @@ test_aos_process(Opts, Stack) ->
<<"normalize">>
],
<<"scheduler">> =>
- hb_opts:get(scheduler, Address, Opts),
+ hb_opts:get(scheduler, Address, WalletOpts),
<<"authority">> =>
- hb_opts:get(authority, Address, Opts)
- }, Opts),
- Opts#{ priv_wallet => Wallet}
+ hb_opts:get(authority, Address, WalletOpts)
+ },
+ WalletOpts
+ ),
+ WalletOpts
).
%% @doc Generate a device that has a stack of two `dev_test's for
@@ -810,18 +810,20 @@ test_aos_process(Opts, Stack) ->
%% `Already-Seen' elements for each assigned slot.
dev_test_process() ->
Wallet = hb:wallet(),
+ WalletOpts = #{ priv_wallet => Wallet },
hb_message:commit(
- hb_maps:merge(test_base_process(), #{
+ hb_maps:merge(test_base_process(WalletOpts), #{
<<"execution-device">> => <<"stack@1.0">>,
<<"device-stack">> => [<<"test-device@1.0">>, <<"test-device@1.0">>]
- }, #{}),
- Wallet
+ }, WalletOpts),
+ WalletOpts
).
schedule_test_message(Msg1, Text, Opts) ->
schedule_test_message(Msg1, Text, #{}, Opts).
schedule_test_message(Msg1, Text, MsgBase, Opts) ->
- Wallet = hb:wallet(),
+ Wallet = hb_opts:get(priv_wallet, hb:wallet(), Opts),
+ WalletOpts = Opts#{ priv_wallet => Wallet },
UncommittedBase = hb_message:uncommitted(MsgBase, Opts),
Msg2 =
hb_message:commit(#{
@@ -833,12 +835,12 @@ schedule_test_message(Msg1, Text, MsgBase, Opts) ->
<<"type">> => <<"Message">>,
<<"test-label">> => Text
},
- Opts#{ priv_wallet => Wallet}
+ WalletOpts
)
},
- Opts#{ priv_wallet => Wallet}
+ WalletOpts
),
- {ok, _} = hb_ao:resolve(Msg1, Msg2, Opts).
+ {ok, _} = hb_ao:resolve(Msg1, Msg2, WalletOpts).
schedule_aos_call(Msg1, Code) ->
schedule_aos_call(Msg1, Code, #{}).
@@ -990,7 +992,7 @@ http_wasm_process_by_id_test() ->
process_async_cache => false,
store => #{
<<"store-module">> => hb_store_fs,
- <<"name">> => <<"cache-mainnet">>
+ <<"name">> => <<"cache-TEST">>
}
}),
Wallet = ar_wallet:new(),
@@ -1012,7 +1014,7 @@ http_wasm_process_by_id_test() ->
<<"function">> => <<"fac">>,
<<"parameters">> => [5.0]
},
- Wallet
+ #{ priv_wallet => Wallet }
),
{ok, Msg3} = hb_http:post(Node, << ProcID/binary, "/schedule">>, ExecMsg, #{}),
?event({schedule_msg_res, {msg3, Msg3}}),
@@ -1031,36 +1033,52 @@ http_wasm_process_by_id_test() ->
aos_compute_test_() ->
{timeout, 30, fun() ->
init(),
- Msg1 = test_aos_process(),
- schedule_aos_call(Msg1, <<"return 1+1">>),
- schedule_aos_call(Msg1, <<"return 2+2">>),
+ Opts = #{
+ store => [#{
+ <<"store-module">> => hb_store_fs,
+ <<"name">> => <<"cache-TEST">>
+ }]
+ },
+ Msg1 = test_aos_process(Opts),
+ schedule_aos_call(Msg1, <<"return 1+1">>, Opts),
+ schedule_aos_call(Msg1, <<"return 2+2">>, Opts),
Msg2 = #{ <<"path">> => <<"compute">>, <<"slot">> => 0 },
- {ok, Msg3} = hb_ao:resolve(Msg1, Msg2, #{}),
- {ok, Res} = hb_ao:resolve(Msg3, <<"results">>, #{}),
+ {ok, Msg3} = hb_ao:resolve(Msg1, Msg2, Opts),
+ {ok, Res} = hb_ao:resolve(Msg3, <<"results">>, Opts),
?event({computed_message, {msg3, Res}}),
- {ok, Data} = hb_ao:resolve(Res, <<"data">>, #{}),
+ {ok, Data} = hb_ao:resolve(Res, <<"data">>, Opts),
?event({computed_data, Data}),
?assertEqual(<<"2">>, Data),
Msg4 = #{ <<"path">> => <<"compute">>, <<"slot">> => 1 },
- {ok, Msg5} = hb_ao:resolve(Msg1, Msg4, #{}),
- ?assertEqual(<<"4">>, hb_ao:get(<<"results/data">>, Msg5, #{})),
+ {ok, Msg5} = hb_ao:resolve(Msg1, Msg4, Opts),
+ ?assertEqual(<<"4">>, hb_ao:get(<<"results/data">>, Msg5, Opts)),
{ok, Msg5}
end}.
aos_browsable_state_test_() ->
{timeout, 30, fun() ->
init(),
- Msg1 = test_aos_process(),
- schedule_aos_call(Msg1,
+ Opts = #{
+ store => [
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"name">> => <<"cache-BROWSABLE-TEST">>
+ }
+ ]
+ },
+ Msg1 = test_aos_process(Opts),
+ schedule_aos_call(
+ Msg1,
<<"table.insert(ao.outbox.Messages, { target = ao.id, ",
"action = \"State\", ",
- "data = { deep = 4, bool = true } })">>
+ "data = { deep = 4, bool = true } })">>,
+ Opts
),
Msg2 = #{ <<"path">> => <<"compute">>, <<"slot">> => 0 },
{ok, Msg3} =
hb_ao:resolve_many(
[Msg1, Msg2, <<"results">>, <<"outbox">>, 1, <<"data">>, <<"deep">>],
- #{ cache_control => <<"always">> }
+ Opts#{ cache_control => <<"always">> }
),
ID = hb_message:id(Msg1),
?event({computed_message, {id, {explicit, ID}}}),
@@ -1070,19 +1088,18 @@ aos_browsable_state_test_() ->
aos_state_access_via_http_test_() ->
{timeout, 60, fun() ->
rand:seed(default),
- Wallet = ar_wallet:new(),
Node = hb_http_server:start_node(Opts = #{
+ priv_wallet => ar_wallet:new(),
port => 10000 + rand:uniform(10000),
- priv_wallet => Wallet,
cache_control => <<"always">>,
store => #{
<<"store-module">> => hb_store_fs,
- <<"name">> => <<"cache-mainnet">>
+ <<"name">> => <<"cache-TEST">>
},
force_signed_requests => true
}),
Proc = test_aos_process(Opts),
- ProcID = hb_util:human_id(hb_message:id(Proc, all)),
+ ProcID = hb_util:human_id(hb_message:id(Proc, all, Opts)),
{ok, _InitRes} = hb_http:post(Node, <<"/schedule">>, Proc, Opts),
Msg2 = hb_message:commit(#{
<<"data-protocol">> => <<"ao">>,
@@ -1096,7 +1113,7 @@ aos_state_access_via_http_test_() ->
"[\"body\"] = \"Hello, world!
\"",
"}})">>,
<<"target">> => ProcID
- }, Wallet),
+ }, Opts),
{ok, Msg3} = hb_http:post(Node, << ProcID/binary, "/schedule">>, Msg2, Opts),
?event({schedule_msg_res, {msg3, Msg3}}),
{ok, Msg4} =
@@ -1127,34 +1144,40 @@ aos_state_access_via_http_test_() ->
aos_state_patch_test_() ->
{timeout, 30, fun() ->
- Wallet = hb:wallet(),
init(),
- Msg1Raw = test_aos_process(#{}, [
+ Opts = #{ priv_wallet => hb:wallet(), store => [#{
+ <<"store-module">> => hb_store_fs,
+ <<"name">> => <<"cache-TEST">>
+ }] },
+ Msg1Raw = test_aos_process(Opts, [
<<"wasi@1.0">>,
<<"json-iface@1.0">>,
<<"wasm-64@1.0">>,
<<"patch@1.0">>,
<<"multipass@1.0">>
]),
- {ok, Msg1} = hb_message:with_only_committed(Msg1Raw, #{}),
- ProcID = hb_message:id(Msg1, all),
- Msg2 = (hb_message:commit(#{
- <<"data-protocol">> => <<"ao">>,
- <<"variant">> => <<"ao.N.1">>,
- <<"target">> => ProcID,
- <<"type">> => <<"Message">>,
- <<"action">> => <<"Eval">>,
- <<"data">> =>
- <<
- "table.insert(ao.outbox.Messages, "
- "{ method = \"PATCH\", x = \"banana\" })"
- >>
- }, Wallet))#{ <<"path">> => <<"schedule">>, <<"method">> => <<"POST">> },
- {ok, _} = hb_ao:resolve(Msg1, Msg2, #{}),
+ {ok, Msg1} = hb_message:with_only_committed(Msg1Raw, Opts),
+ ProcID = hb_message:id(Msg1, all, Opts),
+ Msg2 = (hb_message:commit(
+ #{
+ <<"data-protocol">> => <<"ao">>,
+ <<"variant">> => <<"ao.N.1">>,
+ <<"target">> => ProcID,
+ <<"type">> => <<"Message">>,
+ <<"action">> => <<"Eval">>,
+ <<"data">> =>
+ <<
+ "table.insert(ao.outbox.Messages, "
+ "{ method = \"PATCH\", x = \"banana\" })"
+ >>
+ },
+ Opts
+ ))#{ <<"path">> => <<"schedule">>, <<"method">> => <<"POST">> },
+ {ok, _} = hb_ao:resolve(Msg1, Msg2, Opts),
Msg3 = #{ <<"path">> => <<"compute">>, <<"slot">> => 0 },
- {ok, Msg4} = hb_ao:resolve(Msg1, Msg3, #{}),
+ {ok, Msg4} = hb_ao:resolve(Msg1, Msg3, Opts),
?event({computed_message, {msg3, Msg4}}),
- {ok, Data} = hb_ao:resolve(Msg4, <<"x">>, #{}),
+ {ok, Data} = hb_ao:resolve(Msg4, <<"x">>, Opts),
?event({computed_data, Data}),
?assertEqual(<<"banana">>, Data)
end}.
@@ -1302,28 +1325,35 @@ aos_persistent_worker_benchmark_test_() ->
{timeout, 30, fun() ->
BenchTime = 5,
init(),
- Msg1 = test_aos_process(),
- schedule_aos_call(Msg1, <<"X=1337">>),
+ Opts = #{
+ store => [#{
+ <<"store-module">> => hb_store_fs,
+ <<"name">> => <<"cache-TEST">>
+ }]
+ },
+ Msg1 = test_aos_process(Opts),
+ schedule_aos_call(Msg1, <<"X=1337">>, Opts),
FirstSlotMsg2 = #{
<<"path">> => <<"compute">>,
<<"slot">> => 0
},
?assertMatch(
{ok, _},
- hb_ao:resolve(Msg1, FirstSlotMsg2, #{ spawn_worker => true })
+ hb_ao:resolve(Msg1, FirstSlotMsg2, Opts#{ spawn_worker => true })
),
Iterations = hb_test_utils:benchmark(
fun(Iteration) ->
schedule_aos_call(
Msg1,
- <<"return X + ", (integer_to_binary(Iteration))/binary>>
+ <<"return X + ", (integer_to_binary(Iteration))/binary>>,
+ Opts
),
?assertMatch(
{ok, _},
hb_ao:resolve(
Msg1,
#{ <<"path">> => <<"compute">>, <<"slot">> => Iteration },
- #{}
+ Opts
)
)
end,
diff --git a/src/dev_process_cache.erl b/src/dev_process_cache.erl
index df3b32f07..8c9447f6b 100644
--- a/src/dev_process_cache.erl
+++ b/src/dev_process_cache.erl
@@ -16,7 +16,8 @@ read(ProcID, SlotRef, Opts) ->
hb_cache:read(Path, Opts).
%% @doc Write a process computation result to the cache.
-write(ProcID, Slot, Msg, Opts) ->
+write(ProcID, Slot, RawMsg, Opts) ->
+ Msg = hb_message:normalize_commitments(RawMsg, Opts),
% Write the item to the cache in the root of the store.
{ok, Root} = hb_cache:write(Msg, Opts),
% Link the item to the path in the store by slot number.
@@ -29,14 +30,6 @@ write(ProcID, Slot, Msg, Opts) ->
ID = hb_util:human_id(hb_ao:get(id, Msg, Opts)),
Opts
),
- ?event(
- {linking_id,
- {proc_id, ProcID},
- {slot, Slot},
- {id, ID},
- {path, MsgIDPath}
- }
- ),
hb_cache:link(Root, MsgIDPath, Opts),
% Return the slot number path.
{ok, SlotNumPath}.
@@ -85,10 +78,8 @@ latest(ProcID, RawRequiredPath, Limit, Opts) ->
Opts
)
end,
- ?event({required_path_converted, {proc_id, ProcID}, {required_path, RequiredPath}}),
Path = path(ProcID, slot_root, Opts),
AllSlots = hb_cache:list_numbered(Path, Opts),
- ?event({all_slots, {proc_id, ProcID}, {slots, AllSlots}}),
CappedSlots =
case Limit of
undefined -> AllSlots;
diff --git a/src/dev_push.erl b/src/dev_push.erl
index 42e0f89e8..691514c48 100644
--- a/src/dev_push.erl
+++ b/src/dev_push.erl
@@ -639,18 +639,21 @@ parse_redirect(Location, Opts) ->
full_push_test_() ->
{timeout, 30, fun() ->
- dev_process:init(),
+ hb:init(),
Opts = #{
process_async_cache => false,
priv_wallet => hb:wallet(),
cache_control => <<"always">>,
store => [
- #{ <<"store-module">> => hb_store_fs, <<"name">> => <<"cache-TEST">> },
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"name">> => <<"cache-TEST">>
+ },
#{ <<"store-module">> => hb_store_gateway,
- <<"store">> => #{
+ <<"local-store">> => [#{
<<"store-module">> => hb_store_fs,
<<"name">> => <<"cache-TEST">>
- }
+ }]
}
]
},
@@ -664,19 +667,20 @@ full_push_test_() ->
},
Opts
),
- ?event({test_setup, {msg1, Msg1}, {sched_init, SchedInit}}),
+ ?event(full_push, {test_setup, {msg1, Msg1}, {sched_init, SchedInit}}),
Script = ping_pong_script(2),
- ?event({script, Script}),
+ ?event(full_push, {script, Script}),
{ok, Msg2} = dev_process:schedule_aos_call(Msg1, Script, Opts),
- ?event({msg_sched_result, Msg2}),
+ ?event(full_push, {msg_sched_result, Msg2}),
{ok, StartingMsgSlot} =
hb_ao:resolve(Msg2, #{ <<"path">> => <<"slot">> }, Opts),
- ?event({starting_msg_slot, StartingMsgSlot}),
+ ?event(full_push, {starting_msg_slot, StartingMsgSlot}),
Msg3 =
#{
<<"path">> => <<"push">>,
<<"slot">> => StartingMsgSlot
},
+ ?event(full_push, {msg3, Msg3}),
{ok, _} = hb_ao:resolve(Msg1, Msg3, Opts),
?assertEqual(
{ok, <<"Done.">>},
@@ -734,7 +738,7 @@ push_as_identity_test_() ->
?event({test_setup, {msg1, Msg1}, {sched_init, SchedInit}}),
Script = ping_pong_script(2),
?event({script, Script}),
- {ok, Msg2} = dev_process:schedule_aos_call(Msg1, Script),
+ {ok, Msg2} = dev_process:schedule_aos_call(Msg1, Script, Opts),
?event(push, {msg_sched_result, Msg2}),
{ok, StartingMsgSlot} =
hb_ao:resolve(Msg2, #{ <<"path">> => <<"slot">> }, Opts),
@@ -750,12 +754,17 @@ push_as_identity_test_() ->
hb_ao:resolve(Msg1, <<"now/results/data">>, Opts)
),
% Validate that the scheduler's wallet was used to sign the message.
- Committers =
+ Assignment =
hb_ao:get(
- <<"schedule/assignments/2/committers">>,
+ <<"schedule/assignments/2">>,
Msg1,
Opts
),
+ Committers = hb_ao:get(
+ <<"committers">>,
+ hb_cache:read_all_commitments(Assignment, Opts),
+ Opts
+ ),
?assert(lists:member(SchedulingID, Committers)),
?assert(lists:member(ComputeID, Committers)),
% Validate that the compute wallet was used to sign the message.
diff --git a/src/dev_router.erl b/src/dev_router.erl
index 0ba71e25b..d3d338fba 100644
--- a/src/dev_router.erl
+++ b/src/dev_router.erl
@@ -1093,7 +1093,9 @@ dynamic_router() ->
),
% Ensure that computation is done by the exec node.
{ok, ResMsg} = hb_http:get(Node, <<"/c?c+list=1">>, ExecOpts),
- ?assertEqual([ExecNodeAddr], hb_message:signers(ResMsg, ExecOpts)).
+ Signers = hb_message:signers(ResMsg, ExecOpts),
+ ?event(debug_dynrouter, {match, {signers, Signers}, {exec_addr, ExecNodeAddr}}),
+ ?assertEqual([ExecNodeAddr], Signers).
%% @doc Demonstrates routing tables being dynamically created and adjusted
%% according to the real-time performance of nodes. This test utilizes the
@@ -1486,7 +1488,7 @@ add_route_test() ->
<<"node">> => <<"new">>,
<<"priority">> => 15
},
- Owner
+ #{ priv_wallet => Owner }
),
#{}
),
diff --git a/src/dev_scheduler.erl b/src/dev_scheduler.erl
index 9e4d2a999..0eee9c2c8 100644
--- a/src/dev_scheduler.erl
+++ b/src/dev_scheduler.erl
@@ -323,8 +323,8 @@ check_lookahead_and_local_cache(Worker, ProcID, TargetSlot, Opts) when is_pid(Wo
check_lookahead_and_local_cache(undefined, ProcID, TargetSlot, Opts) ->
% The lookahead worker has not found an assignment for the target
% slot yet, so we check our local cache.
- ?event(next_lookahead, {reading_local_cache, {slot, TargetSlot}}),
- case dev_scheduler_cache:read(ProcID, TargetSlot, Opts) of
+ SchedulerCacheResult = dev_scheduler_cache:read(ProcID, TargetSlot, Opts),
+ case SchedulerCacheResult of
not_found -> not_found;
{ok, Assignment} ->
% We have an assignment in our local cache, so we return it.
@@ -1687,6 +1687,7 @@ register_location_on_boot_test() ->
schedule_message_and_get_slot_test() ->
start(),
+ Opts = #{ priv_wallet => hb:wallet() },
Msg1 = test_process(),
Msg2 = #{
<<"path">> => <<"schedule">>,
@@ -1695,10 +1696,10 @@ schedule_message_and_get_slot_test() ->
hb_message:commit(#{
<<"type">> => <<"Message">>,
<<"test-key">> => <<"true">>
- }, hb:wallet())
+ }, Opts)
},
- ?assertMatch({ok, _}, hb_ao:resolve(Msg1, Msg2, #{})),
- ?assertMatch({ok, _}, hb_ao:resolve(Msg1, Msg2, #{})),
+ ?assertMatch({ok, _}, hb_ao:resolve(Msg1, Msg2, Opts)),
+ ?assertMatch({ok, _}, hb_ao:resolve(Msg1, Msg2, Opts)),
Msg3 = #{
<<"path">> => <<"slot">>,
<<"method">> => <<"GET">>,
@@ -1737,11 +1738,19 @@ redirect_from_graphql_test_() ->
redirect_from_graphql() ->
start(),
Opts =
- #{ store =>
- [
- #{ <<"store-module">> => hb_store_fs, <<"name">> => <<"cache-mainnet">> },
- #{ <<"store-module">> => hb_store_gateway, <<"store">> => false }
- ]
+ #{
+ priv_wallet => hb:wallet(),
+ store =>
+ [
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"name">> => <<"cache-mainnet">>
+ },
+ #{
+ <<"store-module">> => hb_store_gateway,
+ <<"store">> => false
+ }
+ ]
},
{ok, Msg} = hb_cache:read(<<"0syT13r0s0tgPmIed95bJnuSqaD29HQNN8D3ElLSrsc">>, Opts),
?assertMatch(
@@ -1758,10 +1767,10 @@ redirect_from_graphql() ->
<<"0syT13r0s0tgPmIed95bJnuSqaD29HQNN8D3ElLSrsc">>,
<<"test-key">> => <<"Test-Val">>
},
- hb:wallet()
+ Opts
)
},
- #{
+ Opts#{
scheduler_follow_redirects => false
}
)
@@ -1769,6 +1778,7 @@ redirect_from_graphql() ->
get_local_schedule_test() ->
start(),
+ Opts = #{ priv_wallet => hb:wallet() },
Msg1 = test_process(),
Msg2 = #{
<<"path">> => <<"schedule">>,
@@ -1777,7 +1787,7 @@ get_local_schedule_test() ->
hb_message:commit(#{
<<"type">> => <<"Message">>,
<<"test-key">> => <<"Test-Val">>
- }, hb:wallet())
+ }, Opts)
},
Msg3 = #{
<<"path">> => <<"schedule">>,
@@ -1786,10 +1796,10 @@ get_local_schedule_test() ->
hb_message:commit(#{
<<"type">> => <<"Message">>,
<<"test-key">> => <<"Test-Val-2">>
- }, hb:wallet())
+ }, Opts)
},
- ?assertMatch({ok, _}, hb_ao:resolve(Msg1, Msg2, #{})),
- ?assertMatch({ok, _}, hb_ao:resolve(Msg1, Msg3, #{})),
+ ?assertMatch({ok, _}, hb_ao:resolve(Msg1, Msg2, Opts)),
+ ?assertMatch({ok, _}, hb_ao:resolve(Msg1, Msg3, Opts)),
?assertMatch(
{ok, _},
hb_ao:resolve(Msg1, #{
@@ -1797,7 +1807,7 @@ get_local_schedule_test() ->
<<"path">> => <<"schedule">>,
<<"target">> => hb_util:id(Msg1)
},
- #{})
+ Opts)
).
%%% HTTP tests
@@ -1832,7 +1842,7 @@ register_scheduler_test() ->
{ok, Res} = hb_http:post(Node, Msg1, #{}),
?assertMatch(#{ <<"url">> := Location } when is_binary(Location), Res).
-http_post_schedule_sign(Node, Msg, ProcessMsg, Wallet) ->
+http_post_schedule_sign(Node, Msg, ProcessMsg, Opts) ->
Msg1 = hb_message:commit(#{
<<"path">> => <<"/~scheduler@1.0/schedule">>,
<<"method">> => <<"POST">>,
@@ -1843,34 +1853,50 @@ http_post_schedule_sign(Node, Msg, ProcessMsg, Wallet) ->
hb_util:human_id(hb_message:id(ProcessMsg, all)),
<<"type">> => <<"Message">>
},
- Wallet
+ Opts
)
- }, Wallet),
- hb_http:post(Node, Msg1, #{}).
+ }, Opts),
+ hb_http:post(Node, Msg1, Opts).
http_get_slot(N, PMsg) ->
ID = hb_message:id(PMsg, all),
- Wallet = hb:wallet(),
- {ok, _} = hb_http:get(N, hb_message:commit(#{
- <<"path">> => <<"/~scheduler@1.0/slot">>,
- <<"method">> => <<"GET">>,
- <<"target">> => ID
- }, Wallet), #{}).
+ Opts = #{ priv_wallet => hb:wallet() },
+ {ok, _} =
+ hb_http:get(
+ N,
+ hb_message:commit(
+ #{
+ <<"path">> => <<"/~scheduler@1.0/slot">>,
+ <<"method">> => <<"GET">>,
+ <<"target">> => ID
+ },
+ Opts
+ ),
+ Opts
+ ).
http_get_schedule(N, PMsg, From, To) ->
http_get_schedule(N, PMsg, From, To, <<"application/http">>).
http_get_schedule(N, PMsg, From, To, Format) ->
ID = hb_message:id(PMsg, all),
- Wallet = hb:wallet(),
- {ok, _} = hb_http:get(N, hb_message:commit(#{
- <<"path">> => <<"/~scheduler@1.0/schedule">>,
- <<"method">> => <<"GET">>,
- <<"target">> => hb_util:human_id(ID),
- <<"from">> => From,
- <<"to">> => To,
- <<"accept">> => Format
- }, Wallet), #{}).
+ Opts = #{ priv_wallet => hb:wallet() },
+ {ok, _} =
+ hb_http:get(
+ N,
+ hb_message:commit(
+ #{
+ <<"path">> => <<"/~scheduler@1.0/schedule">>,
+ <<"method">> => <<"GET">>,
+ <<"target">> => hb_util:human_id(ID),
+ <<"from">> => From,
+ <<"to">> => To,
+ <<"accept">> => Format
+ },
+ Opts
+ ),
+ Opts
+ ).
http_get_schedule_redirect_test_() ->
{timeout, 60, fun http_get_schedule_redirect/0}.
@@ -2061,9 +2087,11 @@ http_get_json_schedule_test_() ->
fun(_) -> {ok, _} = hb_http:post(Node, Msg2, Opts) end,
lists:seq(1, 10)
),
- ?assertMatch({ok, #{ <<"current">> := 10 }}, http_get_slot(Node, PMsg)),
+ ProcessSlot = http_get_slot(Node, PMsg),
+ ?event({process_slot, ProcessSlot}),
+ ?assertMatch({ok, #{ <<"current">> := 10 }}, ProcessSlot),
{ok, Schedule} = http_get_schedule(Node, PMsg, 0, 10, <<"application/aos-2">>),
- ?event({schedule, Schedule}),
+ ?event({full_schedule, Schedule}),
JSON = hb_ao:get(<<"body">>, Schedule, Opts),
Assignments = hb_json:decode(JSON),
?assertEqual(
diff --git a/src/dev_scheduler_server.erl b/src/dev_scheduler_server.erl
index b9e072de0..4c8051da9 100644
--- a/src/dev_scheduler_server.erl
+++ b/src/dev_scheduler_server.erl
@@ -228,8 +228,8 @@ do_assign(State, Message, ReplyPID) ->
%% @doc Commit to the assignment using all of our appropriate wallets.
commit_assignment(BaseAssignment, State) ->
- Wallets = maps:get(wallets, State),
Opts = maps:get(opts, State),
+ Wallets = maps:get(wallets, State),
lists:foldr(
fun(Wallet, Assignment) ->
hb_message:commit(Assignment, Opts#{ priv_wallet => Wallet })
diff --git a/src/dev_volume.erl b/src/dev_volume.erl
index 5c227eead..d080b3937 100644
--- a/src/dev_volume.erl
+++ b/src/dev_volume.erl
@@ -544,6 +544,9 @@ update_node_config(StorePath, NewStore, Opts) ->
{updating_config, StorePath, NewStore}
}
),
+ % Handle wallet copying for all store types and get the wallet
+ Wallet = hb_volume:copy_wallet_to_volume(StorePath, Opts),
+ % Update genesis wasm db path
GenesisWasmDBDir =
hb_opts:get(
genesis_wasm_db_dir,
@@ -559,14 +562,18 @@ update_node_config(StorePath, NewStore, Opts) ->
?event(debug_volume,
{update_node_config, full_path_created, FullGenesisPath}
),
- ok =
- hb_http_server:set_opts(
- Opts#{
- store => NewStore,
- genesis_wasm_db_dir => FullGenesisPath
- }
- ),
+ UpdatedOpts = Opts#{
+ store => NewStore,
+ genesis_wasm_db_dir => FullGenesisPath
+ },
+ % Set wallet if one was returned
+ FinalOpts = case Wallet of
+ undefined -> UpdatedOpts;
+ _ -> UpdatedOpts#{priv_wallet => Wallet}
+ end,
+ ok = hb_http_server:set_opts(FinalOpts),
?event(debug_volume,
{update_node_config, config_updated, success}
),
- {ok, <<"Volume mounted and store updated successfully">>}.
\ No newline at end of file
+ {ok, <<"Volume mounted and store updated successfully">>}.
+
diff --git a/src/dev_wasi.erl b/src/dev_wasi.erl
index cfb522c72..e5dc69fe8 100644
--- a/src/dev_wasi.erl
+++ b/src/dev_wasi.erl
@@ -244,7 +244,7 @@ generate_wasi_stack(File, Func, Params) ->
vfs_is_serializable_test() ->
StackMsg = generate_wasi_stack("test/test-print.wasm", <<"hello">>, []),
- VFSMsg = hb_ao:get(<<"vfs">>, StackMsg),
+ VFSMsg = hb_message:normalize_commitments(hb_ao:get(<<"vfs">>, StackMsg), #{}),
VFSMsg2 =
hb_message:minimize(
hb_message:convert(
@@ -259,7 +259,10 @@ wasi_stack_is_serializable_test() ->
Msg = generate_wasi_stack("test/test-print.wasm", <<"hello">>, []),
HTTPSigMsg = hb_message:convert(Msg, <<"httpsig@1.0">>, #{}),
Msg2 = hb_message:convert(HTTPSigMsg, <<"structured@1.0">>, <<"httpsig@1.0">>, #{}),
- ?assert(hb_message:match(Msg, Msg2)).
+ ?assert(hb_message:match(
+ hb_message:normalize_commitments(Msg, #{}),
+ hb_message:normalize_commitments(Msg2, #{})
+ )).
basic_aos_exec_test() ->
Init = generate_wasi_stack("test/aos-2-pure-xs.wasm", <<"handle">>, []),
diff --git a/src/hb_ao_test_vectors.erl b/src/hb_ao_test_vectors.erl
index 29f80cde3..bac0de306 100644
--- a/src/hb_ao_test_vectors.erl
+++ b/src/hb_ao_test_vectors.erl
@@ -186,7 +186,7 @@ test_opts() ->
%% @doc Ensure that we can read a device from the cache then execute it. By
%% extension, this will also allow us to load a device from Arweave due to the
%% remote store implementations.
-exec_dummy_device(SigningWallet, Opts) ->
+exec_dummy_device(Opts) ->
% Compile the test device and store it in an accessible cache to the execution
% environment.
{ok, ModName, Bin} = compile:file("test/dev_dummy.erl", [binary]),
@@ -206,11 +206,17 @@ exec_dummy_device(SigningWallet, Opts) ->
),
Opts
),
- {ok, ID} = hb_cache:write(DevMsg, Opts),
+ {ok, _UnsignedID} = hb_cache:write(DevMsg, Opts),
+ ID = hb_message:id(DevMsg, signed, Opts),
% Ensure that we can read the device message from the cache and that it matches
% the original message.
- {ok, ReadMsg} = hb_cache:read(ID, Opts),
- ?assertEqual(DevMsg, hb_cache:ensure_all_loaded(ReadMsg, Opts)),
+ {ok, RawReadMsg} = hb_cache:read(ID, Opts),
+ ReadMsg =
+ hb_cache:ensure_all_loaded(
+ hb_cache:read_all_commitments(RawReadMsg, Opts),
+ Opts
+ ),
+ ?assertEqual(DevMsg, ReadMsg),
% Create a base message with the device ID, then request a dummy path from
% it.
hb_ao:resolve(
@@ -232,7 +238,7 @@ load_device_test() ->
priv_wallet => Wallet
},
hb_store:reset(Store),
- ?assertEqual({ok, <<"example">>}, exec_dummy_device(Wallet, Opts)).
+ ?assertEqual({ok, <<"example">>}, exec_dummy_device(Opts)).
untrusted_load_device_test() ->
% Establish an execution environment which does not trust the device author.
@@ -250,7 +256,7 @@ untrusted_load_device_test() ->
hb_store:reset(Store),
?assertThrow(
{error, {device_not_loadable, _, device_signer_not_trusted}},
- exec_dummy_device(UntrustedWallet, Opts)
+ exec_dummy_device(Opts)
).
%%% Test vector suite
diff --git a/src/hb_cache.erl b/src/hb_cache.erl
index d92069694..3521bb64a 100644
--- a/src/hb_cache.erl
+++ b/src/hb_cache.erl
@@ -39,6 +39,7 @@
%%% loading the data -- overriding the suggested options in the link.
-module(hb_cache).
-export([ensure_loaded/1, ensure_loaded/2, ensure_all_loaded/1, ensure_all_loaded/2]).
+-export([read_all_commitments/2]).
-export([read/2, read_resolved/3, write/2, write_binary/3, write_hashpath/2, link/3]).
-export([match/2, list/2, list_numbered/2]).
-export([test_unsigned/1, test_signed/1]).
@@ -144,6 +145,12 @@ report_ensure_loaded_not_found(Ref, Lk, Opts) ->
%% performance costs.
ensure_all_loaded(Msg) ->
ensure_all_loaded(Msg, #{}).
+ensure_all_loaded(Link, Opts) when ?IS_LINK(Link) ->
+ ensure_all_loaded(ensure_loaded(Link, Opts), Opts);
+ensure_all_loaded(Msg, Opts) when is_map(Msg) ->
+ maps:map(fun(_K, V) -> ensure_all_loaded(V, Opts) end, Msg);
+ensure_all_loaded(Msg, Opts) when is_list(Msg) ->
+ lists:map(fun(V) -> ensure_all_loaded(V, Opts) end, Msg);
ensure_all_loaded(Msg, Opts) ->
ensure_all_loaded([], Msg, Opts).
ensure_all_loaded(Ref, Link, Opts) when ?IS_LINK(Link) ->
@@ -215,7 +222,6 @@ write(RawMsg, Opts) when is_map(RawMsg) ->
TABM = hb_message:convert(RawMsg, tabm, <<"structured@1.0">>, Opts),
case hb_message:with_only_committed(TABM, Opts) of
{ok, Msg} ->
- ?event(debug_cache, {writing_full_message, {msg, Msg}}),
%try
do_write_message(
TABM,
@@ -302,7 +308,6 @@ write_key(Base, <<"commitments">>, _HPAlg, RawCommitments, Store, Opts) ->
),
maps:map(
fun(BaseCommID, Commitment) ->
- ?event(debug_cache, {writing_commitment, {commitment, Commitment}}),
{ok, CommMsgID} = do_write_message(Commitment, Store, Opts),
hb_store:make_link(
Store,
@@ -331,12 +336,14 @@ write_key(Base, Key, HPAlg, Value, Store, Opts) ->
%% separately, then write each to the store.
prepare_commitments(RawCommitments, Opts) ->
Commitments = ensure_all_loaded(RawCommitments, Opts),
- maps:map(
- fun(_, StructuredCommitment) ->
- hb_message:convert(StructuredCommitment, tabm, Opts)
- end,
- Commitments
- ).
+ PreparedCommitments =
+ maps:map(
+ fun(_, StructuredCommitment) ->
+ hb_message:convert(StructuredCommitment, tabm, Opts)
+ end,
+ Commitments
+ ),
+ PreparedCommitments.
%% @doc Generate the commitment path for a given base path.
commitment_path(Base, Opts) ->
@@ -383,20 +390,62 @@ write_binary(Hashpath, Bin, Store, Opts) ->
%% @doc Read the message at a path. Returns in `structured@1.0' format: Either a
%% richly typed map or a direct binary.
read(Path, Opts) ->
- case store_read(Path, hb_opts:get(store, no_viable_store, Opts), Opts) of
- not_found -> not_found;
- {ok, Res} ->
- %?event({applying_types_to_read_message, Res}),
- %Structured = dev_codec_structured:to(Res),
- %?event({finished_read, Structured}),
- {ok, Res}
- end.
+ store_read(Path, hb_opts:get(store, no_viable_store, Opts), Opts).
+
+%% @doc Load all of the commitments for a message into memory.
+read_all_commitments(Msg, Opts) ->
+ Store = hb_opts:get(store, no_viable_store, Opts),
+ UncommittedID = hb_message:id(Msg, none, Opts#{ linkify_mode => discard }),
+ CurrentCommitments = hb_maps:get(<<"commitments">>, Msg, #{}, Opts),
+ AlreadyLoaded = hb_maps:keys(CurrentCommitments, Opts),
+ CommitmentsPath =
+ hb_store:resolve(
+ Store,
+ hb_store:path(Store, [UncommittedID, <<"commitments">>])
+ ),
+ FoundCommitments =
+ case hb_store:list(Store, CommitmentsPath) of
+ {ok, CommitmentIDs} ->
+ lists:filtermap(
+ fun(CommitmentID) ->
+ ShouldLoad = not lists:member(CommitmentID, AlreadyLoaded),
+ ResolvedCommPath =
+ hb_store:path(
+ Store,
+ [CommitmentsPath, CommitmentID]
+ ),
+ case ShouldLoad andalso read(ResolvedCommPath, Opts) of
+ {ok, Commitment} ->
+ {
+ true,
+ {
+ CommitmentID,
+ ensure_all_loaded(Commitment, Opts)
+ }
+ };
+ _ ->
+ false
+ end
+ end,
+ CommitmentIDs
+ );
+ not_found ->
+ []
+ end,
+ NewCommitments =
+ hb_maps:merge(
+ CurrentCommitments,
+ maps:from_list(FoundCommitments)
+ ),
+ Msg#{ <<"commitments">> => NewCommitments }.
%% @doc List all of the subpaths of a given path and return a map of keys and
%% links to the subpaths, including their types.
-store_read(_Path, no_viable_store, _) ->
- not_found;
store_read(Path, Store, Opts) ->
+ store_read(Path, Path, Store, Opts).
+store_read(_Target, _Path, no_viable_store, _) ->
+ not_found;
+store_read(Target, Path, Store, Opts) ->
ResolvedFullPath = hb_store:resolve(Store, PathBin = hb_path:to_binary(Path)),
?event({read_resolved,
{original_path, {string, PathBin}},
@@ -423,11 +472,17 @@ store_read(Path, Store, Opts) ->
{subpaths, {explicit, Subpaths}}
}
),
- % Generate links for all subpaths except `commitments' and
- % `ao-types'. `commitments' is always read in its entirety,
- % such that all messages have their IDs and signatures
- % locally available.
- Msg = prepare_links(ResolvedFullPath, Subpaths, Store, Opts),
+ % Generate links for each of the listed keys. We only list
+ % the target ID given in the case of multiple known
+ % commitments.
+ Msg =
+ prepare_links(
+ Target,
+ ResolvedFullPath,
+ Subpaths,
+ Store,
+ Opts
+ ),
?event(
{completed_read,
{resolved_path, ResolvedFullPath},
@@ -442,62 +497,43 @@ store_read(Path, Store, Opts) ->
end.
%% @doc Prepare a set of links from a listing of subpaths.
-prepare_links(RootPath, Subpaths, Store, Opts) ->
+prepare_links(Target, RootPath, Subpaths, Store, Opts) ->
{ok, Implicit, Types} = read_ao_types(RootPath, Subpaths, Store, Opts),
Res =
maps:from_list(lists:filtermap(
fun(<<"ao-types">>) -> false;
(<<"commitments">>) ->
- % List the commitments for this message, and load them into
- % memory. If there no commitments at the path, we exclude
- % commitments from the list of links.
CommPath =
hb_store:resolve(
Store,
- hb_store:path(Store, [RootPath, <<"commitments">>])
+ hb_store:path(
+ Store,
+ [
+ RootPath,
+ <<"commitments">>,
+ Target
+ ]
+ )
),
- ?event(
- {reading_commitments,
+ ?event(read_commitment,
+ {reading_commitment,
+ {target, Target},
{root_path, RootPath},
{commitments_path, CommPath}
}
),
- case hb_store:list(Store, CommPath) of
- {ok, CommitmentIDs} ->
- ?event(
- {found_commitments,
+ case read(CommPath, Opts) of
+ {ok, Commitment} ->
+ LoadedCommitment = ensure_all_loaded(Commitment, Opts),
+ ?event(read_commitment,
+ {found_target_commitment,
{path, CommPath},
- {ids, CommitmentIDs}
+ {commitment, LoadedCommitment}
}
),
- % We have commitments, so we read each commitment
- % into memory, and return it as part of the message.
{
true,
- {
- <<"commitments">>,
- maps:from_list(lists:map(
- fun(CommitmentID) ->
- {ok, Commitment} =
- read(
- <<
- CommPath/binary,
- "/",
- CommitmentID/binary
- >>,
- Opts
- ),
- {
- CommitmentID,
- ensure_all_loaded(
- Commitment,
- Opts
- )
- }
- end,
- CommitmentIDs
- ))
- }
+ {<<"commitments">>, #{ Target => LoadedCommitment }}
};
_ ->
false
@@ -565,12 +601,16 @@ prepare_links(RootPath, Subpaths, Store, Opts) ->
)),
Merged = maps:merge(Res, Implicit),
% Convert the message to an ordered list if the ao-types indicate that it
- % should be so.
+ % should be so. If it is a message, we ensure that the commitments are
+ % normalized (have an unsigned comm. ID) and loaded into memory.
case dev_codec_structured:is_list_from_ao_types(Types, Opts) of
true ->
hb_util:message_to_ordered_list(Merged, Opts);
false ->
- Merged
+ case hb_opts:get(lazy_loading, true, Opts) of
+ true -> Merged;
+ false -> ensure_all_loaded(Merged, Opts)
+ end
end.
%% @doc Read and parse the ao-types for a given path if it is in the supplied
@@ -639,8 +679,10 @@ test_unsigned(Data) ->
%% Helper function to create signed #tx items.
test_signed(Data) -> test_signed(Data, ar_wallet:new()).
-test_signed(Data, Wallet) ->
- hb_message:commit(test_unsigned(Data), Wallet).
+test_signed(Data, Wallet) when not is_map(Wallet) ->
+ test_signed(Data, #{ priv_wallet => Wallet });
+test_signed(Data, Opts) ->
+ hb_message:commit(test_unsigned(Data), Opts).
test_store_binary(Store) ->
Bin = <<"Simple unsigned data item">>,
@@ -735,9 +777,20 @@ test_store_simple_signed_message(Store) ->
% ?assert(MatchRes),
{ok, CommittedID} = dev_message:id(Item, #{ <<"committers">> => [Address] }, Opts),
{ok, RetrievedItemSigned} = read(CommittedID, Opts),
- ?event({retreived_signed_message, {expected, Item}, {got, RetrievedItemSigned}}),
- MatchResSigned = hb_message:match(Item, RetrievedItemSigned, strict, Opts),
- ?event({match_result_signed, MatchResSigned}),
+ ?event(debug_test,
+ {retreived_signed_message,
+ {expected, Item},
+ {got, RetrievedItemSigned}
+ }
+ ),
+ MatchResSigned =
+ hb_message:match(
+ Item,
+ hb_message:normalize_commitments(RetrievedItemSigned, Opts),
+ strict,
+ Opts
+ ),
+ ?event(debug_test, {match_result_signed, MatchResSigned}),
?assert(MatchResSigned),
ok.
diff --git a/src/hb_client.erl b/src/hb_client.erl
index 51428aca9..54b2234bf 100644
--- a/src/hb_client.erl
+++ b/src/hb_client.erl
@@ -169,7 +169,11 @@ upload_raw_ans104_with_anchor_test() ->
upload_empty_message_test() ->
Msg = #{ <<"data">> => <<"TEST">> },
- Committed = hb_message:commit(Msg, hb:wallet(), <<"ans104@1.0">>),
+ Committed = hb_message:commit(
+ Msg,
+ #{ priv_wallet => hb:wallet() },
+ <<"ans104@1.0">>
+ ),
Result = upload(Committed, #{}, <<"ans104@1.0">>),
?event({upload_result, Result}),
?assertMatch({ok, _}, Result).
@@ -180,7 +184,11 @@ upload_single_layer_message_test() ->
<<"basic">> => <<"value">>,
<<"integer">> => 1
},
- Committed = hb_message:commit(Msg, hb:wallet(), <<"ans104@1.0">>),
+ Committed = hb_message:commit(
+ Msg,
+ #{ priv_wallet => hb:wallet() },
+ <<"ans104@1.0">>
+ ),
Result = upload(Committed, #{}, <<"ans104@1.0">>),
?event({upload_result, Result}),
?assertMatch({ok, _}, Result).
\ No newline at end of file
diff --git a/src/hb_examples.erl b/src/hb_examples.erl
index 3ebd63890..c58e32f45 100644
--- a/src/hb_examples.erl
+++ b/src/hb_examples.erl
@@ -40,7 +40,7 @@ relay_with_payments_test() ->
ClientMessage1 =
hb_message:commit(
#{<<"path">> => <<"/~relay@1.0/call?relay-path=https://www.google.com">>},
- ClientWallet
+ #{ priv_wallet => ClientWallet }
),
% Relay the message.
Res = hb_http:get(HostNode, ClientMessage1, #{}),
@@ -54,7 +54,7 @@ relay_with_payments_test() ->
<<"recipient">> => ClientAddress,
<<"amount">> => 100
},
- HostWallet
+ #{ priv_wallet => HostWallet }
),
?assertMatch({ok, _}, hb_http:get(HostNode, TopupMessage, #{})),
% Relay the message again.
@@ -121,7 +121,7 @@ paid_wasm() ->
ClientMessage2 =
hb_message:commit(
#{<<"path">> => <<"/~p4@1.0/balance">>},
- ClientWallet
+ #{ priv_wallet => ClientWallet }
),
{ok, Res2} = hb_http:get(HostNode, ClientMessage2, Opts),
?assertMatch(60, Res2).
diff --git a/src/hb_http.erl b/src/hb_http.erl
index 3beeac3c8..4d4085b8d 100644
--- a/src/hb_http.erl
+++ b/src/hb_http.erl
@@ -905,20 +905,18 @@ simple_ao_resolve_unsigned_test() ->
?assertEqual({ok, <<"Value1">>}, post(URL, TestMsg, #{})).
simple_ao_resolve_signed_test() ->
- URL = hb_http_server:start_node(),
+ URL = hb_http_server:start_node(Opts = #{ priv_wallet => hb:wallet() }),
TestMsg = #{ <<"path">> => <<"/key1">>, <<"key1">> => <<"Value1">> },
- Wallet = hb:wallet(),
{ok, Res} =
post(
URL,
- hb_message:commit(TestMsg, Wallet),
- #{}
+ hb_message:commit(TestMsg, Opts),
+ Opts
),
?assertEqual(<<"Value1">>, Res).
nested_ao_resolve_test() ->
- URL = hb_http_server:start_node(),
- Wallet = hb:wallet(),
+ URL = hb_http_server:start_node(Opts = #{ priv_wallet => hb:wallet() }),
{ok, Res} =
post(
URL,
@@ -930,99 +928,111 @@ nested_ao_resolve_test() ->
<<"key3">> => <<"Value2">>
}
}
- }, Wallet),
- #{}
+ }, Opts),
+ Opts
),
?assertEqual(<<"Value2">>, Res).
-wasm_compute_request(ImageFile, Func, Params) ->
- wasm_compute_request(ImageFile, Func, Params, <<"">>).
-wasm_compute_request(ImageFile, Func, Params, ResultPath) ->
+wasm_compute_request(ImageFile, Func, Params, Opts) ->
+ wasm_compute_request(ImageFile, Func, Params, <<"">>, Opts).
+wasm_compute_request(ImageFile, Func, Params, ResultPath, Opts) ->
{ok, Bin} = file:read_file(ImageFile),
- Wallet = hb:wallet(),
- hb_message:commit(#{
- <<"path">> => <<"/init/compute/results", ResultPath/binary>>,
- <<"device">> => <<"wasm-64@1.0">>,
- <<"function">> => Func,
- <<"parameters">> => Params,
- <<"body">> => Bin
- }, Wallet).
+ hb_message:commit(
+ #{
+ <<"path">> => <<"/init/compute/results", ResultPath/binary>>,
+ <<"device">> => <<"wasm-64@1.0">>,
+ <<"function">> => Func,
+ <<"parameters">> => Params,
+ <<"body">> => Bin
+ },
+ Opts
+ ).
run_wasm_unsigned_test() ->
- Node = hb_http_server:start_node(#{force_signed => false}),
- Msg = wasm_compute_request(<<"test/test-64.wasm">>, <<"fac">>, [3.0]),
- {ok, Res} = post(Node, Msg, #{}),
+ Node = hb_http_server:start_node(Opts = #{
+ force_signed => false,
+ priv_wallet => hb:wallet()
+ }),
+ Msg = wasm_compute_request(<<"test/test-64.wasm">>, <<"fac">>, [3.0], Opts),
+ {ok, Res} = post(Node, Msg, Opts),
?event({res, Res}),
- ?assertEqual(6.0, hb_ao:get(<<"output/1">>, Res, #{})).
+ ?assertEqual(6.0, hb_ao:get(<<"output/1">>, Res, Opts)).
run_wasm_signed_test() ->
Opts = #{ priv_wallet => hb:wallet() },
- URL = hb_http_server:start_node(#{force_signed => true}),
- Msg = wasm_compute_request(<<"test/test-64.wasm">>, <<"fac">>, [3.0], <<"">>),
+ URL = hb_http_server:start_node(Opts#{force_signed => true}),
+ Msg = wasm_compute_request(
+ <<"test/test-64.wasm">>,
+ <<"fac">>,
+ [3.0],
+ <<"">>,
+ Opts
+ ),
{ok, Res} = post(URL, hb_message:commit(Msg, Opts), Opts),
- ?assertEqual(6.0, hb_ao:get(<<"output/1">>, Res, #{})).
+ ?assertEqual(6.0, hb_ao:get(<<"output/1">>, Res, Opts)).
get_deep_unsigned_wasm_state_test() ->
- URL = hb_http_server:start_node(#{force_signed => false}),
- Msg = wasm_compute_request(<<"test/test-64.wasm">>, <<"fac">>, [3.0], <<"">>),
- {ok, Res} = post(URL, Msg, #{}),
- ?assertEqual(6.0, hb_ao:get(<<"/output/1">>, Res, #{})).
+ Opts = #{ priv_wallet => hb:wallet() },
+ URL = hb_http_server:start_node(Opts#{force_signed => false}),
+ Msg = wasm_compute_request(
+ <<"test/test-64.wasm">>,
+ <<"fac">>,
+ [3.0],
+ <<"">>,
+ Opts
+ ),
+ {ok, Res} = post(URL, Msg, Opts),
+ ?assertEqual(6.0, hb_ao:get(<<"/output/1">>, Res, Opts)).
get_deep_signed_wasm_state_test() ->
- URL = hb_http_server:start_node(#{force_signed => true}),
+ Opts = #{ priv_wallet => hb:wallet() },
+ URL = hb_http_server:start_node(Opts#{force_signed => true}),
Msg =
wasm_compute_request(
<<"test/test-64.wasm">>,
<<"fac">>,
[3.0],
- <<"/output">>
+ <<"/output">>,
+ Opts
),
- {ok, Res} = post(URL, Msg, #{}),
- ?assertEqual(6.0, hb_ao:get(<<"1">>, Res, #{})).
+ {ok, Res} = post(URL, Msg, Opts),
+ ?assertEqual(6.0, hb_ao:get(<<"1">>, Res, Opts)).
cors_get_test() ->
- URL = hb_http_server:start_node(),
- {ok, Res} = get(URL, <<"/~meta@1.0/info">>, #{}),
+ Opts = #{ priv_wallet => hb:wallet() },
+ URL = hb_http_server:start_node(Opts),
+ {ok, Res} = get(URL, <<"/~meta@1.0/info">>, Opts),
?assertEqual(
<<"*">>,
- hb_ao:get(<<"access-control-allow-origin">>, Res, #{})
+ hb_ao:get(<<"access-control-allow-origin">>, Res, Opts)
).
ans104_wasm_test() ->
- TestStore = [hb_test_utils:test_store()],
- TestOpts =
+ Opts = #{ priv_wallet => hb:wallet() },
+ URL = hb_http_server:start_node(Opts#{force_signed => true}),
+ {ok, Bin} = file:read_file(<<"test/test-64.wasm">>),
+ Msg = hb_message:commit(
#{
- force_signed => true,
- store => TestStore,
- priv_wallet => ar_wallet:new()
+ <<"accept-codec">> => <<"ans104@1.0">>,
+ <<"codec-device">> => <<"ans104@1.0">>,
+ <<"device">> => <<"wasm-64@1.0">>,
+ <<"function">> => <<"fac">>,
+ <<"parameters">> => [3.0],
+ <<"body">> => Bin
},
- ClientStore = [hb_test_utils:test_store()],
- ClientOpts = #{ store => ClientStore, priv_wallet => hb:wallet() },
- URL = hb_http_server:start_node(TestOpts),
- {ok, Bin} = file:read_file(<<"test/test-64.wasm">>),
- Msg =
- hb_message:commit(
- #{
- <<"accept-codec">> => <<"ans104@1.0">>,
- <<"codec-device">> => <<"ans104@1.0">>,
- <<"device">> => <<"wasm-64@1.0">>,
- <<"function">> => <<"fac">>,
- <<"parameters">> => [3.0],
- <<"body">> => Bin
- },
- ClientOpts,
- #{ <<"device">> => <<"ans104@1.0">>, <<"bundle">> => true }
- ),
- ?assert(hb_message:verify(Msg, all, ClientOpts)),
- ?event({msg, Msg}),
- {ok, Res} =
- post(
- URL,
- Msg#{ <<"path">> => <<"/init/compute/results">> },
- ClientOpts
- ),
+ Opts,
+ #{ <<"device">> => <<"ans104@1.0">>, <<"bundle">> => true }
+ ),
+ ?assert(hb_message:verify(Msg, all, Opts)),
+ ?event({msg, {explicit, Msg}}),
+ {ok, Res} = post(
+ URL,
+ Msg#{ <<"path">> => <<"/init/compute/results">> },
+ Opts
+ ),
?event({res, Res}),
- ?assertEqual(6.0, hb_ao:get(<<"output/1">>, Res, ClientOpts)).
+ ?assertEqual(6.0, hb_ao:get(<<"output/1">>, Res, Opts)),
+ ok.
send_large_signed_request_test() ->
% Note: If the signature scheme ever changes, we will need to run the
@@ -1044,20 +1054,22 @@ send_large_signed_request_test() ->
% )
% ).
{ok, [Req]} = file:consult(<<"test/large-message.eterm">>),
+ Opts = #{ priv_wallet => hb:wallet() },
% Get the short trace length from the node message in the large, stored
% request.
?assertMatch(
{ok, 5},
post(
- hb_http_server:start_node(),
+ hb_http_server:start_node(Opts),
<<"/node-message/short_trace_len">>,
Req,
- #{ http_client => httpc }
+ Opts#{ http_client => httpc }
)
).
index_test() ->
- NodeURL = hb_http_server:start_node(),
+ Opts = #{ priv_wallet => hb:wallet() },
+ NodeURL = hb_http_server:start_node(Opts),
{ok, Res} =
get(
NodeURL,
@@ -1065,12 +1077,13 @@ index_test() ->
<<"path">> => <<"/~test-device@1.0/load">>,
<<"accept-bundle">> => false
},
- #{}
+ Opts
),
- ?assertEqual(<<"i like turtles!">>, hb_ao:get(<<"body">>, Res, #{})).
+ ?assertEqual(<<"i like turtles!">>, hb_ao:get(<<"body">>, Res, Opts)).
index_request_test() ->
- URL = hb_http_server:start_node(),
+ Opts = #{ priv_wallet => hb:wallet() },
+ URL = hb_http_server:start_node(Opts),
{ok, Res} =
get(
URL,
@@ -1078,6 +1091,6 @@ index_request_test() ->
<<"path">> => <<"/~test-device@1.0/load?name=dogs">>,
<<"accept-bundle">> => false
},
- #{}
+ Opts
),
- ?assertEqual(<<"i like dogs!">>, hb_ao:get(<<"body">>, Res, #{})).
\ No newline at end of file
+ ?assertEqual(<<"i like dogs!">>, hb_ao:get(<<"body">>, Res, Opts)).
\ No newline at end of file
diff --git a/src/hb_message.erl b/src/hb_message.erl
index 6c4d526de..df0809b52 100644
--- a/src/hb_message.erl
+++ b/src/hb_message.erl
@@ -201,6 +201,10 @@ id(Msg, RawCommitters, Opts) ->
%% unsigned ID present. By forcing this work to occur in strategically positioned
%% places, we avoid the need to recalculate the IDs for every `hb_message:id`
%% call.
+normalize_commitments(Msg, _Opts) when is_map(Msg) andalso map_size(Msg) == 0 ->
+ Msg;
+normalize_commitments(Msg = #{<<"priv">> := _}, _Opts) when map_size(Msg) == 1 ->
+ Msg;
normalize_commitments(Msg, Opts) when is_map(Msg) ->
NormMsg =
maps:map(
@@ -210,17 +214,29 @@ normalize_commitments(Msg, Opts) when is_map(Msg) ->
end,
Msg
),
- case hb_maps:get(<<"commitments">>, NormMsg, not_found, Opts) of
- not_found ->
- {ok, #{ <<"commitments">> := Commitments }} =
- dev_message:commit(
- NormMsg,
- #{ <<"type">> => <<"unsigned">> },
- Opts
- ),
- NormMsg#{ <<"commitments">> => Commitments };
- _ -> NormMsg
- end;
+ MessageWithoutHmac =
+ without_commitments(
+ #{ <<"type">> => <<"hmac-sha256">> },
+ NormMsg,
+ Opts
+ ),
+ {ok, #{ <<"commitments">> := Commitments }} =
+ dev_message:commit(
+ MessageWithoutHmac,
+ #{
+ <<"type">> => <<"hmac-sha256">>,
+ <<"bundle">> => hb_maps:get(<<"bundle">>, Opts, false, Opts)
+ },
+ Opts
+ ),
+ MessageWithoutHmac#{
+ <<"commitments">> =>
+ hb_maps:merge(
+ Commitments,
+ hb_maps:get(<<"commitments">>, MessageWithoutHmac, #{}, Opts),
+ Opts
+ )
+ };
normalize_commitments(Msg, Opts) when is_list(Msg) ->
lists:map(fun(X) -> normalize_commitments(X, Opts) end, Msg);
normalize_commitments(Msg, _Opts) ->
@@ -302,21 +318,19 @@ without_unless_signed(Keys, Msg, Opts) ->
).
%% @doc Sign a message with the given wallet.
-commit(Msg, WalletOrOpts) ->
+commit(Msg, Opts) ->
commit(
Msg,
- WalletOrOpts,
+ Opts,
hb_opts:get(
commitment_device,
no_viable_commitment_device,
- case is_map(WalletOrOpts) of
- true -> WalletOrOpts;
- false -> #{ priv_wallet => WalletOrOpts }
- end
+ Opts
)
).
-commit(Msg, Wallet, Format) when not is_map(Wallet) ->
- commit(Msg, #{ priv_wallet => Wallet }, Format);
+commit(Msg, NotOpts, CodecName) when not is_map(NotOpts) ->
+ ?event(error, {deprecated_commit_call, {msg, Msg}, {opts, NotOpts}, {codec, CodecName}}),
+ error({deprecated_commit_call, {arg_must_be_node_msg, NotOpts}});
commit(Msg, Opts, CodecName) when is_binary(CodecName) ->
commit(Msg, Opts, #{ <<"commitment-device">> => CodecName });
commit(Msg, Opts, Spec) ->
@@ -329,12 +343,19 @@ commit(Msg, Opts, Spec) ->
none ->
case hb_maps:get(<<"device">>, Spec, none, Opts) of
none ->
- throw(
- {
- no_commitment_device_in_codec_spec,
- Spec
- }
- );
+ FromOpts =
+ hb_opts:get(
+ commitment_device,
+ no_viable_commitment_device,
+ Opts
+ ),
+ case FromOpts of
+ no_viable_commitment_device ->
+ throw(
+ {unset_commitment_device, Spec}
+ );
+ Device -> Device
+ end;
Device -> Device
end;
CommitmentDevice -> CommitmentDevice
@@ -439,8 +460,6 @@ match(Map1, Map2, Mode, Opts) ->
catch _:Details -> Details
end.
-
-
unsafe_match(Map1, Map2, Mode, Path, Opts) ->
Keys1 =
hb_maps:keys(
@@ -580,17 +599,19 @@ with_commitments(_Spec, Msg, _Opts) ->
%% @doc Filter messages that match the 'spec' given. Inverts the `with_commitments/2'
%% function, such that only messages that do _not_ match the spec are returned.
without_commitments(Spec, Msg = #{ <<"commitments">> := Commitments }, Opts) ->
+ CommitmentKeys =
+ hb_maps:keys(
+ hb_maps:get(
+ <<"commitments">>,
+ with_commitments(Spec, Msg, Opts),
+ #{},
+ Opts
+ )
+ ),
?event({without_commitments, {spec, Spec}, {msg, Msg}, {commitments, Commitments}}),
FilteredCommitments =
hb_maps:without(
- hb_maps:keys(
- hb_maps:get(
- <<"commitments">>,
- with_commitments(Spec, Msg, Opts),
- #{},
- Opts
- )
- ),
+ CommitmentKeys,
Commitments
),
?event({without_commitments, {filtered_commitments, FilteredCommitments}}),
diff --git a/src/hb_message_test_vectors.erl b/src/hb_message_test_vectors.erl
index b26ce6460..c72612852 100644
--- a/src/hb_message_test_vectors.erl
+++ b/src/hb_message_test_vectors.erl
@@ -9,8 +9,8 @@
%% Disable/enable as needed.
run_test() ->
hb:init(),
- encode_small_balance_table_test(
- <<"ans104@1.0">>,
+ verify_nested_complex_signed_test(
+ #{ <<"device">> => <<"httpsig@1.0">>, <<"bundle">> => true },
test_opts(normal)
).
@@ -473,31 +473,33 @@ message_with_large_keys_test(Codec, Opts) ->
%% tests a large portion of the complex type encodings that HyperBEAM uses
%% together.
verify_nested_complex_signed_test(Codec, Opts) ->
- Msg =
- hb_message:commit(#{
- <<"path">> => <<"schedule">>,
- <<"method">> => <<"POST">>,
+ Inner = hb_message:commit(
+ #{
+ <<"type">> => <<"Message">>,
+ <<"function">> => <<"fac">>,
+ <<"parameters">> => #{
+ <<"a">> => 1
+ },
+ % TODO: Figure out why this is needed.
+ <<"content-type">> => <<"application/html">>,
<<"body">> =>
- Inner = hb_message:commit(
- #{
- <<"type">> => <<"Message">>,
- <<"function">> => <<"fac">>,
- <<"parameters">> => #{
- <<"a">> => 1
- },
- <<"content-type">> => <<"application/html">>,
- <<"body">> =>
- <<
- """
-
- Hello, multiline message
-
- """
- >>
- },
- Opts,
- Codec
- )
+ <<
+ """
+
+ Hello, multiline message
+
+ """
+ >>
+ },
+ Opts,
+ Codec
+ ),
+ Msg =
+ hb_message:commit(
+ #{
+ <<"path">> => <<"schedule">>,
+ <<"method">> => <<"POST">>,
+ <<"body">> => Inner
},
Opts,
Codec
@@ -516,16 +518,25 @@ verify_nested_complex_signed_test(Codec, Opts) ->
LoadedMsg = hb_cache:ensure_all_loaded(Decoded, Opts),
?event({loaded, LoadedMsg}),
% % Ensure that the decoded message matches.
- MatchRes = hb_message:match(Msg, Decoded, strict, Opts),
+ MatchRes = hb_message:match(LoadedMsg, Decoded, strict, Opts),
?event({match_result, MatchRes}),
?assert(MatchRes),
+ ?assert(hb_message:verify(Msg, all, Opts)),
?assert(hb_message:verify(Decoded, all, Opts)),
+ NormalizeOpts = case is_map(Codec) of
+ true -> hb_maps:merge(Opts, Codec);
+ _ -> Opts
+ end,
% % Ensure that both of the messages can be verified (and retreived).
- FoundInner = hb_maps:get(<<"body">>, Msg, not_found, Opts),
- LoadedFoundInner = hb_cache:ensure_all_loaded(FoundInner, Opts),
+ FoundInner =
+ hb_maps:get(<<"body">>, Msg, not_found, Opts),
+ NormalizedFoundInner =
+ hb_message:normalize_commitments(FoundInner, NormalizeOpts),
+ LoadedFoundInner =
+ hb_cache:ensure_all_loaded(FoundInner, Opts),
% Verify that the fully loaded version of the inner message, and the one
% gained by applying `hb_maps:get` match and verify.
- ?assert(hb_message:match(Inner, FoundInner, primary, Opts)),
+ ?assert(hb_message:match(Inner, NormalizedFoundInner, primary, Opts)),
?assert(hb_message:match(FoundInner, LoadedFoundInner, primary, Opts)),
?assert(hb_message:verify(Inner, all, Opts)),
?assert(hb_message:verify(LoadedFoundInner, all, Opts)),
@@ -598,8 +609,20 @@ signed_nested_message_with_child_test(Codec, Opts) ->
Encoded = hb_message:convert(Msg, Codec, <<"structured@1.0">>, Opts),
?event({encoded, Encoded}),
Decoded = hb_message:convert(Encoded, <<"structured@1.0">>, Codec, Opts),
+ NormalizeOpts = case is_map(Codec) of
+ true -> Opts#{ <<"bundle">> => <<"true">> };
+ _ -> Opts
+ end,
?event({matching, {input, Msg}, {output, Decoded}}),
- MatchRes = hb_message:match(Msg, Decoded, primary, Opts),
+ MatchRes = hb_message:match(
+ Msg,
+ hb_message:normalize_commitments(
+ hb_cache:ensure_all_loaded(Decoded, Opts),
+ NormalizeOpts
+ ),
+ primary,
+ Opts
+ ),
?event({match_result, MatchRes}),
?assert(MatchRes),
?assert(hb_message:verify(Decoded, all, Opts)).
@@ -1221,10 +1244,14 @@ signed_with_inner_signed_message_test(Codec, Opts) ->
),
?event({verify_inner, {original, InnerSigned}, {from_decoded, InnerFromDecoded}}),
% 3. Verify the outer message after decode.
+ NormalizeOpts = case is_map(Codec) of
+ true -> hb_maps:merge(Opts, Codec);
+ _ -> Opts
+ end,
MatchRes =
hb_message:match(
InnerSigned,
- InnerFromDecoded,
+ hb_message:normalize_commitments(InnerFromDecoded, NormalizeOpts),
primary,
Opts
),
diff --git a/src/hb_opts.erl b/src/hb_opts.erl
index 6eeb7aeb3..26968deda 100644
--- a/src/hb_opts.erl
+++ b/src/hb_opts.erl
@@ -212,7 +212,7 @@ default_message() ->
debug_print_trace => short, % `short` | `false`. Has performance impact.
short_trace_len => 20,
debug_metadata => true,
- debug_ids => true,
+ debug_ids => false,
debug_committers => true,
debug_show_priv => if_present,
debug_resolve_links => true,
@@ -305,7 +305,7 @@ default_message() ->
cache_control => [<<"always">>]
},
% Should the node store all signed messages?
- store_all_signed => true,
+ store_all_signed => false,
% Should the node use persistent processes?
process_workers => false,
% Options for the router device
@@ -326,7 +326,8 @@ default_message() ->
#{ <<"device">> => <<"http-auth@1.0">> }
}
}
- }
+ },
+ lazy_loading => true
% Should the node track and expose prometheus metrics?
% We do not set this explicitly, so that the hb_features:test() value
% can be used to determine if we should expose metrics instead,
diff --git a/src/hb_private.erl b/src/hb_private.erl
index 636611507..d627bca6a 100644
--- a/src/hb_private.erl
+++ b/src/hb_private.erl
@@ -210,8 +210,11 @@ priv_opts_cache_read_message_test() ->
% Ensure we can read the message using the public store.
{ok, PubMsg} = hb_cache:read(ID, Opts),
PubMsgLoaded = hb_cache:ensure_all_loaded(PubMsg, Opts),
- ?assertEqual(Msg, PubMsgLoaded),
+ PubMsgWithCommitments = hb_cache:read_all_commitments(PubMsgLoaded, Opts),
+ ?assertEqual(Msg, PubMsgWithCommitments),
% Read the message using the private store.
{ok, PrivMsg} = hb_cache:read(ID, PrivOpts),
PrivMsgLoaded = hb_cache:ensure_all_loaded(PrivMsg, PrivOpts),
- ?assertEqual(Msg, PrivMsgLoaded).
\ No newline at end of file
+ PrivMsgWithCommitments = hb_cache:read_all_commitments(PrivMsgLoaded, PrivOpts),
+ ?event({match_priv_msg, {msg, {explicit, Msg}}, {priv_msg, {explicit, PrivMsgWithCommitments}}}),
+ ?assertEqual(Msg, PrivMsgWithCommitments).
\ No newline at end of file
diff --git a/src/hb_store.erl b/src/hb_store.erl
index 77fc6c52f..cc395e2b1 100644
--- a/src/hb_store.erl
+++ b/src/hb_store.erl
@@ -83,6 +83,17 @@ behavior_info(callbacks) ->
%%% Store named terms registry functions.
+%% @doc Return the name of a store, or list of names of the given stores.
+names(StoreOpts) when not is_list(StoreOpts) ->
+ names([StoreOpts]);
+names([]) -> [];
+names(Stores) ->
+ [
+ maps:get(<<"name">>, StoreOpts, maps:get(<<"store-module">>, StoreOpts))
+ ||
+ StoreOpts <- Stores
+ ].
+
%% @doc Set the instance options for a given store module and name combination.
set(StoreOpts, InstanceTerm) ->
Mod = maps:get(<<"store-module">>, StoreOpts),
@@ -164,8 +175,16 @@ start([StoreOpts | Rest]) ->
find(StoreOpts),
start(Rest).
+%% @doc Stop the first viable store in the Modules list. If the store returns a
+%% tuple of form `{unset, Store}`, we remove the associated store's instance
+%% from the process dictionary and persistent terms.
stop(Modules) ->
- call_function(Modules, stop, []).
+ case call_function(Modules, stop, []) of
+ {unset, Store} ->
+ set(Store, undefined),
+ ok;
+ Other -> Other
+ end.
%% @doc Takes a store object and a filter function or match spec, returning a
%% new store object with only the modules that match the filter. The filter
@@ -186,7 +205,13 @@ filter(Modules, Filter) ->
%% @doc Limit the store scope to only a specific (set of) option(s).
%% Takes either an Opts message or store, and either a single scope or a list
%% of scopes.
+scope(Store = #{ <<"store-module">> := _ }, Scope) ->
+ % The given value is a single store message, so we wrap it in a list and
+ % call the scope function on it.
+ scope([Store], Scope);
scope(Opts, Scope) when is_map(Opts) ->
+ % The argument is a node message as it is a map with no store-module key.
+ % Scope the store and return the new message.
case hb_opts:get(store, no_viable_store, Opts) of
no_viable_store -> Opts;
Store when is_list(Store) ->
@@ -204,7 +229,8 @@ scope(Opts, Scope) when is_map(Opts) ->
Opts#{ store => scope([Store], Scope) }
end
end;
-scope(Store, Scope) ->
+scope(Store, Scope) when is_list(Store) ->
+ % Filter the list of stores to only include those that match the given scope.
filter(
Store,
fun(StoreScope, _) ->
@@ -311,6 +337,13 @@ match(Modules, Match) -> call_function(Modules, match, [Match]).
%% counter.
-ifdef(STORE_EVENTS).
call_function(X, Function, Args) ->
+ ?event(verbose_store,
+ {call_function,
+ Function,
+ {store, names(X)},
+ {args, Args}
+ }
+ ),
{Time, Result} = timer:tc(fun() -> do_call_function(X, Function, Args) end),
?event(store_events,
{store_call,
@@ -841,10 +874,25 @@ benchmark_message_read_write(Store, WriteOps, ReadOps) ->
fun() ->
lists:foldl(
fun({MsgID, Msg}, Count) ->
+ NormalizedMsg = hb_cache:ensure_all_loaded(
+ hb_message:normalize_commitments(Msg, Opts),
+ Opts
+ ),
case hb_cache:read(MsgID, Opts) of
- {ok, Msg1} ->
- case hb_cache:ensure_all_loaded(Msg1, Opts) of
- Msg -> Count;
+ {ok, CacheMsg} ->
+ NormalizedCacheMsg =
+ hb_message:normalize_commitments(
+ hb_cache:read_all_commitments(
+ hb_cache:ensure_all_loaded(
+ CacheMsg,
+ Opts
+ ),
+ Opts
+ ),
+ Opts
+ ),
+ case NormalizedCacheMsg of
+ NormalizedMsg -> Count;
_ -> Count + 1
end;
_ -> Count + 1
diff --git a/src/hb_store_fs.erl b/src/hb_store_fs.erl
index ac2bf8849..a657d73bd 100644
--- a/src/hb_store_fs.erl
+++ b/src/hb_store_fs.erl
@@ -66,6 +66,7 @@ read(Path) ->
write(Opts, PathComponents, Value) ->
Path = add_prefix(Opts, PathComponents),
?event({writing, Path, byte_size(Value)}),
+ ?event(debug_fs, {writing_key_fs, {path, Path}, {value, Value}}),
filelib:ensure_dir(Path),
ok = file:write_file(Path, Value).
@@ -105,8 +106,11 @@ resolve(Opts, CurrPath, [Next|Rest]) ->
{ok, RawLink} ->
Link = remove_prefix(Opts, RawLink),
resolve(Opts, Link, Rest);
- {error, enoent} ->
- not_found;
+ % {error, enoent} ->
+ % case Rest of
+ % [] -> not_found;
+ % _ -> resolve(Opts, PathPart, Rest)
+ % end;
_ ->
resolve(Opts, PathPart, Rest)
end.
diff --git a/src/hb_store_gateway.erl b/src/hb_store_gateway.erl
index 0bb07bd37..084a5cccf 100644
--- a/src/hb_store_gateway.erl
+++ b/src/hb_store_gateway.erl
@@ -148,7 +148,8 @@ cache_read_message_test() ->
WriteOpts = #{
store =>
[
- #{ <<"store-module">> => hb_store_gateway,
+ #{
+ <<"store-module">> => hb_store_gateway,
<<"local-store">> => [Local]
}
]
diff --git a/src/hb_store_lmdb.erl b/src/hb_store_lmdb.erl
index 56667bdff..5452ed6b1 100644
--- a/src/hb_store_lmdb.erl
+++ b/src/hb_store_lmdb.erl
@@ -31,12 +31,7 @@
%% Configuration constants with reasonable defaults
-define(DEFAULT_SIZE, 16 * 1024 * 1024 * 1024). % 16GB default database size
--define(CONNECT_TIMEOUT, 6000). % Timeout for server communication
--define(DEFAULT_IDLE_FLUSH_TIME, 5). % Idle server time before auto-flush
--define(DEFAULT_MAX_FLUSH_TIME, 50). % Maximum time between flushes
-define(MAX_REDIRECTS, 1000). % Only resolve 1000 links to data
--define(MAX_PENDING_WRITES, 400). % Force flush after x pending
--define(FOLD_YIELD_INTERVAL, 100). % Yield every x keys
%% @doc Start the LMDB storage system for a given database configuration.
%%
@@ -56,6 +51,7 @@ start(Opts = #{ <<"name">> := DataDir }) ->
DataDirPath = hb_util:list(DataDir),
ok = filelib:ensure_dir(filename:join(DataDirPath, "dummy")),
% Create the LMDB environment with specified size limit
+ filelib:ensure_dir(<< (hb_util:bin(DataDir))/binary, "/data.mdb">>),
{ok, Env} =
elmdb:env_open(
DataDirPath,
@@ -168,7 +164,7 @@ read(Opts, PathParts) when is_list(PathParts) ->
read(Opts, to_path(PathParts));
read(Opts, Path) ->
% Try direct read first (fast path for non-link paths)
- case read_with_links(Opts, Path) of
+ Result = case read_with_links(Opts, Path) of
{ok, Value} ->
{ok, Value};
not_found ->
@@ -195,7 +191,9 @@ read(Opts, Path) ->
% If link resolution fails, return not_found
not_found
end
- end.
+ end,
+ ?event(debug_lmdb, {reading_key, {path, Path}, {result, Result}}),
+ Result.
%% @doc Helper function to check if a value is a link and extract the target.
is_link(Value) ->
@@ -535,10 +533,13 @@ resolve(_,_) -> not_found.
%% @doc Retrieve or create the LMDB environment handle for a database.
find_env(Opts) -> hb_store:find(Opts).
-%% Shutdown LMDB environment and cleanup resources
-stop(#{ <<"store-module">> := ?MODULE, <<"name">> := DataDir }) ->
- StoreKey = {lmdb, ?MODULE, DataDir},
- close_environment(StoreKey, DataDir);
+%% @doc Shutdown LMDB environment and cleanup resources
+stop(Store = #{ <<"store-module">> := ?MODULE }) ->
+ #{ <<"env">> := Env } = find_env(Store),
+ try elmdb:env_close(Env) of
+ ok -> {unset, Store}
+ catch error:Reason -> {error, Reason}
+ end;
stop(_InvalidStoreOpts) ->
ok.
@@ -647,7 +648,7 @@ basic_test() ->
?assertEqual(ok, Res),
{ok, Value} = read(StoreOpts, <<"Hello">>),
?assertEqual(Value, <<"World2">>),
- ok = stop(StoreOpts).
+ {unset, _} = stop(StoreOpts).
%% @doc List test - verifies prefix-based key listing functionality.
%%
@@ -817,7 +818,7 @@ path_traversal_link_test() ->
{ok, Result} = read(StoreOpts, [<<"link">>, <<"key">>]),
?event({path_traversal_result, Result}),
?assertEqual(<<"target-value">>, Result),
- ok = stop(StoreOpts).
+ {unset, _} = stop(StoreOpts).
%% @doc Test that matches the exact hb_store hierarchical test pattern
exact_hb_store_test() ->
@@ -848,7 +849,7 @@ exact_hb_store_test() ->
Result = read(StoreOpts, [<<"test-link">>, <<"test-file">>]),
?event(debug, {final_result, Result}),
?assertEqual({ok, <<"test-data">>}, Result),
- ok = stop(StoreOpts).
+ {unset, _} = stop(StoreOpts).
%% @doc Test cache-style usage through hb_store interface
cache_style_test() ->
@@ -958,7 +959,7 @@ nested_map_cache_test() ->
?event({reconstructed_map, ReconstructedMap}),
% Verify the reconstructed map matches the original structure
?assert(hb_message:match(OriginalMap, ReconstructedMap)),
- stop(StoreOpts).
+ {unset, _} = stop(StoreOpts).
%% Helper function to recursively reconstruct a map from the store
reconstruct_map(StoreOpts, Path) ->
diff --git a/src/hb_util.erl b/src/hb_util.erl
index b9122ff46..1c1148b83 100644
--- a/src/hb_util.erl
+++ b/src/hb_util.erl
@@ -533,6 +533,59 @@ is_hb_module(Str, Prefixes) ->
all_hb_modules() ->
lists:filter(fun(Module) -> is_hb_module(Module) end, erlang:loaded()).
+%% @doc Print a trace to the standard error stream.
+print_trace_short(Trace, Mod, Func, Line) ->
+ io:format(standard_error, "=== [ HB SHORT TRACE ~p:~w ~p ] ==> ~s~n",
+ [
+ Mod, Line, Func,
+ format_trace_short(Trace)
+ ]
+ ).
+
+%% @doc Return a list of calling modules and lines from a trace.
+trace_to_list(Trace) ->
+ Prefixes = hb_opts:get(stack_print_prefixes, [], #{}),
+ lists:filtermap(
+ fun(TraceItem) ->
+ Formatted = format_trace_element(TraceItem),
+ case is_hb_module(Formatted, Prefixes) of
+ true -> {true, Formatted};
+ false -> false
+ end
+ end,
+ Trace
+ ).
+
+%% @doc Format a trace to a short string.
+format_trace_short() -> format_trace_short(get_trace()).
+format_trace_short(Trace) when is_list(Trace) ->
+ lists:join(" / ", lists:reverse(trace_to_list(Trace))).
+
+%% @doc Format a trace element in form `mod:line' or `mod:func'.
+format_trace_element({Mod, _, _, [{file, _}, {line, Line}|_]}) ->
+ lists:flatten(io_lib:format("~p:~p", [Mod, Line]));
+format_trace_element({Mod, Func, _ArityOrTerm, _Extras}) ->
+ lists:flatten(io_lib:format("~p:~p", [Mod, Func])).
+
+%% @doc Utility function to help macro `?trace/0' remove the first frame of the
+%% stack trace.
+trace_macro_helper(Fun, {_, {_, Stack}}, Mod, Func, Line) ->
+ Fun(Stack, Mod, Func, Line).
+
+%% @doc Get the trace of the current process.
+get_trace() ->
+ case catch error(debugging_print) of
+ {_, {_, Stack}} ->
+ normalize_trace(Stack);
+ _ -> []
+ end.
+
+%% @doc Remove all calls from this module from the top of a trace.
+normalize_trace([]) -> [];
+normalize_trace([{Mod, _, _, _}|Rest]) when Mod == ?MODULE ->
+ normalize_trace(Rest);
+normalize_trace(Trace) -> Trace.
+
%%% Statistics
count(Item, List) ->
@@ -719,4 +772,4 @@ lower_case_key_map(Map, Opts) ->
maps:put(hb_util:to_lower(K), lower_case_key_map(V, Opts), Acc);
(K, V, Acc) ->
maps:put(hb_util:to_lower(K), V, Acc)
- end, #{}, Map, Opts).
\ No newline at end of file
+ end, #{}, Map, Opts).
diff --git a/src/hb_volume.erl b/src/hb_volume.erl
index 182217334..d3fdffedb 100644
--- a/src/hb_volume.erl
+++ b/src/hb_volume.erl
@@ -5,7 +5,7 @@ for partitioning, formatting, mounting, and managing encrypted volumes.
""".
-export([list_partitions/0, create_partition/2]).
-export([format_disk/2, mount_disk/4, change_node_store/2]).
--export([check_for_device/1]).
+-export([check_for_device/1, copy_wallet_to_volume/2]).
-include("include/hb.hrl").
-include_lib("eunit/include/eunit.hrl").
@@ -522,7 +522,8 @@ change_node_store(StorePath, CurrentStore) ->
}}.
%%% Helper functions
-%% Execute system command with error checking
+
+%% @doc Execute system command with error checking
safe_exec(Command) ->
safe_exec(Command, ["Error", "failed", "bad", "error"]).
@@ -533,7 +534,7 @@ safe_exec(Command, ErrorKeywords) ->
error -> {error, list_to_binary(Result)}
end.
-%% Check if command result contains error indicators
+%% @doc Check if command result contains error indicators
check_command_errors(Result, Keywords) ->
case lists:any(fun(Keyword) ->
string:find(Result, Keyword) =/= nomatch
@@ -542,7 +543,7 @@ check_command_errors(Result, Keywords) ->
false -> ok
end.
-%% Secure key file management with automatic cleanup
+%% @doc Secure key file management with automatic cleanup
with_secure_key_file(EncKey, Fun) ->
?event(debug_volume, {with_secure_key_file, entry, creating_temp_file}),
os:cmd("sudo mkdir -p /root/tmp"),
@@ -629,72 +630,131 @@ with_secure_key_file(EncKey, Fun) ->
erlang:raise(Class, Reason, Stacktrace)
end.
-% Update the store configuration with a new base path
+%% @doc Update the store configuration with a new base path and copy existing data
-spec update_store_config(StoreConfig :: term(),
- NewPath :: binary()) -> term().
-update_store_config(StoreConfig, NewPath) when is_list(StoreConfig) ->
+ NewPrefix :: binary()) -> term().
+update_store_config(StoreConfig, NewPrefix) when is_list(StoreConfig) ->
% For a list, update each element
- [update_store_config(Item, NewPath) || Item <- StoreConfig];
+ [update_store_config(Item, NewPrefix) || Item <- StoreConfig];
update_store_config(
#{<<"store-module">> := Module} = StoreConfig,
- NewPath
+ NewPrefix
) when is_map(StoreConfig) ->
- % Handle various store module types differently
- case Module of
- hb_store_fs ->
- % For filesystem store, prefix the existing path with the new path
- ExistingPath = maps:get(<<"name">>, StoreConfig, <<"">>),
- NewName = <<NewPath/binary, "/", ExistingPath/binary>>,
- ?event(debug_volume, {fs, StoreConfig, NewPath, NewName}),
- StoreConfig#{<<"name">> => NewName};
- hb_store_lmdb ->
- ExistingPath = maps:get(<<"name">>, StoreConfig, <<"">>),
- NewName = <<NewPath/binary, "/", ExistingPath/binary>>,
- ?event(debug_volume, {migrate_start, ExistingPath, NewName}),
- safe_stop_lmdb_store(StoreConfig),
- ?event(debug_volume, {using_existing_store, NewName}),
- FinalConfig = StoreConfig#{<<"name">> => NewName},
- safe_start_lmdb_store(FinalConfig),
- FinalConfig;
- hb_store_rocksdb ->
- StoreConfig;
+ % Get the existing path
+ ExistingPath = maps:get(<<"name">>, StoreConfig, <<"">>),
+ NewName = <<NewPrefix/binary, "/", ExistingPath/binary>>,
+ ?event(debug_volume,
+ {update_store_config,
+ module, Module,
+ existing_path, ExistingPath,
+ new_name, NewName
+ }
+ ),
+ % Always stop the store first for safe copying
+ ?event(debug_volume, {copy_store_data, stopping_store, {module, Module}}),
+ stop_store_safely(StoreConfig),
+ % Copy existing data to new location
+ copy_store_data(ExistingPath, NewName, Module),
+ % Handle special cases for nested stores
+ UpdatedConfig = case Module of
hb_store_gateway ->
% For gateway store, recursively update nested store configs
- NestedStore = maps:get(<<"store">>, StoreConfig, []),
- StoreConfig#{
- <<"store">> => update_store_config(NestedStore, NewPath)
- };
+ case maps:get(<<"local-store">>, StoreConfig, false) of
+ false ->
+ StoreConfig#{<<"name">> => NewName};
+ LocalStore ->
+ StoreConfig#{
+ <<"name">> => NewName,
+ <<"local-store">> =>
+ update_store_config(LocalStore, NewPrefix)
+ }
+ end;
_ ->
- % For any other store type, update the prefix
- % StoreConfig#{<<"name">> => NewPath}
- ?event(debug_volume, {other, StoreConfig, NewPath}),
- StoreConfig
- end;
-update_store_config({Type, _OldPath, Opts}, NewPath) ->
- % For tuple format with options
- {Type, NewPath, Opts};
-update_store_config({Type, _OldPath}, NewPath) ->
- % For tuple format without options
- {Type, NewPath};
-update_store_config(StoreConfig, _NewPath) ->
- % Return unchanged for any other format
- StoreConfig.
-
-%% Safely stop LMDB store with error handling
-safe_stop_lmdb_store(StoreConfig) ->
+ % For all other store types, just update the name
+ StoreConfig#{<<"name">> => NewName}
+ end,
+ ?event(debug_volume, {update_store_config, updated_config, UpdatedConfig}),
+ UpdatedConfig.
+
+%% @doc Safely stop any store type with error handling
+stop_store_safely(StoreConfig) ->
?event(debug_volume, {stopping_current_store, StoreConfig}),
try
- hb_store_lmdb:stop(StoreConfig)
+ hb_store:stop(StoreConfig)
catch
error:StopReason ->
?event(debug_volume, {stop_error, StopReason})
end.
-%% Safely start LMDB store
-safe_start_lmdb_store(StoreConfig) ->
- NewName = maps:get(<<"name">>, StoreConfig),
- ?event(debug_volume, {starting_new_store, NewName}),
- hb_store_lmdb:start(StoreConfig).
+%% @doc Copy store data from source to destination for any store type
+-spec copy_store_data(binary(), binary(), atom()) -> ok.
+copy_store_data(SourcePath, DestPath, Module) ->
+ ?event(debug_volume, {copy_store_data,
+ entry, {source, SourcePath, dest, DestPath, module, Module}
+ }
+ ),
+ % Check if destination already exists
+ DestStr = binary_to_list(DestPath),
+ case filelib:is_dir(DestStr) of
+ true ->
+ ?event(debug_volume, {copy_store_data, dest_exists, skipping_copy}),
+ ok; % Destination already exists, no need to copy
+ false ->
+ % Check if source exists
+ SourceStr = binary_to_list(SourcePath),
+ case filelib:is_dir(SourceStr) of
+ false ->
+ ?event(debug_volume,
+ {copy_store_data, source_not_found, SourcePath}
+ ),
+ ok; % Nothing to copy
+ true ->
+ ?event(debug_volume,
+ {copy_store_data, source_found, copying}
+ ),
+ copy_directory_contents(SourcePath, DestPath)
+ end
+ end.
+
+%% @doc Generic directory copying function that works for all store types
+-spec copy_directory_contents(binary(), binary()) -> ok.
+copy_directory_contents(SourcePath, DestPath) ->
+ ?event(debug_volume,
+ {copy_directory_contents, entry, {source, SourcePath, dest, DestPath}}
+ ),
+ SourceStr = binary_to_list(SourcePath),
+ DestStr = binary_to_list(DestPath),
+ % Ensure destination directory exists
+ ok = filelib:ensure_dir(DestStr ++ "/"),
+ % Use rsync for reliable copying of all file types
+ CopyCommand = io_lib:format(
+ "rsync -av --sparse '~s/' '~s/'",
+ [SourceStr, DestStr]
+ ),
+ ?event(debug_volume,
+ {copy_directory_contents, executing_rsync, command}
+ ),
+ case os:cmd(CopyCommand) of
+ Result ->
+ ?event(debug_volume,
+ {copy_directory_contents, rsync_completed, checking_errors}
+ ),
+ case {
+ string:find(Result, "rsync error"),
+ string:find(Result, "failed")
+ } of
+ {nomatch, nomatch} ->
+ ?event(debug_volume,
+ {copy_directory_contents, rsync_success, no_errors}
+ ),
+ ok;
+ _ ->
+ ?event(debug_volume,
+ {copy_directory_contents, rsync_error, Result}
+ ),
+ ok % Don't fail the entire operation if copy fails
+ end
+ end.
-doc """
Check if a device exists on the system.
@@ -719,6 +779,67 @@ check_for_device(Device) ->
),
DeviceExists.
+%% @doc Copy wallet to the mounted volume for all store types.
+%% @param StorePath The path to the mounted volume.
+%% @param Opts The current options.
+%% @returns The wallet if found/loaded, undefined otherwise.
+-spec copy_wallet_to_volume(binary(), map()) -> term() | undefined.
+copy_wallet_to_volume(StorePath, Opts) ->
+ ?event(debug_volume, {copy_wallet_to_volume, entry, {store_path, StorePath}}),
+ try
+ WalletName =
+ hb_opts:get(
+ priv_key_location,
+ <<"hyperbeam-key.json">>,
+ Opts
+ ),
+ Cwd = list_to_binary(hb_util:ok(file:get_cwd())),
+ CurrentWallet = <<Cwd/binary, "/", WalletName/binary>>,
+ CachedWallet = <<StorePath/binary, "/", WalletName/binary>>,
+ ?event(debug_volume,
+ {copy_wallet_to_volume, checking_cached_wallet, CachedWallet}
+ ),
+ case filelib:is_regular(binary_to_list(CachedWallet)) of
+ true ->
+ % Use existing wallet from volume
+ ?event(debug_volume,
+ {copy_wallet_to_volume, using_cached_wallet, CachedWallet}
+ ),
+ ar_wallet:load_keyfile(CachedWallet, Opts);
+ false ->
+ % Copy current wallet to volume
+ ?event(debug_volume,
+ {copy_wallet_to_volume,
+ copying_wallet, {from, CurrentWallet, to, CachedWallet}
+ }
+ ),
+ case filelib:is_regular(binary_to_list(CurrentWallet)) of
+ true ->
+ file:copy(CurrentWallet, CachedWallet),
+ ?event(debug_volume,
+ {copy_wallet_to_volume, wallet_copied, success}
+ ),
+ % Return the current wallet from options if available
+ hb_opts:get(priv_wallet, undefined, Opts);
+ false ->
+ ?event(debug_volume,
+ {copy_wallet_to_volume,
+ no_wallet_to_copy, CurrentWallet
+ }
+ ),
+ undefined
+ end
+ end
+ catch
+ Class:Reason ->
+ ?event(debug_volume,
+ {copy_wallet_to_volume, error,
+ {class, Class, reason, Reason}
+ }
+ ),
+ undefined
+ end.
+
%%% Unit Tests
%% Test helper function error checking
check_command_errors_test() ->
@@ -761,8 +882,6 @@ check_command_errors_test() ->
["Error", "failed"]
)
).
-
-%% Test store configuration updates for different types
update_store_config_test() ->
% Test filesystem store
FSStore = #{
@@ -771,16 +890,19 @@ update_store_config_test() ->
},
NewPath = <<"/encrypted/mount">>,
Updated = update_store_config(FSStore, NewPath),
- Expected = FSStore#{<<"name">> => <<"/encrypted/mount/cache">>},
+ Expected = FSStore#{ <<"name">> => <<"/encrypted/mount/cache">> },
?assertEqual(Expected, Updated),
% Test list of stores
- StoreList = [FSStore, #{<<"store-module">> => hb_store_gateway}],
+ StoreList = [FSStore, #{ <<"store-module">> => hb_store_gateway }],
UpdatedList = update_store_config(StoreList, NewPath),
?assertEqual(2, length(UpdatedList)),
- % Test tuple format
- TupleStore = {fs, <<"old_path">>, []},
- UpdatedTuple = update_store_config(TupleStore, NewPath),
- ?assertEqual({fs, NewPath, []}, UpdatedTuple).
+ % Test nested store
+ NestedStore = #{
+ <<"store-module">> => hb_store_gateway,
+ <<"local-store">> => FSStore
+ },
+ UpdatedNested = update_store_config(NestedStore, NewPath),
+ ?assertEqual(NestedStore#{ <<"local-store">> => Expected, <<"name">> => <<"/encrypted/mount/">> }, UpdatedNested).
%% Test secure key file management
with_secure_key_file_test() ->
@@ -850,4 +972,4 @@ safe_exec_mock_test() ->
"Error: disk not found",
["Error", "failed"]
),
- ?assertEqual(error, TestResult2).
\ No newline at end of file
+ ?assertEqual(error, TestResult2).