6 changes: 5 additions & 1 deletion Makefile
@@ -28,7 +28,11 @@ ifeq ($(UNAME_S),Darwin)
endif
else
WAMR_BUILD_PLATFORM = linux
WAMR_BUILD_TARGET = X86_64
ifeq ($(UNAME_M),aarch64)
WAMR_BUILD_TARGET = AARCH64
else
WAMR_BUILD_TARGET = X86_64
endif
endif

wamr: $(WAMR_DIR)/lib/libvmlib.a
5 changes: 4 additions & 1 deletion README.md
@@ -43,12 +43,15 @@ payments information, amongst other configuration options.

To begin using HyperBeam, you will need to install:

- The Erlang runtime (OTP 27)
- Erlang runtime and compiler (OTP 27)
- Rebar3
- Git
- NodeJs (for the Compute Unit on genesis_wasm profile)
- Docker (optional, for containerized deployment)

You will also need:
- Cmake (recommended version is 3.31)
- Rust (latest)
- A wallet and its keyfile *(generate a new wallet and keyfile with https://www.wander.app)*

Then you can clone the HyperBEAM source and build it:
2 changes: 1 addition & 1 deletion rebar.config
@@ -115,7 +115,7 @@
]}.

{deps, [
{elmdb, { git, "https://github.com/twilson63/elmdb-rs.git", {branch, "feat/match" }}},
{elmdb, { git, "https://github.com/twilson63/elmdb-rs.git", {branch, "main" }}},
{b64fast, {git, "https://github.com/ArweaveTeam/b64fast.git", {ref, "58f0502e49bf73b29d95c6d02460d1fb8d2a5273"}}},
{cowboy, {git, "https://github.com/ninenines/cowboy", {tag, "2.13.0"}}},
{gun, {git, "https://github.com/ninenines/gun", {tag, "2.2.0"}}},
2 changes: 1 addition & 1 deletion rebar.lock
@@ -14,7 +14,7 @@
1},
{<<"elmdb">>,
{git,"https://github.com/twilson63/elmdb-rs.git",
{ref,"90c8857cd4ccff341fbe415b96bc5703d17ff7f0"}},
{ref, "5255868638e91b4dff24163467765d780f8a6f4a"}},
0},
{<<"graphql">>,{pkg,<<"graphql_erl">>,<<"0.17.1">>},0},
{<<"gun">>,{pkg,<<"gun">>,<<"2.2.0">>},0},
41 changes: 40 additions & 1 deletion src/dev_cron.erl
@@ -36,7 +36,13 @@ once(_Msg1, Msg2, Opts) ->
maps:put(<<"path">>, CronPath, Msg2)
),
Name = {<<"[email protected]">>, ReqMsgID},
Pid = spawn(fun() -> once_worker(CronPath, ModifiedMsg2, Opts) end),
Pid = spawn(fun() ->
maybe <<DelayBin/binary>> ?= hb_ao:get(<<"after">>, Msg2),
DelaySecs = timer:seconds(hb_util:int(DelayBin)),
timer:sleep(DelaySecs)
end,
once_worker(CronPath, ModifiedMsg2, Opts)
end),
hb_name:register(Name, Pid),
{ok, ReqMsgID}
end.
@@ -286,6 +292,39 @@ once_executed_test()
throw(no_response_from_worker)
end.


%% @doc This test verifies that a one-time task can be scheduled and executed with a delay.
once_delayed_and_executed_test() ->
% start a new node
Node = hb_http_server:start_node(),
% spawn a worker on the new node that calls test_worker/0, which starts
% test_worker/1 with a state of undefined
PID = spawn(fun test_worker/0),
% generate a random id that we can then use later to lookup the worker
ID = hb_util:human_id(crypto:strong_rand_bytes(32)),
% register the worker with the id
hb_name:register({<<"test">>, ID}, PID),
% Construct the URL path with the dynamic ID
UrlPath = <<"/[email protected]/once?after=1&test-id=", ID/binary,
"&cron-path=/[email protected]/update_state">>,
% this should call the worker via the test device
% the test device should look up the worker via the id given
{ok, _ReqMsgId} = hb_http:get(Node, UrlPath, #{}),
% wait for the request to be processed
timer:sleep(1500),
% send a message to the worker to get the state
PID ! {get, self()},
% receive the state from the worker
receive
{state, State} ->
?event({once_delayed_and_executed_test_received_state, {state, State}}),
?assertMatch(#{ <<"test-id">> := ID }, State)
after 1000 ->
FinalLookup = hb_name:lookup({<<"test">>, ID}),
?event({timeout_waiting_for_worker, {pid, PID}, {lookup_result, FinalLookup}}),
throw(no_response_from_worker)
end.

%% @doc This test verifies that a recurring task can be scheduled and executed.
every_worker_loop_test() ->
Node = hb_http_server:start_node(),
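The new spawn body in once/3 above reads an optional `after` parameter (in seconds) from the request and sleeps before invoking once_worker/3, using an OTP `maybe` expression so that a missing or non-binary value simply skips the delay. Below is a minimal, self-contained sketch of that optional-delay pattern; the module name, the `Req` map, and run_job/0 are hypothetical stand-ins and not part of this change.

-module(delay_sketch).
-export([run/1]).

%% Sketch only: mirrors the optional-delay pattern used in the new dev_cron
%% spawn body. `maybe ... ?=` is enabled by default from OTP 27, matching the
%% README requirement above.
run(Req) ->
    maybe
        %% Only sleeps when an <<"after">> key is present and is a binary;
        %% otherwise the failed match makes the maybe-block a no-op.
        <<DelayBin/binary>> ?= maps:get(<<"after">>, Req, undefined),
        timer:sleep(timer:seconds(binary_to_integer(DelayBin)))
    end,
    run_job().

run_job() ->
    io:format("job executed~n").

Calling delay_sketch:run(#{ <<"after">> => <<"1">> }) waits one second before running the job, while delay_sketch:run(#{}) runs it immediately.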
23 changes: 21 additions & 2 deletions src/hb_app.erl
@@ -7,7 +7,7 @@

-behaviour(application).

-export([start/2, stop/1]).
-export([start/2, prep_stop/1, stop/1]).

-include("include/hb.hrl").

@@ -18,5 +18,24 @@ start(_StartType, _StartArgs) ->
_TimestampServer = ar_timestamp:start(),
{ok, _} = hb_http_server:start().

prep_stop(State) ->
maybe
{ok, Opts} ?= find_lmdb_store(),
hb_store_lmdb:flush(Opts)
end,
State.

stop(_State) ->
ok.
maybe
{ok, Opts} ?= find_lmdb_store(),
hb_store_lmdb:stop(Opts)
end,
ok.

find_lmdb_store() ->
Stores = maps:get(store, hb_opts:default_message()),
Pred = fun(S) -> maps:get(<<"store-module">>, S) == hb_store_lmdb end,
case lists:search(Pred, Stores) of
{value, Opts} -> {ok, Opts};
false -> not_found
end.
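In the OTP application callback order, prep_stop/1 runs before the application's supervision tree is shut down, so it is a safe point to flush the LMDB environment; stop/1 then closes it once everything else has terminated. find_lmdb_store/0 assumes the node's default store option is a list of maps, each naming its backend module under a <<"store-module">> key. Below is a sketch of that assumed shape and the lists:search/2 lookup; the store entries are illustrative, not the actual defaults.

%% Hypothetical store list; only the <<"store-module">> key matters here.
Stores = [
    #{ <<"store-module">> => hb_store_fs,   <<"name">> => <<"cache-data">> },
    #{ <<"store-module">> => hb_store_lmdb, <<"name">> => <<"cache-lmdb">> }
],
Pred = fun(S) -> maps:get(<<"store-module">>, S, undefined) == hb_store_lmdb end,
%% lists:search/2 returns {value, Elem} for the first matching element,
%% or false when nothing matches.
case lists:search(Pred, Stores) of
    {value, LmdbOpts} -> {ok, LmdbOpts};
    false             -> not_found
end.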
52 changes: 42 additions & 10 deletions src/hb_store_lmdb.erl
@@ -21,7 +21,7 @@

%% Public API exports
-export([start/1, stop/1, scope/0, scope/1, reset/1]).
-export([read/2, write/3, list/2, match/2]).
-export([read/2, write/3, flush/1, list/2, match/2]).
-export([make_group/2, make_link/3, type/2]).
-export([path/2, add_path/3, resolve/2]).

@@ -55,13 +55,18 @@ start(Opts = #{ <<"name">> := DataDir }) ->
% Ensure the directory exists before opening LMDB environment
DataDirPath = hb_util:list(DataDir),
ok = filelib:ensure_dir(filename:join(DataDirPath, "dummy")),
NoSyncParam =
case maps:get(<<"no-sync">>, Opts, true) of
true -> [no_sync];
false -> []
end,
% Create the LMDB environment with specified size limit
{ok, Env} =
elmdb:env_open(
DataDirPath,
[
{map_size, maps:get(<<"capacity">>, Opts, ?DEFAULT_SIZE)},
no_mem_init, no_sync
no_mem_init, NoSyncParam
]
),
{ok, DBInstance} = elmdb:db_open(Env, [create]),
@@ -141,6 +146,22 @@ write(Opts, Path, Value) ->
retry
end.

-spec flush(map()) -> ok | {error, flush_failed}.
flush(Opts) ->
#{ <<"env">> := DBEnv } = find_env(Opts),
case elmdb:env_sync(DBEnv) of
ok -> ok;
{error, Type, Description} ->
?event(
error,
{lmdb_error,
{type, Type},
{description, Description}
}
),
{error, flush_failed}
end.

%% @doc Read a value from the database by key, with automatic link resolution.
%%
%% This function attempts to read a value directly from the committed database.
@@ -657,8 +678,7 @@ basic_test() ->
list_test() ->
StoreOpts = #{
<<"store-module">> => ?MODULE,
<<"name">> => <<"/tmp/store-2">>,
<<"capacity">> => ?DEFAULT_SIZE
<<"name">> => <<"/tmp/store-2">>
},
reset(StoreOpts),
?assertEqual(list(StoreOpts, <<"colors">>), {ok, []}),
Expand Down Expand Up @@ -763,6 +783,7 @@ type_test() ->
Type2 = type(StoreOpts, <<"assets/1">>),
?event({type2, Type2}),
?assertEqual(simple, Type2).


%% @doc Link key list test - verifies symbolic link creation using structured key paths.
%%
Expand Down Expand Up @@ -985,8 +1006,7 @@ reconstruct_map(StoreOpts, Path) ->
cache_debug_test() ->
StoreOpts = #{
<<"store-module">> => ?MODULE,
<<"name">> => <<"/tmp/cache-debug">>,
<<"capacity">> => ?DEFAULT_SIZE
<<"name">> => <<"/tmp/cache-debug">>
},
reset(StoreOpts),
% Simulate what the cache does:
Expand Down Expand Up @@ -1023,8 +1043,7 @@ cache_debug_test() ->
isolated_type_debug_test() ->
StoreOpts = #{
<<"store-module">> => ?MODULE,
<<"name">> => <<"/tmp/isolated-debug">>,
<<"capacity">> => ?DEFAULT_SIZE
<<"name">> => <<"/tmp/isolated-debug">>
},
reset(StoreOpts),
% Create the exact scenario from user's description:
Expand Down Expand Up @@ -1063,8 +1082,7 @@ isolated_type_debug_test() ->
list_with_link_test() ->
StoreOpts = #{
<<"store-module">> => ?MODULE,
<<"name">> => <<"/tmp/store-list-link">>,
<<"capacity">> => ?DEFAULT_SIZE
<<"name">> => <<"/tmp/store-list-link">>
},
reset(StoreOpts),
% Create a group with some children
@@ -1083,4 +1101,18 @@ list_with_link_test() ->
{ok, LinkChildren} = list(StoreOpts, <<"link-to-group">>),
?event({link_children, LinkChildren}),
?assertEqual(ExpectedChildren, lists:sort(LinkChildren)),
stop(StoreOpts).

%% @doc Test that flush syncs pending writes to disk
flush_test() ->
StoreOpts = #{
<<"store-module">> => ?MODULE,
<<"name">> => <<"/tmp/flush">>
},
reset(StoreOpts),
write(StoreOpts, <<"key1">>, <<"value1">>),
write(StoreOpts, <<"key2">>, <<"value2">>),
write(StoreOpts, <<"key3">>, <<"value3">>),
?assertEqual(ok, flush(StoreOpts)),
?assertEqual({ok, <<"value1">>}, read(StoreOpts, <<"key1">>)),
stop(StoreOpts).
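Taken together, the new `no-sync` option and flush/1 trade durability for write throughput: with no_sync (still the default, per the maps:get/3 default above) LMDB commits are not fsynced on every transaction, so an explicit flush/1, which calls elmdb:env_sync/1, is needed to guarantee the data has reached disk, for example from hb_app:prep_stop/1. A usage sketch, with an illustrative store name:

%% Illustrative configuration; the name/path is an example only.
StoreOpts = #{
    <<"store-module">> => hb_store_lmdb,
    <<"name">>         => <<"/tmp/flush-example">>,
    <<"no-sync">>      => true   % asynchronous commits (the default)
},
hb_store_lmdb:write(StoreOpts, <<"key">>, <<"value">>),
%% Force the environment to sync pending writes to disk before shutdown.
ok = hb_store_lmdb:flush(StoreOpts),
hb_store_lmdb:stop(StoreOpts).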