deps/amqp_client/src/amqp_gen_connection.erl (26 changes: 23 additions & 3 deletions)
@@ -32,6 +32,7 @@
%% connection.block, connection.unblock handler
block_handler,
blocked_by = sets:new([{version, 2}]),
queue_types_published = sets:new([{version, 2}]),
closing = false %% #closing{} | false
}).

@@ -214,18 +215,30 @@ handle_cast({register_blocked_handler, HandlerPid},
{noreply, State1};
handle_cast({conserve_resources, Source, Conserve},
#state{blocked_by = BlockedBy} = State) ->
WasNotBlocked = sets:is_empty(BlockedBy),
WasBlocked = should_block(State),
BlockedBy1 = case Conserve of
true ->
sets:add_element(Source, BlockedBy);
false ->
sets:del_element(Source, BlockedBy)
end,
State1 = State#state{blocked_by = BlockedBy1},
case sets:is_empty(BlockedBy1) of
case should_block(State1) of
true ->
handle_method(#'connection.unblocked'{}, State1);
false when WasNotBlocked ->
false when not WasBlocked ->
handle_method(#'connection.blocked'{}, State1);
false ->
{noreply, State1}
end;
handle_cast({channel_published_to_queue_type, _ChPid, QT},
Review comment from the PR author (Collaborator):
This feature might need a feature flag. For direct connections, if old client code is used on a newer server, the connection process would error after publishing because it isn't expecting this cast. I think that is unlikely to happen in practice, but the mixed-version test suite will probably run into it.

#state{queue_types_published = QTs} = State) ->
WasBlocked = should_block(State),
State1 = State#state{queue_types_published = sets:add_element(QT, QTs)},
case should_block(State1) of
true ->
handle_method(#'connection.unblocked'{}, State1);
false when not WasBlocked ->
handle_method(#'connection.blocked'{}, State1);
false ->
{noreply, State1}
@@ -274,6 +287,13 @@ i(Item, #state{module = Mod, module_state = MState}) -> Mod:i(Item, MState).
register_blocked_handler(Pid, HandlerPid) ->
gen_server:cast(Pid, {register_blocked_handler, HandlerPid}).

should_block(#state{blocked_by = BlockedBy, queue_types_published = QTs}) ->
lists:any(fun ({disk, QT}) ->
sets:is_element(QT, QTs);
(_Resource) ->
true
end, sets:to_list(BlockedBy)).

%%---------------------------------------------------------------------------
%% Command handling
%%---------------------------------------------------------------------------
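The new cast and the reworked conserve_resources handler make the block decision depend on both the active alarm sources (blocked_by) and the queue types this connection has published to (queue_types_published). Below is a minimal sketch of the same predicate outside the module, assuming illustrative resource and queue-type names; the real terms come from the alarm source and the channel's cast.

-module(block_sketch).
-export([demo/0]).

%% Same check as should_block/1 above, but taking the two sets directly
%% instead of the connection's #state{} record.
should_block(BlockedBy, QTsPublished) ->
    lists:any(fun ({disk, QT}) -> sets:is_element(QT, QTsPublished);
                  (_Resource)  -> true
              end, sets:to_list(BlockedBy)).

demo() ->
    %% Assume this connection has only published to classic queues.
    QTs = sets:from_list([rabbit_classic_queue], [{version, 2}]),
    %% A non-disk alarm (e.g. memory) blocks the connection regardless
    %% of which queue types it published to.
    true  = should_block(sets:from_list([memory], [{version, 2}]), QTs),
    %% A per-mount disk alarm tagged with a queue type only blocks
    %% connections that have published to that queue type.
    true  = should_block(sets:from_list([{disk, rabbit_classic_queue}],
                                        [{version, 2}]), QTs),
    false = should_block(sets:from_list([{disk, rabbit_quorum_queue}],
                                        [{version, 2}]), QTs),
    ok.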
deps/rabbit/priv/schema/rabbit.schema (108 changes: 108 additions & 0 deletions)
@@ -1238,6 +1238,87 @@ fun(Conf) ->
end
end}.

%% Tuning of disk monitor polling parameters
{mapping, "disk_monitor.fast_rate", "rabbit.disk_monitor_fast_rate", [
%% Unit: KB/second, for example 250_000 for 250MB/sec.
{datatype, [integer]}
]}.
{mapping, "disk_monitor.min_interval", "rabbit.disk_monitor_min_interval", [
%% Unit: milliseconds.
{datatype, [integer]}
]}.
{mapping, "disk_monitor.max_interval", "rabbit.disk_monitor_max_interval", [
%% Unit: milliseconds.
{datatype, [integer]}
]}.
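For reference, a sketch of the advanced.config terms these three keys map to, based on the mapping targets declared above; the values are illustrative examples, not defaults:

[
 {rabbit, [
   {disk_monitor_fast_rate,    250000}, %% KB/s, i.e. roughly 250 MB/s
   {disk_monitor_min_interval, 100},    %% milliseconds
   {disk_monitor_max_interval, 10000}   %% milliseconds
 ]}
].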

%% Per-queue-type / per-mount disk alarms
{mapping, "disk_free_limits.$num.name", "rabbit.disk_free_limits", [
{datatype, [binary]}
]}.
{mapping, "disk_free_limits.$num.mount", "rabbit.disk_free_limits", [
{datatype, [string]}
]}.
{mapping, "disk_free_limits.$num.limit", "rabbit.disk_free_limits", [
{datatype, [integer, string]},
{validators, ["is_supported_information_unit"]}
]}.
{mapping, "disk_free_limits.$num.queue_types", "rabbit.disk_free_limits", [
{datatype, [binary]}
]}.

{translation, "rabbit.disk_free_limits",
fun(Conf) ->
case cuttlefish_variable:filter_by_prefix("disk_free_limits", Conf) of
[] ->
cuttlefish:unset();
Settings ->
Ls = lists:foldl(
fun ({["disk_free_limits", Num, Key0], Value0}, Acc) ->
Idx = case string:to_integer(Num) of
{N, []} -> N;
_ -> cuttlefish:invalid(lists:flatten(io_lib:format("~p could not be parsed as a number", [Num])))
end,
Key = case Key0 of
"name" -> name;
"mount" -> mount;
"limit" -> limit;
"queue_types" -> queue_types;
_ -> cuttlefish:invalid(lists:flatten(io_lib:format("~p is invalid", [Key0])))
end,
Value = case Key of
queue_types -> string:split(Value0, ",");
_ -> Value0
end,
maps:update_with(
Idx,
fun (#{Key := ExistingValue} = Limit) ->
cuttlefish:warn(
io_lib:format("Disk limit ~b has duplicate setting ~ts, "
"using ~tp instead of ~tp",
[Idx, Key, Value, ExistingValue])),
Limit#{Key := Value};
(Limit) ->
Limit#{Key => Value}
end, #{Key => Value}, Acc);
(Other, _Acc) ->
cuttlefish:invalid(
lists:flatten(io_lib:format("~p is invalid", [Other])))
end, #{}, Settings),
maps:fold(
fun(_Idx, #{name := Name}, Names) ->
case sets:is_element(Name, Names) of
true ->
cuttlefish:invalid(
lists:flatten(io_lib:format("name ~ts is used by multiple mounts", [Name])));
false ->
sets:add_element(Name, Names)
end
end, sets:new([{version, 2}]), Ls),
Ls
end
end}.
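As an illustration (not part of the diff), a configuration like the one sketched below would be folded into an index-keyed map bound to rabbit.disk_free_limits; the exact value shapes are assumptions based on the datatypes declared in the mappings above.

%% rabbitmq.conf (illustrative):
%%   disk_free_limits.1.name        = classic_tier
%%   disk_free_limits.1.mount       = /var/lib/rabbitmq/cq
%%   disk_free_limits.1.limit       = 5GB
%%   disk_free_limits.1.queue_types = classic
%%
%% Approximate translation result:
#{1 => #{name        => <<"classic_tier">>,
         mount       => "/var/lib/rabbitmq/cq",
         limit       => "5GB",
         queue_types => [<<"classic">>]}}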

%%
%% Clustering
%% =====================
@@ -2620,6 +2701,20 @@ end}.
{datatype, {enum, [true, false]}}
]}.

%% Classic queue data directory
{mapping, "classic_queue.data_dir", "rabbit.classic_queue_data_dir", [
{datatype, string}
]}.

{translation, "rabbit.classic_queue_data_dir",
fun(Conf) ->
case cuttlefish:conf_get("classic_queue.data_dir", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.

%%
%% Backing queue version
%%
@@ -2776,6 +2871,19 @@ fun(Conf) ->
end
end}.

{mapping, "stream.data_dir", "osiris.data_dir", [
{datatype, string}
]}.

{translation, "osiris.data_dir",
fun(Conf) ->
case cuttlefish:conf_get("stream.data_dir", Conf, undefined) of
undefined -> cuttlefish:unset();
Val -> Val
end
end
}.
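Both data-directory settings above (classic_queue.data_dir and stream.data_dir) are plain pass-throughs from rabbitmq.conf to the application environment. A hedged sketch with illustrative paths:

%% rabbitmq.conf:
%%   classic_queue.data_dir = /data/rabbitmq/classic
%%   stream.data_dir        = /data/rabbitmq/stream
%%
%% Equivalent advanced.config terms:
[
 {rabbit, [{classic_queue_data_dir, "/data/rabbitmq/classic"}]},
 {osiris, [{data_dir, "/data/rabbitmq/stream"}]}
].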

{mapping, "stream.read_ahead", "rabbit.stream_read_ahead",
[{datatype, {enum, [true, false]}}]}.
