diff --git a/AirLib/AirLib.vcxproj b/AirLib/AirLib.vcxproj
index e580e57a1..43f78da8b 100644
--- a/AirLib/AirLib.vcxproj
+++ b/AirLib/AirLib.vcxproj
@@ -337,7 +337,7 @@
Disabled
_SCL_SECURE_NO_WARNINGS;_CRT_SECURE_NO_WARNINGS;_DEBUG;_LIB;%(PreprocessorDefinitions)
include;deps\eigen3;deps\rpclib\include;$(ProjectDir)..\MavLinkCom\include
- true
+ false
/w34263 /w34266 %(AdditionalOptions)
4100;4505;4820;4464;4514;4710;4571;%(DisableSpecificWarnings)
stdcpp20
@@ -424,7 +424,7 @@
true
NDEBUG;_SCL_SECURE_NO_WARNINGS;_CRT_SECURE_NO_WARNINGS;_LIB;%(PreprocessorDefinitions)
include;deps\eigen3;deps\rpclib\include;$(ProjectDir)..\MavLinkCom\include
- true
+ false
/w34263 /w34266 %(AdditionalOptions)
stdcpp20
diff --git a/AirLib/include/common/CancelToken.hpp b/AirLib/include/common/CancelToken.hpp
index 29df8f368..8ca02a013 100644
--- a/AirLib/include/common/CancelToken.hpp
+++ b/AirLib/include/common/CancelToken.hpp
@@ -8,6 +8,7 @@
#include
#include "common/Common.hpp"
#include "common/common_utils/Utils.hpp"
+#include "common/common_utils/AdvanceSleep.h"
namespace msr
{
@@ -47,7 +48,7 @@ namespace airlib
if (isCancelled()) {
return false;
}
-
+#if SLEEP_MODE == 0
TTimePoint start = ClockFactory::get()->nowNanos();
static constexpr std::chrono::duration<double> MinSleepDuration(0);
@@ -56,7 +57,9 @@ namespace airlib
std::this_thread::sleep_for(MinSleepDuration);
}
-
+#else
+ advanceSleep(secs * 1.0E3);
+#endif
return !isCancelled();
}
diff --git a/AirLib/include/common/ClockBase.hpp b/AirLib/include/common/ClockBase.hpp
index c451b3789..ae1b7964a 100644
--- a/AirLib/include/common/ClockBase.hpp
+++ b/AirLib/include/common/ClockBase.hpp
@@ -7,7 +7,7 @@
#include
#include
#include "Common.hpp"
-
+#include "common/common_utils/AdvanceSleep.h"
namespace msr
{
namespace airlib
@@ -73,12 +73,15 @@ namespace airlib
{
if (dt <= 0)
return;
-
+#if SLEEP_MODE == 0
static constexpr std::chrono::duration<double> MinSleepDuration(0);
TTimePoint start = nowNanos();
//spin wait
while (elapsedSince(start) < dt)
std::this_thread::sleep_for(MinSleepDuration);
+#else
+ advanceSleep(dt * 1e3);
+#endif
}
double getTrueScaleWrtWallClock()
diff --git a/AirLib/include/common/common_utils/AdvanceSleep.h b/AirLib/include/common/common_utils/AdvanceSleep.h
new file mode 100644
index 000000000..63cf8f818
--- /dev/null
+++ b/AirLib/include/common/common_utils/AdvanceSleep.h
@@ -0,0 +1,47 @@
+#ifndef SLEEP_MODE
+#define SLEEP_MODE 1
+#endif
+#if SLEEP_MODE != 0
+#ifndef ADVANCE_SLEEP
+#define ADVANCE_SLEEP
+#include <future>
+#include <queue>
+#include <vector>
+#include <memory>
+double nowMs();
+void advanceSleep(double ms);
+#if SLEEP_MODE == 1
+#ifndef __cpp_lib_atomic_is_always_lock_free
+#define __cpp_lib_atomic_is_always_lock_free 0
+#endif
+#include "atomic_queue/atomic_queue.h"
+namespace advance_sleep
+{
+class Event
+{
+public:
+ double wakeUpTimeMs;
+ std::promise<void> p;
+};
+struct CompareEvent
+{
+ bool operator()(advance_sleep::Event* a, advance_sleep::Event* b)
+ {
+ return a->wakeUpTimeMs > b->wakeUpTimeMs;
+ }
+};
+extern atomic_queue::AtomicQueueB<
+ advance_sleep::Event*,
+ std::allocator<advance_sleep::Event*>,
+ (Event*)NULL,
+ false,
+ false,
+ false>
+ eventQueue;
+extern volatile bool busySpinQuit;
+extern std::priority_queue<advance_sleep::Event*, std::vector<advance_sleep::Event*>, advance_sleep::CompareEvent> pq;
+void busySpin();
+}
+#endif
+#endif
+#endif
\ No newline at end of file
diff --git a/AirLib/include/common/common_utils/ScheduledExecutor.hpp b/AirLib/include/common/common_utils/ScheduledExecutor.hpp
index 9b0c67e99..dc5426f51 100644
--- a/AirLib/include/common/common_utils/ScheduledExecutor.hpp
+++ b/AirLib/include/common/common_utils/ScheduledExecutor.hpp
@@ -11,7 +11,7 @@
#include
#include
#include
-
+#include "AdvanceSleep.h"
namespace common_utils
{
@@ -158,7 +158,7 @@ class ScheduledExecutor
probbaly does spin loop anyway.
*/
-
+#if SLEEP_MODE == 0
if (delay_nanos >= 5000000LL) { //put thread to sleep
std::this_thread::sleep_for(std::chrono::duration<double>(delay_nanos / 1.0E9));
}
@@ -169,6 +169,9 @@ class ScheduledExecutor
//std::this_thread::sleep_for(std::chrono::duration(0));
}
}
+#else
+ advanceSleep(delay_nanos / 1.0E6);
+#endif
}
void executorLoop()
diff --git a/AirLib/include/common/common_utils/atomic_queue/atomic_queue.h b/AirLib/include/common/common_utils/atomic_queue/atomic_queue.h
new file mode 100644
index 000000000..e4eeabdb1
--- /dev/null
+++ b/AirLib/include/common/common_utils/atomic_queue/atomic_queue.h
@@ -0,0 +1,646 @@
+/* -*- mode: c++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+#ifndef ATOMIC_QUEUE_ATOMIC_QUEUE_H_INCLUDED
+#define ATOMIC_QUEUE_ATOMIC_QUEUE_H_INCLUDED
+
+// Copyright (c) 2019 Maxim Egorushkin. MIT License. See the full licence in file LICENSE.
+
+#include "defs.h"
+
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace atomic_queue {
+
+using std::uint32_t;
+using std::uint64_t;
+using std::uint8_t;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace details {
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<size_t elements_per_cache_line> struct GetCacheLineIndexBits { static int constexpr value = 0; };
+template<> struct GetCacheLineIndexBits<256> { static int constexpr value = 8; };
+template<> struct GetCacheLineIndexBits<128> { static int constexpr value = 7; };
+template<> struct GetCacheLineIndexBits< 64> { static int constexpr value = 6; };
+template<> struct GetCacheLineIndexBits< 32> { static int constexpr value = 5; };
+template<> struct GetCacheLineIndexBits< 16> { static int constexpr value = 4; };
+template<> struct GetCacheLineIndexBits< 8> { static int constexpr value = 3; };
+template<> struct GetCacheLineIndexBits< 4> { static int constexpr value = 2; };
+template<> struct GetCacheLineIndexBits< 2> { static int constexpr value = 1; };
+
+template<bool minimize_contention, unsigned array_size, size_t elements_per_cache_line>
+struct GetIndexShuffleBits {
+ static int constexpr bits = GetCacheLineIndexBits<elements_per_cache_line>::value;
+ static unsigned constexpr min_size = 1u << (bits * 2);
+ static int constexpr value = array_size < min_size ? 0 : bits;
+};
+
+template<unsigned array_size, size_t elements_per_cache_line>
+struct GetIndexShuffleBits<false, array_size, elements_per_cache_line> {
+ static int constexpr value = 0;
+};
+
+// Multiple writers/readers contend on the same cache line when storing/loading elements at
+// subsequent indexes, aka false sharing. For power of 2 ring buffer size it is possible to re-map
+// the index in such a way that each subsequent element resides on another cache line, which
+// minimizes contention. This is done by swapping the lowest order N bits (which are the index of
+// the element within the cache line) with the next N bits (which are the index of the cache line)
+// of the element index.
+template<int BITS>
+constexpr unsigned remap_index(unsigned index) noexcept {
+ unsigned constexpr mix_mask{(1u << BITS) - 1};
+ unsigned const mix{(index ^ (index >> BITS)) & mix_mask};
+ return index ^ mix ^ (mix << BITS);
+}
+
+template<>
+constexpr unsigned remap_index<0>(unsigned index) noexcept {
+ return index;
+}
+
+template<int BITS, class T>
+constexpr T& map(T* elements, unsigned index) noexcept {
+ return elements[remap_index<BITS>(index)];
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Implement a "bit-twiddling hack" for finding the next power of 2 in either 32 bits or 64 bits
+// in C++11 compatible constexpr functions. The library no longer maintains C++11 compatibility.
+
+// "Runtime" version for 32 bits
+// --a;
+// a |= a >> 1;
+// a |= a >> 2;
+// a |= a >> 4;
+// a |= a >> 8;
+// a |= a >> 16;
+// ++a;
+
+template<class T>
+constexpr T decrement(T x) noexcept {
+ return x - 1;
+}
+
+template<class T>
+constexpr T increment(T x) noexcept {
+ return x + 1;
+}
+
+template<class T>
+constexpr T or_equal(T x, unsigned u) noexcept {
+ return x | x >> u;
+}
+
+template<class T, class... Args>
+constexpr T or_equal(T x, unsigned u, Args... rest) noexcept {
+ return or_equal(or_equal(x, u), rest...);
+}
+
+constexpr uint32_t round_up_to_power_of_2(uint32_t a) noexcept {
+ return increment(or_equal(decrement(a), 1, 2, 4, 8, 16));
+}
+
+constexpr uint64_t round_up_to_power_of_2(uint64_t a) noexcept {
+ return increment(or_equal(decrement(a), 1, 2, 4, 8, 16, 32));
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class T>
+constexpr T nil() noexcept {
+#if __cpp_lib_atomic_is_always_lock_free // Better compile-time error message requires C++17.
+ static_assert(std::atomic<T>::is_always_lock_free, "Queue element type T is not atomic. Use AtomicQueue2/AtomicQueueB2 for such element types.");
+#endif
+ return {};
+}
+
+template<class T>
+inline void destroy_n(T* p, unsigned n) noexcept {
+ for(auto q = p + n; p != q;)
+ (p++)->~T();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+} // namespace details
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class Derived>
+class AtomicQueueCommon {
+protected:
+ // Put these on different cache lines to avoid false sharing between readers and writers.
+ alignas(CACHE_LINE_SIZE) std::atomic<unsigned> head_ = {};
+ alignas(CACHE_LINE_SIZE) std::atomic<unsigned> tail_ = {};
+
+ // The special member functions are not thread-safe.
+
+ AtomicQueueCommon() noexcept = default;
+
+ AtomicQueueCommon(AtomicQueueCommon const& b) noexcept
+ : head_(b.head_.load(X))
+ , tail_(b.tail_.load(X)) {}
+
+ AtomicQueueCommon& operator=(AtomicQueueCommon const& b) noexcept {
+ head_.store(b.head_.load(X), X);
+ tail_.store(b.tail_.load(X), X);
+ return *this;
+ }
+
+ void swap(AtomicQueueCommon& b) noexcept {
+ unsigned h = head_.load(X);
+ unsigned t = tail_.load(X);
+ head_.store(b.head_.load(X), X);
+ tail_.store(b.tail_.load(X), X);
+ b.head_.store(h, X);
+ b.tail_.store(t, X);
+ }
+
+ template<class T, T NIL>
+ static T do_pop_atomic(std::atomic<T>& q_element) noexcept {
+ if(Derived::spsc_) {
+ for(;;) {
+ T element = q_element.load(A);
+ if(ATOMIC_QUEUE_LIKELY(element != NIL)) {
+ q_element.store(NIL, X);
+ return element;
+ }
+ if(Derived::maximize_throughput_)
+ spin_loop_pause();
+ }
+ }
+ else {
+ for(;;) {
+ T element = q_element.exchange(NIL, A); // (2) The store to wait for.
+ if(ATOMIC_QUEUE_LIKELY(element != NIL))
+ return element;
+ // Do speculative loads while busy-waiting to avoid broadcasting RFO messages.
+ do
+ spin_loop_pause();
+ while(Derived::maximize_throughput_ && q_element.load(X) == NIL);
+ }
+ }
+ }
+
+ template<class T, T NIL>
+ static void do_push_atomic(T element, std::atomic<T>& q_element) noexcept {
+ assert(element != NIL);
+ if(Derived::spsc_) {
+ while(ATOMIC_QUEUE_UNLIKELY(q_element.load(X) != NIL))
+ if(Derived::maximize_throughput_)
+ spin_loop_pause();
+ q_element.store(element, R);
+ }
+ else {
+ for(T expected = NIL; ATOMIC_QUEUE_UNLIKELY(!q_element.compare_exchange_weak(expected, element, R, X)); expected = NIL) {
+ do
+ spin_loop_pause(); // (1) Wait for store (2) to complete.
+ while(Derived::maximize_throughput_ && q_element.load(X) != NIL);
+ }
+ }
+ }
+
+ enum State : unsigned char { EMPTY, STORING, STORED, LOADING };
+
+ template<class T>
+ static T do_pop_any(std::atomic<unsigned char>& state, T& q_element) noexcept {
+ if(Derived::spsc_) {
+ while(ATOMIC_QUEUE_UNLIKELY(state.load(A) != STORED))
+ if(Derived::maximize_throughput_)
+ spin_loop_pause();
+ T element{std::move(q_element)};
+ state.store(EMPTY, R);
+ return element;
+ }
+ else {
+ for(;;) {
+ unsigned char expected = STORED;
+ if(ATOMIC_QUEUE_LIKELY(state.compare_exchange_weak(expected, LOADING, A, X))) {
+ T element{std::move(q_element)};
+ state.store(EMPTY, R);
+ return element;
+ }
+ // Do speculative loads while busy-waiting to avoid broadcasting RFO messages.
+ do
+ spin_loop_pause();
+ while(Derived::maximize_throughput_ && state.load(X) != STORED);
+ }
+ }
+ }
+
+ template<class U, class T>
+ static void do_push_any(U&& element, std::atomic<unsigned char>& state, T& q_element) noexcept {
+ if(Derived::spsc_) {
+ while(ATOMIC_QUEUE_UNLIKELY(state.load(A) != EMPTY))
+ if(Derived::maximize_throughput_)
+ spin_loop_pause();
+ q_element = std::forward<U>(element);
+ state.store(STORED, R);
+ }
+ else {
+ for(;;) {
+ unsigned char expected = EMPTY;
+ if(ATOMIC_QUEUE_LIKELY(state.compare_exchange_weak(expected, STORING, A, X))) {
+ q_element = std::forward<U>(element);
+ state.store(STORED, R);
+ return;
+ }
+ // Do speculative loads while busy-waiting to avoid broadcasting RFO messages.
+ do
+ spin_loop_pause();
+ while(Derived::maximize_throughput_ && state.load(X) != EMPTY);
+ }
+ }
+ }
+
+public:
+ template<class T>
+ bool try_push(T&& element) noexcept {
+ auto head = head_.load(X);
+ if(Derived::spsc_) {
+ if(static_cast<int>(head - tail_.load(X)) >= static_cast<int>(static_cast<Derived&>(*this).size_))
+ return false;
+ head_.store(head + 1, X);
+ }
+ else {
+ do {
+ if(static_cast<int>(head - tail_.load(X)) >= static_cast<int>(static_cast<Derived&>(*this).size_))
+ return false;
+ } while(ATOMIC_QUEUE_UNLIKELY(!head_.compare_exchange_weak(head, head + 1, X, X))); // This loop is not FIFO.
+ }
+
+ static_cast<Derived&>(*this).do_push(std::forward<T>(element), head);
+ return true;
+ }
+
+ template<class T>
+ bool try_pop(T& element) noexcept {
+ auto tail = tail_.load(X);
+ if(Derived::spsc_) {
+ if(static_cast<int>(head_.load(X) - tail) <= 0)
+ return false;
+ tail_.store(tail + 1, X);
+ }
+ else {
+ do {
+ if(static_cast<int>(head_.load(X) - tail) <= 0)
+ return false;
+ } while(ATOMIC_QUEUE_UNLIKELY(!tail_.compare_exchange_weak(tail, tail + 1, X, X))); // This loop is not FIFO.
+ }
+
+ element = static_cast<Derived&>(*this).do_pop(tail);
+ return true;
+ }
+
+ template<class T>
+ void push(T&& element) noexcept {
+ unsigned head;
+ if(Derived::spsc_) {
+ head = head_.load(X);
+ head_.store(head + 1, X);
+ }
+ else {
+ constexpr auto memory_order = Derived::total_order_ ? std::memory_order_seq_cst : std::memory_order_relaxed;
+ head = head_.fetch_add(1, memory_order); // FIFO and total order on Intel regardless, as of 2019.
+ }
+ static_cast<Derived&>(*this).do_push(std::forward<T>(element), head);
+ }
+
+ auto pop() noexcept {
+ unsigned tail;
+ if(Derived::spsc_) {
+ tail = tail_.load(X);
+ tail_.store(tail + 1, X);
+ }
+ else {
+ constexpr auto memory_order = Derived::total_order_ ? std::memory_order_seq_cst : std::memory_order_relaxed;
+ tail = tail_.fetch_add(1, memory_order); // FIFO and total order on Intel regardless, as of 2019.
+ }
+ return static_cast<Derived&>(*this).do_pop(tail);
+ }
+
+ bool was_empty() const noexcept {
+ return !was_size();
+ }
+
+ bool was_full() const noexcept {
+ return was_size() >= static_cast<unsigned>(static_cast<Derived const&>(*this).size_);
+ }
+
+ unsigned was_size() const noexcept {
+ // tail_ can be greater than head_ because of consumers doing pop, rather that try_pop, when the queue is empty.
+ return std::max(static_cast(head_.load(X) - tail_.load(X)), 0);
+ }
+
+ unsigned capacity() const noexcept {
+ return static_cast<Derived const&>(*this).size_;
+ }
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class T, unsigned SIZE, T NIL = details::nil<T>(), bool MINIMIZE_CONTENTION = true, bool MAXIMIZE_THROUGHPUT = true, bool TOTAL_ORDER = false, bool SPSC = false>
+class AtomicQueue : public AtomicQueueCommon<AtomicQueue<T, SIZE, NIL, MINIMIZE_CONTENTION, MAXIMIZE_THROUGHPUT, TOTAL_ORDER, SPSC>> {
+ using Base = AtomicQueueCommon<AtomicQueue<T, SIZE, NIL, MINIMIZE_CONTENTION, MAXIMIZE_THROUGHPUT, TOTAL_ORDER, SPSC>>;
+ friend Base;
+
+ static constexpr unsigned size_ = MINIMIZE_CONTENTION ? details::round_up_to_power_of_2(SIZE) : SIZE;
+ static constexpr int SHUFFLE_BITS = details::GetIndexShuffleBits<MINIMIZE_CONTENTION, size_, CACHE_LINE_SIZE / sizeof(std::atomic<T>)>::value;
+ static constexpr bool total_order_ = TOTAL_ORDER;
+ static constexpr bool spsc_ = SPSC;
+ static constexpr bool maximize_throughput_ = MAXIMIZE_THROUGHPUT;
+
+ alignas(CACHE_LINE_SIZE) std::atomic<T> elements_[size_];
+
+ T do_pop(unsigned tail) noexcept {
+ std::atomic<T>& q_element = details::map<SHUFFLE_BITS>(elements_, tail % size_);
+ return Base::template do_pop_atomic<T, NIL>(q_element);
+ }
+
+ void do_push(T element, unsigned head) noexcept {
+ std::atomic<T>& q_element = details::map<SHUFFLE_BITS>(elements_, head % size_);
+ Base::template do_push_atomic<T, NIL>(element, q_element);
+ }
+
+public:
+ using value_type = T;
+
+ AtomicQueue() noexcept {
+ assert(std::atomic<T>{NIL}.is_lock_free()); // Queue element type T is not atomic. Use AtomicQueue2/AtomicQueueB2 for such element types.
+ for(auto p = elements_, q = elements_ + size_; p != q; ++p)
+ p->store(NIL, X);
+ }
+
+ AtomicQueue(AtomicQueue const&) = delete;
+ AtomicQueue& operator=(AtomicQueue const&) = delete;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class T, unsigned SIZE, bool MINIMIZE_CONTENTION = true, bool MAXIMIZE_THROUGHPUT = true, bool TOTAL_ORDER = false, bool SPSC = false>
+class AtomicQueue2 : public AtomicQueueCommon<AtomicQueue2<T, SIZE, MINIMIZE_CONTENTION, MAXIMIZE_THROUGHPUT, TOTAL_ORDER, SPSC>> {
+ using Base = AtomicQueueCommon<AtomicQueue2<T, SIZE, MINIMIZE_CONTENTION, MAXIMIZE_THROUGHPUT, TOTAL_ORDER, SPSC>>;
+ using State = typename Base::State;
+ friend Base;
+
+ static constexpr unsigned size_ = MINIMIZE_CONTENTION ? details::round_up_to_power_of_2(SIZE) : SIZE;
+ static constexpr int SHUFFLE_BITS = details::GetIndexShuffleBits<MINIMIZE_CONTENTION, size_, CACHE_LINE_SIZE / sizeof(State)>::value;
+ static constexpr bool total_order_ = TOTAL_ORDER;
+ static constexpr bool spsc_ = SPSC;
+ static constexpr bool maximize_throughput_ = MAXIMIZE_THROUGHPUT;
+
+ alignas(CACHE_LINE_SIZE) std::atomic<unsigned char> states_[size_] = {};
+ alignas(CACHE_LINE_SIZE) T elements_[size_] = {};
+
+ T do_pop(unsigned tail) noexcept {
+ unsigned index = details::remap_index<SHUFFLE_BITS>(tail % size_);
+ return Base::do_pop_any(states_[index], elements_[index]);
+ }
+
+ template<class U>
+ void do_push(U&& element, unsigned head) noexcept {
+ unsigned index = details::remap_index<SHUFFLE_BITS>(head % size_);
+ Base::do_push_any(std::forward<U>(element), states_[index], elements_[index]);
+ }
+
+public:
+ using value_type = T;
+
+ AtomicQueue2() noexcept = default;
+ AtomicQueue2(AtomicQueue2 const&) = delete;
+ AtomicQueue2& operator=(AtomicQueue2 const&) = delete;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class T, class A = std::allocator<T>, T NIL = details::nil<T>(), bool MAXIMIZE_THROUGHPUT = true, bool TOTAL_ORDER = false, bool SPSC = false>
+class AtomicQueueB : private std::allocator_traits<A>::template rebind_alloc<std::atomic<T>>,
+ public AtomicQueueCommon<AtomicQueueB<T, A, NIL, MAXIMIZE_THROUGHPUT, TOTAL_ORDER, SPSC>> {
+ using AllocatorElements = typename std::allocator_traits<A>::template rebind_alloc<std::atomic<T>>;
+ using Base = AtomicQueueCommon<AtomicQueueB<T, A, NIL, MAXIMIZE_THROUGHPUT, TOTAL_ORDER, SPSC>>;
+ friend Base;
+
+ static constexpr bool total_order_ = TOTAL_ORDER;
+ static constexpr bool spsc_ = SPSC;
+ static constexpr bool maximize_throughput_ = MAXIMIZE_THROUGHPUT;
+
+ static constexpr auto ELEMENTS_PER_CACHE_LINE = CACHE_LINE_SIZE / sizeof(std::atomic<T>);
+ static_assert(ELEMENTS_PER_CACHE_LINE, "Unexpected ELEMENTS_PER_CACHE_LINE.");
+
+ static constexpr auto SHUFFLE_BITS = details::GetCacheLineIndexBits<ELEMENTS_PER_CACHE_LINE>::value;
+ static_assert(SHUFFLE_BITS, "Unexpected SHUFFLE_BITS.");
+
+ // AtomicQueueCommon members are stored into by readers and writers.
+ // Allocate these immutable members on another cache line which never gets invalidated by stores.
+ alignas(CACHE_LINE_SIZE) unsigned size_;
+ std::atomic<T>* elements_;
+
+ T do_pop(unsigned tail) noexcept {
+ std::atomic<T>& q_element = details::map<SHUFFLE_BITS>(elements_, tail & (size_ - 1));
+ return Base::template do_pop_atomic<T, NIL>(q_element);
+ }
+
+ void do_push(T element, unsigned head) noexcept {
+ std::atomic<T>& q_element = details::map<SHUFFLE_BITS>(elements_, head & (size_ - 1));
+ Base::template do_push_atomic<T, NIL>(element, q_element);
+ }
+
+public:
+ using value_type = T;
+ using allocator_type = A;
+
+ // The special member functions are not thread-safe.
+
+ AtomicQueueB(unsigned size, A const& allocator = A{})
+ : AllocatorElements(allocator)
+ , size_(std::max(details::round_up_to_power_of_2(size), 1u << (SHUFFLE_BITS * 2)))
+ , elements_(AllocatorElements::allocate(size_)) {
+ assert(std::atomic<T>{NIL}.is_lock_free()); // Queue element type T is not atomic. Use AtomicQueue2/AtomicQueueB2 for such element types.
+ std::uninitialized_fill_n(elements_, size_, NIL);
+ assert(get_allocator() == allocator); // The standard requires the original and rebound allocators to manage the same state.
+ }
+
+ AtomicQueueB(AtomicQueueB&& b) noexcept
+ : AllocatorElements(static_cast<AllocatorElements&&>(b)) // TODO: This must be noexcept, static_assert that.
+ , Base(static_cast<Base&&>(b))
+ , size_(std::exchange(b.size_, 0))
+ , elements_(std::exchange(b.elements_, nullptr))
+ {}
+
+ AtomicQueueB& operator=(AtomicQueueB&& b) noexcept {
+ b.swap(*this);
+ return *this;
+ }
+
+ ~AtomicQueueB() noexcept {
+ if(elements_) {
+ details::destroy_n(elements_, size_);
+ AllocatorElements::deallocate(elements_, size_); // TODO: This must be noexcept, static_assert that.
+ }
+ }
+
+ A get_allocator() const noexcept {
+ return *this; // The standard requires implicit conversion between rebound allocators.
+ }
+
+ void swap(AtomicQueueB& b) noexcept {
+ using std::swap;
+ swap(static_cast<AllocatorElements&>(*this), static_cast<AllocatorElements&>(b));
+ Base::swap(b);
+ swap(size_, b.size_);
+ swap(elements_, b.elements_);
+ }
+
+ friend void swap(AtomicQueueB& a, AtomicQueueB& b) noexcept {
+ a.swap(b);
+ }
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class T, class A = std::allocator<T>, bool MAXIMIZE_THROUGHPUT = true, bool TOTAL_ORDER = false, bool SPSC = false>
+class AtomicQueueB2 : private std::allocator_traits<A>::template rebind_alloc<unsigned char>,
+ public AtomicQueueCommon<AtomicQueueB2<T, A, MAXIMIZE_THROUGHPUT, TOTAL_ORDER, SPSC>> {
+ using StorageAllocator = typename std::allocator_traits<A>::template rebind_alloc<unsigned char>;
+ using Base = AtomicQueueCommon<AtomicQueueB2<T, A, MAXIMIZE_THROUGHPUT, TOTAL_ORDER, SPSC>>;
+ using State = typename Base::State;
+ using AtomicState = std::atomic<unsigned char>;
+ friend Base;
+
+ static constexpr bool total_order_ = TOTAL_ORDER;
+ static constexpr bool spsc_ = SPSC;
+ static constexpr bool maximize_throughput_ = MAXIMIZE_THROUGHPUT;
+
+ // AtomicQueueCommon members are stored into by readers and writers.
+ // Allocate these immutable members on another cache line which never gets invalidated by stores.
+ alignas(CACHE_LINE_SIZE) unsigned size_;
+ AtomicState* states_;
+ T* elements_;
+
+ static constexpr auto STATES_PER_CACHE_LINE = CACHE_LINE_SIZE / sizeof(AtomicState);
+ static_assert(STATES_PER_CACHE_LINE, "Unexpected STATES_PER_CACHE_LINE.");
+
+ static constexpr auto SHUFFLE_BITS = details::GetCacheLineIndexBits<STATES_PER_CACHE_LINE>::value;
+ static_assert(SHUFFLE_BITS, "Unexpected SHUFFLE_BITS.");
+
+ T do_pop(unsigned tail) noexcept {
+ unsigned index = details::remap_index<SHUFFLE_BITS>(tail & (size_ - 1));
+ return Base::do_pop_any(states_[index], elements_[index]);
+ }
+
+ template<class U>
+ void do_push(U&& element, unsigned head) noexcept {
+ unsigned index = details::remap_index<SHUFFLE_BITS>(head & (size_ - 1));
+ Base::do_push_any(std::forward<U>(element), states_[index], elements_[index]);
+ }
+
+ template<class U>
+ U* allocate_() {
+ U* p = reinterpret_cast<U*>(StorageAllocator::allocate(size_ * sizeof(U)));
+ assert(reinterpret_cast<uintptr_t>(p) % alignof(U) == 0); // Allocated storage must be suitably aligned for U.
+ return p;
+ }
+
+ template<class U>
+ void deallocate_(U* p) noexcept {
+ StorageAllocator::deallocate(reinterpret_cast<unsigned char*>(p), size_ * sizeof(U)); // TODO: This must be noexcept, static_assert that.
+ }
+
+public:
+ using value_type = T;
+ using allocator_type = A;
+
+ // The special member functions are not thread-safe.
+
+ AtomicQueueB2(unsigned size, A const& allocator = A{})
+ : StorageAllocator(allocator)
+ , size_(std::max(details::round_up_to_power_of_2(size), 1u << (SHUFFLE_BITS * 2)))
+ , states_(allocate_<AtomicState>())
+ , elements_(allocate_<T>()) {
+ std::uninitialized_fill_n(states_, size_, Base::EMPTY);
+ A a = get_allocator();
+ assert(a == allocator); // The standard requires the original and rebound allocators to manage the same state.
+ for(auto p = elements_, q = elements_ + size_; p < q; ++p)
+ std::allocator_traits<A>::construct(a, p);
+ }
+
+ AtomicQueueB2(AtomicQueueB2&& b) noexcept
+ : StorageAllocator(static_cast<StorageAllocator&&>(b)) // TODO: This must be noexcept, static_assert that.
+ , Base(static_cast<Base&&>(b))
+ , size_(std::exchange(b.size_, 0))
+ , states_(std::exchange(b.states_, nullptr))
+ , elements_(std::exchange(b.elements_, nullptr))
+ {}
+
+ AtomicQueueB2& operator=(AtomicQueueB2&& b) noexcept {
+ b.swap(*this);
+ return *this;
+ }
+
+ ~AtomicQueueB2() noexcept {
+ if(elements_) {
+ A a = get_allocator();
+ for(auto p = elements_, q = elements_ + size_; p < q; ++p)
+ std::allocator_traits<A>::destroy(a, p);
+ deallocate_(elements_);
+ details::destroy_n(states_, size_);
+ deallocate_(states_);
+ }
+ }
+
+ A get_allocator() const noexcept {
+ return *this; // The standard requires implicit conversion between rebound allocators.
+ }
+
+ void swap(AtomicQueueB2& b) noexcept {
+ using std::swap;
+ swap(static_cast<StorageAllocator&>(*this), static_cast<StorageAllocator&>(b));
+ Base::swap(b);
+ swap(size_, b.size_);
+ swap(states_, b.states_);
+ swap(elements_, b.elements_);
+ }
+
+ friend void swap(AtomicQueueB2& a, AtomicQueueB2& b) noexcept {
+ a.swap(b);
+ }
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template
+struct RetryDecorator : Queue {
+ using T = typename Queue::value_type;
+
+ using Queue::Queue;
+
+ void push(T element) noexcept {
+ while(!this->try_push(element))
+ spin_loop_pause();
+ }
+
+ T pop() noexcept {
+ T element;
+ while(!this->try_pop(element))
+ spin_loop_pause();
+ return element;
+ }
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+} // namespace atomic_queue
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#endif // ATOMIC_QUEUE_ATOMIC_QUEUE_H_INCLUDED
diff --git a/AirLib/include/common/common_utils/atomic_queue/atomic_queue_mutex.h b/AirLib/include/common/common_utils/atomic_queue/atomic_queue_mutex.h
new file mode 100644
index 000000000..ea6731f7c
--- /dev/null
+++ b/AirLib/include/common/common_utils/atomic_queue/atomic_queue_mutex.h
@@ -0,0 +1,92 @@
+/* -*- mode: c++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+#ifndef ATOMIC_QUEUE_ATOMIC_QUEUE_SPIN_LOCK_H_INCLUDED
+#define ATOMIC_QUEUE_ATOMIC_QUEUE_SPIN_LOCK_H_INCLUDED
+
+// Copyright (c) 2019 Maxim Egorushkin. MIT License. See the full licence in file LICENSE.
+
+#include "atomic_queue.h"
+#include "spinlock.h"
+
+#include <mutex>
+#include <utility>
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace atomic_queue {
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class M>
+struct ScopedLockType {
+ using type = typename M::scoped_lock;
+};
+
+template<>
+struct ScopedLockType<std::mutex> {
+ using type = std::unique_lock<std::mutex>;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class T, unsigned SIZE, class Mutex, bool MINIMIZE_CONTENTION = true>
+class AtomicQueueMutexT {
+ static constexpr unsigned size_ = MINIMIZE_CONTENTION ? details::round_up_to_power_of_2(SIZE) : SIZE;
+
+ Mutex mutex_;
+ alignas(CACHE_LINE_SIZE) unsigned head_ = 0;
+ alignas(CACHE_LINE_SIZE) unsigned tail_ = 0;
+ alignas(CACHE_LINE_SIZE) T q_[size_] = {};
+
+ static constexpr int SHUFFLE_BITS = details::GetIndexShuffleBits<MINIMIZE_CONTENTION, size_, CACHE_LINE_SIZE / sizeof(T)>::value;
+
+ using ScopedLock = typename ScopedLockType<Mutex>::type;
+
+public:
+ using value_type = T;
+
+ template<class U>
+ bool try_push(U&& element) noexcept {
+ ScopedLock lock(mutex_);
+ if(ATOMIC_QUEUE_LIKELY(head_ - tail_ < size_)) {
+ q_[details::remap_index<SHUFFLE_BITS>(head_ % size_)] = std::forward<U>(element);
+ ++head_;
+ return true;
+ }
+ return false;
+ }
+
+ bool try_pop(T& element) noexcept {
+ ScopedLock lock(mutex_);
+ if(ATOMIC_QUEUE_LIKELY(head_ != tail_)) {
+ element = std::move(q_[details::remap_index<SHUFFLE_BITS>(tail_ % size_)]);
+ ++tail_;
+ return true;
+ }
+ return false;
+ }
+
+ bool was_empty() const noexcept {
+ return static_cast<int>(head_ - tail_) <= 0;
+ }
+
+ bool was_full() const noexcept {
+ return static_cast<int>(head_ - tail_) >= static_cast<int>(size_);
+ }
+};
+
+template<class T, unsigned SIZE, bool MINIMIZE_CONTENTION = true>
+using AtomicQueueMutex = AtomicQueueMutexT<T, SIZE, std::mutex, MINIMIZE_CONTENTION>;
+
+template<class T, unsigned SIZE, bool MINIMIZE_CONTENTION = true>
+using AtomicQueueSpinlock = AtomicQueueMutexT<T, SIZE, Spinlock, MINIMIZE_CONTENTION>;
+
+// template
+// using AtomicQueueSpinlockHle = AtomicQueueMutexT;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+} // namespace atomic_queue
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#endif // ATOMIC_QUEUE_ATOMIC_QUEUE_SPIN_LOCK_H_INCLUDED
diff --git a/AirLib/include/common/common_utils/atomic_queue/barrier.h b/AirLib/include/common/common_utils/atomic_queue/barrier.h
new file mode 100644
index 000000000..23ca0dccc
--- /dev/null
+++ b/AirLib/include/common/common_utils/atomic_queue/barrier.h
@@ -0,0 +1,38 @@
+/* -*- mode: c++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+#ifndef BARRIER_H_INCLUDED
+#define BARRIER_H_INCLUDED
+
+// Copyright (c) 2019 Maxim Egorushkin. MIT License. See the full licence in file LICENSE.
+
+#include "defs.h"
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace atomic_queue {
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+class Barrier {
std::atomic<unsigned> counter_ = {};
+
+public:
+ void wait() noexcept {
+ counter_.fetch_add(1, std::memory_order_acquire);
+ while(counter_.load(std::memory_order_relaxed))
+ spin_loop_pause();
+ }
+
+ void release(unsigned expected_counter) noexcept {
+ while(expected_counter != counter_.load(std::memory_order_relaxed))
+ spin_loop_pause();
+ counter_.store(0, std::memory_order_release);
+ }
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+} // namespace atomic_queue
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#endif // BARRIER_H_INCLUDED
diff --git a/AirLib/include/common/common_utils/atomic_queue/defs.h b/AirLib/include/common/common_utils/atomic_queue/defs.h
new file mode 100644
index 000000000..053f8e162
--- /dev/null
+++ b/AirLib/include/common/common_utils/atomic_queue/defs.h
@@ -0,0 +1,107 @@
+/* -*- mode: c++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+#ifndef ATOMIC_QUEUE_DEFS_H_INCLUDED
+#define ATOMIC_QUEUE_DEFS_H_INCLUDED
+
+// Copyright (c) 2019 Maxim Egorushkin. MIT License. See the full licence in file LICENSE.
+
+#include <atomic>
+
+#if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)
+#include <emmintrin.h>
+namespace atomic_queue {
+constexpr int CACHE_LINE_SIZE = 64;
+static inline void spin_loop_pause() noexcept {
+ _mm_pause();
+}
+} // namespace atomic_queue
+#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64)
+namespace atomic_queue {
+constexpr int CACHE_LINE_SIZE = 64;
+static inline void spin_loop_pause() noexcept {
+#if (defined(__ARM_ARCH_6K__) || \
+ defined(__ARM_ARCH_6Z__) || \
+ defined(__ARM_ARCH_6ZK__) || \
+ defined(__ARM_ARCH_6T2__) || \
+ defined(__ARM_ARCH_7__) || \
+ defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || \
+ defined(__ARM_ARCH_7M__) || \
+ defined(__ARM_ARCH_7S__) || \
+ defined(__ARM_ARCH_8A__) || \
+ defined(__aarch64__))
+ asm volatile ("yield" ::: "memory");
+#elif defined(_M_ARM64)
+ __yield();
+#else
+ asm volatile ("nop" ::: "memory");
+#endif
+}
+} // namespace atomic_queue
+#elif defined(__ppc64__) || defined(__powerpc64__)
+namespace atomic_queue {
+constexpr int CACHE_LINE_SIZE = 128; // TODO: Review that this is the correct value.
+static inline void spin_loop_pause() noexcept {
+ asm volatile("or 31,31,31 # very low priority"); // TODO: Review and benchmark that this is the right instruction.
+}
+} // namespace atomic_queue
+#elif defined(__s390x__)
+namespace atomic_queue {
+constexpr int CACHE_LINE_SIZE = 256; // TODO: Review that this is the correct value.
+static inline void spin_loop_pause() noexcept {} // TODO: Find the right instruction to use here, if any.
+} // namespace atomic_queue
+#elif defined(__riscv)
+namespace atomic_queue {
+constexpr int CACHE_LINE_SIZE = 64;
+static inline void spin_loop_pause() noexcept {
+ asm volatile (".insn i 0x0F, 0, x0, x0, 0x010");
+}
+} // namespace atomic_queue
+#elif defined(__loongarch__)
+namespace atomic_queue {
+constexpr int CACHE_LINE_SIZE = 64;
+static inline void spin_loop_pause() noexcept
+{
+ asm volatile("nop \n nop \n nop \n nop \n nop \n nop \n nop \n nop");
+}
+} // namespace atomic_queue
+#else
+#ifdef _MSC_VER
+#pragma message("Unknown CPU architecture. Using L1 cache line size of 64 bytes and no spinloop pause instruction.")
+#else
+#warning "Unknown CPU architecture. Using L1 cache line size of 64 bytes and no spinloop pause instruction."
+#endif
+namespace atomic_queue {
+constexpr int CACHE_LINE_SIZE = 64; // TODO: Review that this is the correct value.
+static inline void spin_loop_pause() noexcept {}
+} // namespace atomic_queue
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace atomic_queue {
+
+#if defined(__GNUC__) || defined(__clang__)
+#define ATOMIC_QUEUE_LIKELY(expr) __builtin_expect(static_cast<bool>(expr), 1)
+#define ATOMIC_QUEUE_UNLIKELY(expr) __builtin_expect(static_cast<bool>(expr), 0)
+#define ATOMIC_QUEUE_NOINLINE __attribute__((noinline))
+#else
+#define ATOMIC_QUEUE_LIKELY(expr) (expr)
+#define ATOMIC_QUEUE_UNLIKELY(expr) (expr)
+#define ATOMIC_QUEUE_NOINLINE
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+auto constexpr A = std::memory_order_acquire;
+auto constexpr R = std::memory_order_release;
+auto constexpr X = std::memory_order_relaxed;
+auto constexpr C = std::memory_order_seq_cst;
+auto constexpr AR = std::memory_order_acq_rel;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+} // namespace atomic_queue
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#endif // ATOMIC_QUEUE_DEFS_H_INCLUDED
diff --git a/AirLib/include/common/common_utils/atomic_queue/spinlock.h b/AirLib/include/common/common_utils/atomic_queue/spinlock.h
new file mode 100644
index 000000000..802ea4676
--- /dev/null
+++ b/AirLib/include/common/common_utils/atomic_queue/spinlock.h
@@ -0,0 +1,203 @@
+/* -*- mode: c++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+#ifndef ATOMIC_QUEUE_SPIN_LOCK_H_INCLUDED
+#define ATOMIC_QUEUE_SPIN_LOCK_H_INCLUDED
+
+// Copyright (c) 2019 Maxim Egorushkin. MIT License. See the full licence in file LICENSE.
+
+#include "defs.h"
+
+#include <atomic>
+#include <cstdlib>
+#include <mutex>
+
+#include <pthread.h>
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace atomic_queue {
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+class Spinlock {
+ pthread_spinlock_t s_;
+
+public:
+    using scoped_lock = std::lock_guard<Spinlock>;
+
+ Spinlock() noexcept {
+ if(ATOMIC_QUEUE_UNLIKELY(::pthread_spin_init(&s_, 0)))
+ std::abort();
+ }
+
+ Spinlock(Spinlock const&) = delete;
+ Spinlock& operator=(Spinlock const&) = delete;
+
+ ~Spinlock() noexcept {
+ ::pthread_spin_destroy(&s_);
+ }
+
+ void lock() noexcept {
+ if(ATOMIC_QUEUE_UNLIKELY(::pthread_spin_lock(&s_)))
+ std::abort();
+ }
+
+ void unlock() noexcept {
+ if(ATOMIC_QUEUE_UNLIKELY(::pthread_spin_unlock(&s_)))
+ std::abort();
+ }
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+class TicketSpinlock {
+    alignas(CACHE_LINE_SIZE) std::atomic<unsigned> ticket_{0};
+    alignas(CACHE_LINE_SIZE) std::atomic<unsigned> next_{0};
+
+public:
+ class LockGuard {
+ TicketSpinlock* const m_;
+ unsigned const ticket_;
+ public:
+ LockGuard(TicketSpinlock& m) noexcept
+ : m_(&m)
+ , ticket_(m.lock())
+ {}
+
+ LockGuard(LockGuard const&) = delete;
+ LockGuard& operator=(LockGuard const&) = delete;
+
+ ~LockGuard() noexcept {
+ m_->unlock(ticket_);
+ }
+ };
+
+ using scoped_lock = LockGuard;
+
+ TicketSpinlock() noexcept = default;
+ TicketSpinlock(TicketSpinlock const&) = delete;
+ TicketSpinlock& operator=(TicketSpinlock const&) = delete;
+
+ ATOMIC_QUEUE_NOINLINE unsigned lock() noexcept {
+ auto ticket = ticket_.fetch_add(1, std::memory_order_relaxed);
+ for(;;) {
+ auto position = ticket - next_.load(std::memory_order_acquire);
+ if(ATOMIC_QUEUE_LIKELY(!position))
+ break;
+ do
+ spin_loop_pause();
+ while(--position);
+ }
+ return ticket;
+ }
+
+ void unlock() noexcept {
+ unlock(next_.load(std::memory_order_relaxed) + 1);
+ }
+
+ void unlock(unsigned ticket) noexcept {
+ next_.store(ticket + 1, std::memory_order_release);
+ }
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+class UnfairSpinlock {
+    std::atomic<unsigned> lock_{0};
+
+public:
+    using scoped_lock = std::lock_guard<UnfairSpinlock>;
+
+ UnfairSpinlock(UnfairSpinlock const&) = delete;
+ UnfairSpinlock& operator=(UnfairSpinlock const&) = delete;
+
+ void lock() noexcept {
+ for(;;) {
+ if(!lock_.load(std::memory_order_relaxed) && !lock_.exchange(1, std::memory_order_acquire))
+ return;
+ spin_loop_pause();
+ }
+ }
+
+ void unlock() noexcept {
+ lock_.store(0, std::memory_order_release);
+ }
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// class SpinlockHle {
+// int lock_ = 0;
+
+// #ifdef __gcc__
+// static constexpr int HLE_ACQUIRE = __ATOMIC_HLE_ACQUIRE;
+// static constexpr int HLE_RELEASE = __ATOMIC_HLE_RELEASE;
+// #else
+// static constexpr int HLE_ACQUIRE = 0;
+// static constexpr int HLE_RELEASE = 0;
+// #endif
+
+// public:
+// using scoped_lock = std::lock_guard;
+
+// SpinlockHle(SpinlockHle const&) = delete;
+// SpinlockHle& operator=(SpinlockHle const&) = delete;
+
+// void lock() noexcept {
+// for(int expected = 0;
+// !__atomic_compare_exchange_n(&lock_, &expected, 1, false, __ATOMIC_ACQUIRE | HLE_ACQUIRE, __ATOMIC_RELAXED);
+// expected = 0)
+// spin_loop_pause();
+// }
+
+// void unlock() noexcept {
+// __atomic_store_n(&lock_, 0, __ATOMIC_RELEASE | HLE_RELEASE);
+// }
+// };
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// class AdaptiveMutex {
+// pthread_mutex_t m_;
+
+// public:
+// using scoped_lock = std::lock_guard;
+
+// AdaptiveMutex() noexcept {
+// pthread_mutexattr_t a;
+// if(ATOMIC_QUEUE_UNLIKELY(::pthread_mutexattr_init(&a)))
+// std::abort();
+// if(ATOMIC_QUEUE_UNLIKELY(::pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ADAPTIVE_NP)))
+// std::abort();
+// if(ATOMIC_QUEUE_UNLIKELY(::pthread_mutex_init(&m_, &a)))
+// std::abort();
+// if(ATOMIC_QUEUE_UNLIKELY(::pthread_mutexattr_destroy(&a)))
+// std::abort();
+// m_.__data.__spins = 32767;
+// }
+
+// AdaptiveMutex(AdaptiveMutex const&) = delete;
+// AdaptiveMutex& operator=(AdaptiveMutex const&) = delete;
+
+// ~AdaptiveMutex() noexcept {
+// if(ATOMIC_QUEUE_UNLIKELY(::pthread_mutex_destroy(&m_)))
+// std::abort();
+// }
+
+// void lock() noexcept {
+// if(ATOMIC_QUEUE_UNLIKELY(::pthread_mutex_lock(&m_)))
+// std::abort();
+// }
+
+// void unlock() noexcept {
+// if(ATOMIC_QUEUE_UNLIKELY(::pthread_mutex_unlock(&m_)))
+// std::abort();
+// }
+// };
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+} // namespace atomic_queue
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#endif // ATOMIC_QUEUE_SPIN_LOCK_H_INCLUDED
diff --git a/AirLib/src/api/RpcLibClientBase.cpp b/AirLib/src/api/RpcLibClientBase.cpp
index dec50c9b8..c8ff31e25 100644
--- a/AirLib/src/api/RpcLibClientBase.cpp
+++ b/AirLib/src/api/RpcLibClientBase.cpp
@@ -132,6 +132,7 @@ __pragma(warning(disable : 4239))
while (getConnectionState() != RpcLibClientBase::ConnectionState::Connected) {
std::cout << "X" << std::flush;
clock->sleep_for(pause_time);
+ //std::this_thread::sleep_for(std::chrono::milliseconds(1000));
}
std::cout << std::endl
<< "Connected!" << std::endl;
diff --git a/AirLib/src/common/common_utils/AdvanceSleep.cpp b/AirLib/src/common/common_utils/AdvanceSleep.cpp
new file mode 100644
index 000000000..e6724fa04
--- /dev/null
+++ b/AirLib/src/common/common_utils/AdvanceSleep.cpp
@@ -0,0 +1,79 @@
+#include "common/common_utils/AdvanceSleep.h"
+#if SLEEP_MODE != 0
+double nowMs()
+{
+    return std::chrono::duration<double, std::milli>(std::chrono::system_clock::now().time_since_epoch()).count();
+}
+#endif
+#if SLEEP_MODE == 1
+namespace advance_sleep
+{
+atomic_queue::AtomicQueueB<
+    Event*,
+    std::allocator<Event*>,
+    (Event*)NULL,
+    false,
+    false,
+    false>
+    eventQueue(1024);
+std::priority_queue<Event*, std::vector<Event*>, CompareEvent> pq;
+volatile bool busySpinQuit = false;
+void busySpin()
+{
+ busySpinQuit = false;
+ while (!busySpinQuit) {
+ Event* p;
+ while (eventQueue.try_pop(p)) {
+ pq.push(p);
+ }
+ if (pq.empty()) {
+ continue;
+ }
+ auto now = std::chrono::system_clock::now();
+ auto duration = now.time_since_epoch();
+        double ts = std::chrono::duration<double, std::milli>(duration).count();
+ while (!pq.empty() && pq.top()->wakeUpTimeMs <= ts) {
+ pq.top()->p.set_value();
+ pq.pop();
+ }
+ }
+}
+}
+void advanceSleep(double ms)
+{
+    static std::thread busySpinThread(advance_sleep::busySpin); // NOTE(review): this joinable static thread is never joined/detached, so its destructor calls std::terminate at program exit — confirm shutdown path
+ advance_sleep::Event e;
+ e.wakeUpTimeMs = ms + nowMs();
+ advance_sleep::eventQueue.push(&e);
+ e.p.get_future().wait();
+}
+#elif SLEEP_MODE == 3
+void advanceSleep(double ms)
+{
+ auto start = nowMs();
+ while (nowMs() - start < ms) {
+ std::this_thread::yield();
+ }
+}
+#elif SLEEP_MODE == 4
+void advanceSleep(double ms)
+{
+ auto start = nowMs();
+    static constexpr std::chrono::duration<double> MinSleepDuration(0);
+ while (nowMs() - start < ms) {
+ std::this_thread::sleep_for(MinSleepDuration);
+ }
+}
+#elif SLEEP_MODE == 5
+void advanceSleep(double ms)
+{
+ auto start = nowMs();
+ while (nowMs() - start < ms) {
+ }
+}
+#elif SLEEP_MODE != 0 // SLEEP_MODE == 2
+void advanceSleep(double ms)
+{
+ std::this_thread::sleep_for(std::chrono::nanoseconds((long long)(ms * 1e6)));
+}
+#endif
\ No newline at end of file
diff --git a/AirLib/src/vehicles/multirotor/api/MultirotorRpcLibClient.cpp b/AirLib/src/vehicles/multirotor/api/MultirotorRpcLibClient.cpp
index 59c1d8fd9..6105db456 100644
--- a/AirLib/src/vehicles/multirotor/api/MultirotorRpcLibClient.cpp
+++ b/AirLib/src/vehicles/multirotor/api/MultirotorRpcLibClient.cpp
@@ -248,7 +248,7 @@ __pragma(warning(disable : 4239))
//cancellation or timeout
MultirotorRpcLibClient* MultirotorRpcLibClient::waitOnLastTask(bool* task_result, float timeout_sec)
{
- bool result;
+ /*bool result;
if (std::isnan(timeout_sec) || timeout_sec == Utils::max())
result = pimpl_->last_future.get().as();
else {
@@ -259,6 +259,46 @@ __pragma(warning(disable : 4239))
result = false;
}
+ if (task_result)
+ *task_result = result;
+
+ return this;*/
+
+
+ bool result = false;
+
+    // 1. Core defense: check that the future is valid before touching it
+ if (!pimpl_->last_future.valid()) {
+ if (task_result) *task_result = false;
+ return this;
+ }
+
+ try {
+        if (std::isnan(timeout_sec) || timeout_sec == Utils::max<float>()) {
+            // get() blocks until the task completes or throws
+            result = pimpl_->last_future.get().as<bool>();
+ }
+ else {
+            auto future_status = pimpl_->last_future.wait_for(std::chrono::duration<double>(timeout_sec));
+            if (future_status == std::future_status::ready) {
+                // Re-check valid(): in some implementations the state may change after wait_for
+                if (pimpl_->last_future.valid()) {
+                    result = pimpl_->last_future.get().as<bool>();
+ }
+ }
+ else {
+ result = false;
+ }
+ }
+ }
+    catch (const std::exception& e) {
+        // 2. Catch exceptions: when cancelLastTask is called, get() throws.
+        // We catch it to avoid crashing and treat the task as not completed successfully.
+        std::cout << "Exception in waitOnLastTask: " << e.what() << std::endl;
+        result = false;
+        // Logging could be added here, but a low-level library usually stays silent or lets the caller decide.
+    }
+
if (task_result)
*task_result = result;
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bee313dc3..ce44caf28 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,6 @@
+### Development (version 3.4)
+* Fixed duplicate indexes for annotation system causing meshes to not show up in annotation masks.
+
### April 2025 (version 3.3)
* The latest available stable Unreal Engine version that is now targeted for release is 5.5. This means 5.4 will no longer be actively maintained.
* The documentation now is placed on a sub-URL page here: https://cosys-lab.github.io/Cosys-AirSim
diff --git a/README.md b/README.md
index e6a534fd8..f0804bd5c 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,8 @@ Unreal [5.2.1](https://github.com/Cosys-Lab/Cosys-AirSim/tree/5.2.1) is also ava
volume={},
number={},
pages={37-48},
- doi={}}
+ keywords={Industries;Simultaneous localization and mapping;Machine learning algorithms;Atmospheric modeling;Transfer learning;Sensor systems and applications;Real-time systems;sensors;procedural generation;digital twins;transfer learning;open-source},
+ doi={https://doi.org/10.48550/arXiv.2303.13381}}
```
You can also find the presentation of the live tutorial of Cosys-AirSim at ANNSIM '23 conference [here](https://github.com/Cosys-Lab/Cosys-AirSim/tree/main/docs/annsim23_tutorial) together with the associated videos.
diff --git a/Unreal/Plugins/AirSim/AirSim.uplugin b/Unreal/Plugins/AirSim/AirSim.uplugin
index ae5bf2c5a..3d7119897 100644
--- a/Unreal/Plugins/AirSim/AirSim.uplugin
+++ b/Unreal/Plugins/AirSim/AirSim.uplugin
@@ -1,7 +1,7 @@
{
"FileVersion" : 3,
- "Version" : "2.0.0",
- "VersionName": "2.0.0",
+ "Version" : "3.4.0",
+ "VersionName": "3.4.0",
"FriendlyName": "Cosys-AirSim",
"Description": "Cosys-AirSim - Simulator Plugin",
"Category" : "Science",
diff --git a/Unreal/Plugins/AirSim/Source/AirBlueprintLib.cpp b/Unreal/Plugins/AirSim/Source/AirBlueprintLib.cpp
index 93b4f43fd..600f9034c 100644
--- a/Unreal/Plugins/AirSim/Source/AirBlueprintLib.cpp
+++ b/Unreal/Plugins/AirSim/Source/AirBlueprintLib.cpp
@@ -110,11 +110,14 @@ void UAirBlueprintLib::DrawPoint(const UWorld* InWorld, FVector const& Position,
{
// this means foreground lines can't be persistent
ULineBatchComponent* const LineBatcher = GetLineBatcher(InWorld, bPersistentLines, LifeTime, (DepthPriority == SDPG_Foreground));
- if (LineBatcher != NULL)
- {
- const float PointLifeTime = GetLineLifeTime(LineBatcher, LifeTime, bPersistentLines);
- LineBatcher->DrawPoint(Position, Color.ReinterpretAsLinear(), Size, DepthPriority, PointLifeTime);
- }
+    AsyncTask(ENamedThreads::GameThread, [LineBatcher, Position, Color, Size, DepthPriority, LifeTime, bPersistentLines]() // NOTE(review): raw LineBatcher pointer captured into a deferred task may dangle if the component is destroyed first — confirm lifetime
+ {
+ if (LineBatcher != NULL)
+ {
+ const float PointLifeTime = GetLineLifeTime(LineBatcher, LifeTime, bPersistentLines);
+ LineBatcher->DrawPoint(Position, Color.ReinterpretAsLinear(), Size, DepthPriority, PointLifeTime);
+ }
+ });
}
}
diff --git a/Unreal/Plugins/AirSim/Source/Annotation/AnnotationComponent.cpp b/Unreal/Plugins/AirSim/Source/Annotation/AnnotationComponent.cpp
index 9da3bc40c..27d0a5583 100644
--- a/Unreal/Plugins/AirSim/Source/Annotation/AnnotationComponent.cpp
+++ b/Unreal/Plugins/AirSim/Source/Annotation/AnnotationComponent.cpp
@@ -432,7 +432,7 @@ FPrimitiveSceneProxy* UAnnotationComponent::CreateSceneProxy()
// }
else
{
- UE_LOG(LogTemp, Warning, TEXT("AirSim Annotation: The type of ParentMeshComponent : %s can not be supported."), *ParentComponent->GetClass()->GetName());
+ //UE_LOG(LogTemp, Warning, TEXT("AirSim Annotation: The type of ParentMeshComponent : %s can not be supported."), *ParentComponent->GetClass()->GetName());
return nullptr;
}
// return nullptr;
diff --git a/Unreal/Plugins/AirSim/Source/Annotation/ObjectAnnotator.cpp b/Unreal/Plugins/AirSim/Source/Annotation/ObjectAnnotator.cpp
index db41e4f4b..3aeacd2b0 100644
--- a/Unreal/Plugins/AirSim/Source/Annotation/ObjectAnnotator.cpp
+++ b/Unreal/Plugins/AirSim/Source/Annotation/ObjectAnnotator.cpp
@@ -3,6 +3,8 @@
// Licensed under the MIT License.
#include "ObjectAnnotator.h"
#include "Runtime/Engine/Public/EngineUtils.h"
+#include "SceneInterface.h"
+#include "../Private/ScenePrivate.h"
#include "Runtime/Launch/Resources/Version.h"
#include "AnnotationComponent.h"
#include "AirBlueprintLib.h"
@@ -72,6 +74,16 @@ void FObjectAnnotator::getPaintableComponentMeshes(AActor* actor, TMapGetUniqueID();
+ if (const UPrimitiveComponent* PrimitiveComp = Cast(component))
+ {
+ if (const FPrimitiveSceneProxy* SceneProxy = PrimitiveComp->SceneProxy)
+ {
+ int32 PersistentPrimitiveIndexTemp = SceneProxy->GetPrimitiveSceneInfo()->GetPersistentIndex().Index;
+ if (PersistentPrimitiveIndexTemp != -1)
+ PersistentPrimitiveIndex = PersistentPrimitiveIndexTemp;
+ }
+ }
if (paintable_components.Num() == 1) {
if (UStaticMeshComponent* staticmesh_component = Cast(component)) {
if (actor->GetParentActor()) {
@@ -84,7 +96,9 @@ void FObjectAnnotator::getPaintableComponentMeshes(AActor* actor, TMapGetRootComponent()->GetAttachParent()->GetName());
component_name.Append("_");
}
- component_name.Append(actor->GetParentActor()->GetName());
+ component_name.Append(actor->GetParentActor()->GetName());
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
paintable_components_meshes->Emplace(component_name, component);
}
}
@@ -99,15 +113,23 @@ void FObjectAnnotator::getPaintableComponentMeshes(AActor* actor, TMapGetName());
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
paintable_components_meshes->Emplace(component_name, component);
}
- else {
- paintable_components_meshes->Emplace(actor->GetName(), component);
+ else {
+ FString component_name = actor->GetName();
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
+ paintable_components_meshes->Emplace(component_name, component);
}
}
}
if (USkinnedMeshComponent* SkinnedMeshComponent = Cast(component)) {
- paintable_components_meshes->Emplace(actor->GetName(), component);
+ FString component_name = actor->GetName();
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
+ paintable_components_meshes->Emplace(component_name, component);
}
}
else {
@@ -128,10 +150,14 @@ void FObjectAnnotator::getPaintableComponentMeshes(AActor* actor, TMapGetName());
}
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
}
}
if (USkinnedMeshComponent* skinnedmesh_component = Cast(component)) {
component_name = actor->GetName();
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
}
paintable_components_meshes->Emplace(component_name, component);
index++;
@@ -146,6 +172,16 @@ void FObjectAnnotator::getPaintableComponentMeshesAndTags(AActor* actor, TMapGetUniqueID();
+ if (const UPrimitiveComponent* PrimitiveComp = Cast(component))
+ {
+ if (const FPrimitiveSceneProxy* SceneProxy = PrimitiveComp->SceneProxy)
+ {
+ int32 PersistentPrimitiveIndexTemp = SceneProxy->GetPrimitiveSceneInfo()->GetPersistentIndex().Index;
+ if (PersistentPrimitiveIndexTemp != -1)
+ PersistentPrimitiveIndex = PersistentPrimitiveIndexTemp;
+ }
+ }
if (paintable_components.Num() == 1) {
if (UStaticMeshComponent* staticmesh_component = Cast(component)) {
if (actor->GetParentActor()) {
@@ -159,6 +195,8 @@ void FObjectAnnotator::getPaintableComponentMeshesAndTags(AActor* actor, TMapGetParentActor()->GetName());
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
paintable_components_meshes->Emplace(component_name, component);
paintable_components_tags->Emplace(component_name, staticmesh_component->ComponentTags);
}
@@ -174,6 +212,8 @@ void FObjectAnnotator::getPaintableComponentMeshesAndTags(AActor* actor, TMapGetName());
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
paintable_components_meshes->Emplace(component_name, component);
if (actor->Tags.Num() > 0)
paintable_components_tags->Emplace(component_name, actor->Tags);
@@ -181,21 +221,27 @@ void FObjectAnnotator::getPaintableComponentMeshesAndTags(AActor* actor, TMapEmplace(component_name, staticmesh_component->ComponentTags);
}
else {
- paintable_components_meshes->Emplace(actor->GetName(), component);
+ FString component_name = actor->GetName();
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
+ paintable_components_meshes->Emplace(component_name, component);
if (actor->Tags.Num() > 0)
- paintable_components_tags->Emplace(actor->GetName(), actor->Tags);
+ paintable_components_tags->Emplace(component_name, actor->Tags);
else
- paintable_components_tags->Emplace(actor->GetName(), staticmesh_component->ComponentTags);
+ paintable_components_tags->Emplace(component_name, staticmesh_component->ComponentTags);
}
}
}
if (USkinnedMeshComponent* SkinnedMeshComponent = Cast(component)) {
+ FString component_name = actor->GetName();
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
if (actor->Tags.Num() > 0)
- paintable_components_tags->Emplace(actor->GetName(), actor->Tags);
+ paintable_components_tags->Emplace(component_name, actor->Tags);
else
- paintable_components_tags->Emplace(actor->GetName(), SkinnedMeshComponent->ComponentTags);
- paintable_components_meshes->Emplace(actor->GetName(), component);
+ paintable_components_tags->Emplace(component_name, SkinnedMeshComponent->ComponentTags);
+ paintable_components_meshes->Emplace(component_name, component);
}
}
else {
@@ -217,10 +263,14 @@ void FObjectAnnotator::getPaintableComponentMeshesAndTags(AActor* actor, TMapGetName());
}
}
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
paintable_components_tags->Emplace(component_name, staticmesh_component->ComponentTags);
}
if (USkinnedMeshComponent* skinnedmesh_component = Cast(component)) {
component_name = actor->GetName();
+ component_name.Append("_");
+ component_name.Append(FString::FromInt(PersistentPrimitiveIndex));
paintable_components_tags->Emplace(component_name, skinnedmesh_component->ComponentTags);
}
paintable_components_meshes->Emplace(component_name, component);
diff --git a/cmake/AirLib/CMakeSettings.json b/cmake/AirLib/CMakeSettings.json
new file mode 100644
index 000000000..9204f06eb
--- /dev/null
+++ b/cmake/AirLib/CMakeSettings.json
@@ -0,0 +1,15 @@
+{
+ "configurations": [
+ {
+ "name": "x64-Debug",
+ "generator": "Ninja",
+ "configurationType": "Debug",
+ "inheritEnvironments": [ "msvc_x64_x64" ],
+ "buildRoot": "${projectDir}\\out\\build\\${name}",
+ "installRoot": "${projectDir}\\out\\install\\${name}",
+ "cmakeCommandArgs": "",
+ "buildCommandArgs": "",
+ "ctestCommandArgs": ""
+ }
+ ]
+}
\ No newline at end of file
diff --git a/docker/Dockerfile_binary b/docker/Dockerfile_binary
index 0ee581c4e..ad8c77550 100644
--- a/docker/Dockerfile_binary
+++ b/docker/Dockerfile_binary
@@ -1,4 +1,4 @@
-ARG BASE_IMAGE=ghcr.io/epicgames/unreal-engine:runtime
+ARG BASE_IMAGE=ghcr.io/epicgames/unreal-engine:dev-slim-5.5.4
FROM $BASE_IMAGE
USER root
diff --git a/docker/Dockerfile_source b/docker/Dockerfile_source
index b1cea6338..c828b8755 100644
--- a/docker/Dockerfile_source
+++ b/docker/Dockerfile_source
@@ -1,4 +1,4 @@
-ARG BASE_IMAGE=ghcr.io/epicgames/unreal-engine:dev-5.4.3
+ARG BASE_IMAGE=ghcr.io/epicgames/unreal-engine:dev-slim-5.5.4
FROM $BASE_IMAGE
USER root
diff --git a/docker/build_airsim_image.py b/docker/build_airsim_image.py
index 494b47b06..cbbe8d0f2 100644
--- a/docker/build_airsim_image.py
+++ b/docker/build_airsim_image.py
@@ -1,8 +1,8 @@
 import argparse
import subprocess
def main():
- parser = argparse.ArgumentParser(description='AirSim docker image builder')
+ parser = argparse.ArgumentParser(description='Cosys-AirSim docker image builder')
parser.add_argument('--source', action='store_true', help='compile unreal and airsim from source') # default is false
parser.add_argument('--base_image', type=str, help='base image name AND tag')
parser.add_argument('--target_image', type=str, help='base image name AND tag')
@@ -14,7 +13,7 @@ def build_docker_image(args):
dockerfile = 'Dockerfile_source'
if args.source:
if not args.base_image:
- args.base_image = "ghcr.io/epicgames/unreal-engine:dev-5.4.3"
+ args.base_image = "ghcr.io/epicgames/unreal-engine:dev-slim-5.5.4"
target_image_tag = args.base_image.split(":")[1] # take tag from base image
if not args.target_image:
args.target_image = 'airsim_source' + ':' + target_image_tag
@@ -22,7 +21,7 @@ def build_docker_image(args):
else:
dockerfile = 'Dockerfile_binary'
if not args.base_image:
- args.base_image = "ghcr.io/epicgames/unreal-engine:runtime"
+ args.base_image = "ghcr.io/epicgames/unreal-engine:dev-slim-5.5.4"
target_image_tag = args.base_image.split(":")[1] # take tag from base image
if not args.target_image:
args.target_image = 'airsim_binary' + ':' + target_image_tag
diff --git a/docker/download_blocks_env_binary.sh b/docker/download_blocks_env_binary.sh
index 438d9a867..3077e157e 100755
--- a/docker/download_blocks_env_binary.sh
+++ b/docker/download_blocks_env_binary.sh
@@ -5,6 +5,7 @@ if ! which unzip; then
sudo apt-get install unzip
fi
-wget -c https://github.com/Cosys-Lab/Cosys-AirSim/releases/download/5.4-v3.2/Blocks_packaged_Linux_54_32.zip
-unzip -q Blocks_packaged_Linux_54_32.zip
-rm Blocks_packaged_Linux_54_32.zip
+wget -c https://github.com/Cosys-Lab/Cosys-AirSim/releases/download/5.5-v3.3/Blocks_packaged_Linux_55_33.zip
+unzip -q Blocks_packaged_Linux_55_33.zip
+rm Blocks_packaged_Linux_55_33.zip
+mv Blocks_packaged_Linux_55_33 LinuxBlocks
diff --git a/docker/settings.json b/docker/settings.json
index 90c8d0970..fedf2a33d 100644
--- a/docker/settings.json
+++ b/docker/settings.json
@@ -6,7 +6,7 @@
"ApiServerPort": 41451,
"RecordUIVisible": true,
"MoveWorldOrigin": false,
- "InitialInstanceSegmentation": true,
+ "InitialInstanceSegmentation": false,
"LogMessagesVisible": true,
"ShowLosDebugLines": false,
"ViewMode": "",
@@ -16,41 +16,6 @@
"SpeedUnitFactor": 1,
"SpeedUnitLabel": "m/s",
"SimMode": "Car",
- "Annotation": [
- {
- "Name": "RGBTestDirect",
- "Type": 0,
- "Default": true,
- "SetDirect": true,
- "ViewDistance": 10
- },
- {
- "Name": "RGBTestIndex",
- "Type": 0,
- "Default": true,
- "SetDirect": false
- },
- {
- "Name": "GreyscaleTest",
- "Type": 1,
- "Default": true,
- "ViewDistance": 5
- },
- {
- "Name": "TextureTestDirect",
- "Type": 2,
- "Default": true,
- "SetDirect": true
- },
- {
- "Name": "TextureTestRelativePath",
- "Type": 2,
- "Default": false,
- "SetDirect": false,
- "TexturePath": "/Game/AnnotationTest",
- "TexturePrefix": "Test1"
- }
- ],
"Wind": {
"X": 0,
"Y": 0,
@@ -436,4 +401,4 @@
"DrawDebugLocation": false
}
}
-}
\ No newline at end of file
+}
diff --git a/docs/docker_ubuntu.md b/docs/docker_ubuntu.md
index bacf6b729..3f392f04b 100644
--- a/docs/docker_ubuntu.md
+++ b/docs/docker_ubuntu.md
@@ -1,7 +1,7 @@
# Cosys-AirSim on Docker in Linux
We've two options for docker. You can either build an image for running [Cosys-AirSim binaries](#runtime-binaries), or for compiling Cosys-AirSim [from source](#source).
-## Runtime Binaries
+## Packaged runtime Binaries
#### Requirements:
- [Follow this guide for preparing setting up your GitHub access, installing Docker and authenticating with the GitHub Container Registry.](https://dev.epicgames.com/documentation/en-us/unreal-engine/quick-start-guide-for-using-container-images-in-unreal-engine).
@@ -9,15 +9,15 @@ We've two options for docker. You can either build an image for running [Cosys-A
#### Build the docker image
- Below are the default arguments.
- `--base_image`: This is image over which we'll run a runtime packaged binary. We've tested only the official Unreal Engine runtime container, more info can be found [here](https://dev.epicgames.com/documentation/en-us/unreal-engine/overview-of-containers-in-unreal-engine). Change the base image at your own risk.
  `--base_image`: This is the image over which we'll run the packaged binary. We've tested only the official Unreal Engine _dev-slim_ container, more info can be found [here](https://dev.epicgames.com/documentation/en-us/unreal-engine/overview-of-containers-in-unreal-engine). Normally we would use the _runtime_ container but this does not currently contain the right drivers for Vulkan. Change the base image at your own risk.
`--target_image` is the desired name of your docker image.
Defaults to `airsim_binary` with same tag as the base image.
```bash
cd Airsim/docker;
python build_airsim_image.py \
- --base_image=ghcr.io/epicgames/unreal-engine:runtime \
- --target_image=airsim_binary:runtime
+ --base_image=ghcr.io/epicgames/unreal-engine:dev-slim-5.5.4 \
+ --target_image=airsim_binary:dev-slim-5.5.4
```
- Verify you have an image by:
@@ -44,10 +44,10 @@ xhost +local:docker
```
Do not forget to run the xhost command first to bind the X11 to docker.
- For Blocks, you can do a `./run_airsim_image_binary.sh airsim_binary:runtime LinuxBlocks/Linux/Blocks.sh -windowed -ResX=1080 -ResY=720`
+ For Blocks, you can do a `./run_airsim_image_binary.sh airsim_binary:dev-slim-5.5.4 LinuxBlocks/Linux/Blocks.sh -windowed -ResX=1080 -ResY=720`
`
- * `DOCKER_IMAGE_NAME`: Same as `target_image` parameter in previous step. By default, enter `airsim_binary:runtime`
+ * `DOCKER_IMAGE_NAME`: Same as `target_image` parameter in previous step. By default, enter `airsim_binary:dev-slim-5.5.4`
* `UNREAL_BINARY_SHELL_SCRIPT`: for Blocks enviroment, it will be `LinuxBlocks/Linux/Blocks.sh`
* [`UNREAL_BINARY_ARGUMENTS`](https://docs.unrealengine.com/en-us/Programming/Basics/CommandLineArguments):
For airsim, most relevant would be `-windowed`, `-ResX`, `-ResY`. Click on link to see all options.
@@ -69,8 +69,8 @@ xhost +local:docker
$ cd Airsim/docker;
$ python build_airsim_image.py \
--source \
- ----base_image ghcr.io/epicgames/unreal-engine:dev-5.5.X \
- --target_image=airsim_source:dev-5.5.X
+ --base_image ghcr.io/epicgames/unreal-engine:dev-slim-5.5.4 \
+ --target_image=airsim_source:dev-slim-5.5.4
```
#### Running Cosys-AirSim container
@@ -78,7 +78,7 @@ $ python build_airsim_image.py \
```bash
xhost +local:docker
-./run_airsim_image_source.sh airsim_source:dev-5.5.X
+./run_airsim_image_source.sh airsim_source:dev-slim-5.5.4
```
Syntax is `./run_airsim_image_source.sh DOCKER_IMAGE_NAME`
@@ -98,15 +98,14 @@ xhost +local:docker
#### Packaging Unreal Environments in `airsim_source` containers
* Let's take the Blocks environment as an example.
In the following script, specify the full path to your unreal uproject file by `project` and the directory where you want the binaries to be placed by `archivedirectory`
-
+* If you have not run the environment once manually you still need to copy the plugin to the project folder first like with the first command below.
```bash
-/home/ue4/UnrealEngine/Engine/Build/BatchFiles/RunUAT.sh BuildCookRun -platform=Linux -clientconfig=Development -serverconfig=Development -noP4 -cook -allmaps -build -stage -prereqs -pak -archive \
--archivedirectory=/home/ue4/Binaries/Blocks/ \
--project=/home/ue4/Cosys-AirSim/Unreal/Environments/Blocks/Blocks.uproject
+/home/ue4/Cosys-AirSim/Unreal/Environments/Blocks/update_from_git.sh
+/home/ue4/UnrealEngine/Engine/Build/BatchFiles/RunUAT.sh BuildCookRun -nop4 -utf8output -cook -project=/home/ue4/Cosys-AirSim/Unreal/Environments/Blocks/Blocks.uproject -target=Blocks -platform=Linux -installed -stage -archive -package -build -pak -iostore -compressed -prereqs -archivedirectory=/home/ue4/Binaries/Blocks/ -clientconfig=Development -nocompile -nocompileuat
```
This would create a Blocks binary in `/home/ue4/Binaries/Blocks/`.
-You can test it by running `/home/ue4/Binaries/Blocks/LinuxNoEditor/Blocks.sh -windowed`
+You can test it by running `/home/ue4/Binaries/Blocks/Linux/Blocks.sh -windowed`
## Specifying settings.json
#### `airsim_binary` docker image:
diff --git a/docs/packaging.md b/docs/packaging.md
index ecbe4570e..101faa894 100644
--- a/docs/packaging.md
+++ b/docs/packaging.md
@@ -24,12 +24,12 @@ Then you can package the plugin as a standalone plugin from a Unreal Project lik
On Windows:
* Open the Blocks project in Unreal Engine `cd Cosys-AirSim/Unreal/Environments/Blocks` and pull the latest plugin files by running `update_from_git.bat`.
-* Go to your Unreal Engine installation folder and run the build script while pointing at the Blocks project: `./RunUAT.bat BuildPlugin -Plugin=....\Cosys-AirSim\Unreal\Environments\Blocks\Plugins\AirSim\AirSim.uplugin -Package=....\airsimpluginpackagewin -Rocket -TargetPlatforms=Win64`
+* Go to your Unreal Engine installation folder, move to the subfolder `/Engine/Build/BatchFiles`, and run the build script while pointing at the Blocks project: `./RunUAT.bat BuildPlugin -Plugin=....\Cosys-AirSim\Unreal\Environments\Blocks\Plugins\AirSim\AirSim.uplugin -Package=....\airsimpluginpackagewin -Rocket -TargetPlatforms=Win64`
On Linux:
* Open the Blocks project in Unreal Engine `cd Cosys-AirSim/Unreal/Environments/Blocks` and pull the latest plugin files by running `update_from_git.sh`.
-* Go to your Unreal Engine installation folder and run the build script while pointing at the Blocks project: `./RunUAT.sh BuildPlugin -Plugin=..../Cosys-AirSim/Unreal/Environments/Blocks/Plugins/AirSim/AirSim.uplugin -Package=..../airsimpluginpackagelinux -Rocket -TargetPlatforms=Linux`
+* Go to your Unreal Engine installation folder, move to the subfolder `/Engine/Build/BatchFiles`, and run the build script while pointing at the Blocks project: `./RunUAT.sh BuildPlugin -Plugin=..../Cosys-AirSim/Unreal/Environments/Blocks/Plugins/AirSim/AirSim.uplugin -Package=..../airsimpluginpackagelinux -Rocket -TargetPlatforms=Linux`
## Building an Unreal Project with Cosys-AirSim Plugin
@@ -52,12 +52,16 @@ Then you can package the plugin as a standalone plugin from a Unreal Project lik
On Windows:
* Open the Blocks project in Unreal Engine `cd Cosys-AirSim/Unreal/Environments/Blocks` and pull the latest plugin files by running `update_from_git.bat`.
-* Go to your Unreal Engine installation folder and run the build script while pointing at the Blocks project: `./RunUAT.bat BuildCookRun -cook -noP4 -build -stage -noiterate -archive -project=....\Cosys-AirSim\Unreal\Environments\Blocks\Blocks.uproject -archivedirectory=....\blockswin -Rocket -TargetPlatforms=Win64 -configuration=Development`
+* Go to your Unreal Engine installation folder, move to the subfolder `/Engine/Build/BatchFiles`, and run the build script while pointing at the Blocks project: `./RunUAT.bat BuildCookRun -cook -noP4 -build -stage -noiterate -archive -project=....\Cosys-AirSim\Unreal\Environments\Blocks\Blocks.uproject -archivedirectory=....\blockswin -Rocket -TargetPlatforms=Win64 -configuration=Development`
On Linux:
* Open the Blocks project in Unreal Engine `cd Cosys-AirSim/Unreal/Environments/Blocks` and pull the latest plugin files by running `update_from_git.sh`.
-* Go to your Unreal Engine installation folder and run the build script while pointing at the Blocks project: `./RunUAT.sh BuildCookRun -nop4 -utf8output -nocompileeditor -skipbuildeditor -cook -project="..../Cosys-AirSim/Unreal/Environments/Blocks/Blocks.uproject" -target=Blocks -platform=Linux -installed -stage -archive -package -build -pak -iostore -compressed -prereqs -archivedirectory="..../blockslinux/" -clientconfig=Development -nocompile -nocompileuat`
+* Go to your Unreal Engine installation folder, move to the subfolder `/Engine/Build/BatchFiles`, and run the build script while pointing at the Blocks project: `./RunUAT.sh BuildCookRun -nop4 -utf8output -cook -project="..../Cosys-AirSim/Unreal/Environments/Blocks/Blocks.uproject" -target=Blocks -platform=Linux -installed -stage -archive -package -build -pak -iostore -compressed -prereqs -archivedirectory="..../blockslinux/" -clientconfig=Development -nocompile -nocompileuat`