#ifndef ETL_SPSC_QUEUE_ATOMIC_INCLUDED
#define ETL_SPSC_QUEUE_ATOMIC_INCLUDED
50 template <
size_t Memory_Model = etl::memory_model::MEMORY_MODEL_LARGE>
51 class queue_spsc_atomic_base
56 typedef typename etl::size_type_lookup<Memory_Model>::type size_type;
65 return read.load(etl::memory_order_acquire) ==
write.load(etl::memory_order_acquire);
75 size_type next_index = get_next_index(
write.load(etl::memory_order_acquire), Reserved);
77 return (next_index ==
read.load(etl::memory_order_acquire));
84 size_type
size()
const
86 size_type write_index =
write.load(etl::memory_order_acquire);
87 size_type read_index =
read.load(etl::memory_order_acquire);
91 if (write_index >= read_index)
93 n = write_index - read_index;
97 n = Reserved - read_index + write_index;
107 size_type available()
const
109 return Reserved -
size() - 1;
115 size_type capacity()
const
123 size_type max_size()
const
130 queue_spsc_atomic_base(size_type reserved_)
133 , Reserved(reserved_)
140 static size_type get_next_index(size_type index, size_type maximum)
144 if (index == maximum) ETL_UNLIKELY
152 etl::atomic<size_type>
write;
153 etl::atomic<size_type>
read;
154 const size_type Reserved;
161 #if defined(ETL_POLYMORPHIC_SPSC_QUEUE_ATOMIC) || defined(ETL_POLYMORPHIC_CONTAINERS)
165 virtual ~queue_spsc_atomic_base() {}
170 ~queue_spsc_atomic_base() {}
185 template <
typename T, const
size_t Memory_Model = etl::memory_model::MEMORY_MODEL_LARGE>
186 class iqueue_spsc_atomic :
public queue_spsc_atomic_base<Memory_Model>
190 typedef typename etl::queue_spsc_atomic_base<Memory_Model> base_t;
194 typedef T value_type;
195 typedef T& reference;
196 typedef const T& const_reference;
198 typedef T&& rvalue_reference;
200 typedef typename base_t::size_type size_type;
202 using base_t::get_next_index;
204 using base_t::Reserved;
210 bool push(const_reference value)
212 size_type write_index =
write.load(etl::memory_order_relaxed);
213 size_type next_index = get_next_index(write_index, Reserved);
215 if (next_index !=
read.load(etl::memory_order_acquire))
217 ::new (&p_buffer[write_index]) T(value);
219 write.store(next_index, etl::memory_order_release);
228 #if ETL_USING_CPP11 && ETL_NOT_USING_STLPORT && !defined(ETL_QUEUE_ATOMIC_FORCE_CPP03_IMPLEMENTATION)
232 bool push(rvalue_reference value)
234 size_type write_index =
write.load(etl::memory_order_relaxed);
235 size_type next_index = get_next_index(write_index, Reserved);
237 if (next_index !=
read.load(etl::memory_order_acquire))
239 ::new (&p_buffer[write_index]) T(etl::move(value));
241 write.store(next_index, etl::memory_order_release);
251 #if ETL_USING_CPP11 && ETL_NOT_USING_STLPORT && !defined(ETL_QUEUE_ATOMIC_FORCE_CPP03_IMPLEMENTATION)
257 template <
typename... Args>
258 bool emplace(Args&&... args)
260 size_type write_index =
write.load(etl::memory_order_relaxed);
261 size_type next_index = get_next_index(write_index, Reserved);
263 if (next_index !=
read.load(etl::memory_order_acquire))
265 ::new (&p_buffer[write_index]) T(etl::forward<Args>(args)...);
267 write.store(next_index, etl::memory_order_release);
283 size_type write_index =
write.load(etl::memory_order_relaxed);
284 size_type next_index = get_next_index(write_index, Reserved);
286 if (next_index !=
read.load(etl::memory_order_acquire))
288 ::new (&p_buffer[write_index]) T();
290 write.store(next_index, etl::memory_order_release);
304 template <
typename T1>
305 bool emplace(
const T1& value1)
307 size_type write_index =
write.load(etl::memory_order_relaxed);
308 size_type next_index = get_next_index(write_index, Reserved);
310 if (next_index !=
read.load(etl::memory_order_acquire))
312 ::new (&p_buffer[write_index]) T(value1);
314 write.store(next_index, etl::memory_order_release);
328 template <
typename T1,
typename T2>
329 bool emplace(
const T1& value1,
const T2& value2)
331 size_type write_index =
write.load(etl::memory_order_relaxed);
332 size_type next_index = get_next_index(write_index, Reserved);
334 if (next_index !=
read.load(etl::memory_order_acquire))
336 ::new (&p_buffer[write_index]) T(value1, value2);
338 write.store(next_index, etl::memory_order_release);
352 template <
typename T1,
typename T2,
typename T3>
353 bool emplace(
const T1& value1,
const T2& value2,
const T3& value3)
355 size_type write_index =
write.load(etl::memory_order_relaxed);
356 size_type next_index = get_next_index(write_index, Reserved);
358 if (next_index !=
read.load(etl::memory_order_acquire))
360 ::new (&p_buffer[write_index]) T(value1, value2, value3);
362 write.store(next_index, etl::memory_order_release);
376 template <
typename T1,
typename T2,
typename T3,
typename T4>
377 bool emplace(
const T1& value1,
const T2& value2,
const T3& value3,
const T4& value4)
379 size_type write_index =
write.load(etl::memory_order_relaxed);
380 size_type next_index = get_next_index(write_index, Reserved);
382 if (next_index !=
read.load(etl::memory_order_acquire))
384 ::new (&p_buffer[write_index]) T(value1, value2, value3, value4);
386 write.store(next_index, etl::memory_order_release);
399 bool front(reference value)
401 size_type read_index =
read.load(etl::memory_order_relaxed);
403 if (read_index ==
write.load(etl::memory_order_acquire))
409 value = p_buffer[read_index];
417 bool pop(reference value)
419 size_type read_index =
read.load(etl::memory_order_relaxed);
421 if (read_index ==
write.load(etl::memory_order_acquire))
427 size_type next_index = get_next_index(read_index, Reserved);
429 #if ETL_USING_CPP11 && ETL_NOT_USING_STLPORT && !defined(ETL_QUEUE_LOCKABLE_FORCE_CPP03_IMPLEMENTATION)
430 value = etl::move(p_buffer[read_index]);
432 value = p_buffer[read_index];
435 p_buffer[read_index].~T();
437 read.store(next_index, etl::memory_order_release);
447 size_type read_index =
read.load(etl::memory_order_relaxed);
449 if (read_index ==
write.load(etl::memory_order_acquire))
455 size_type next_index = get_next_index(read_index, Reserved);
457 p_buffer[read_index].~T();
459 read.store(next_index, etl::memory_order_release);
469 size_type read_index =
read.load(etl::memory_order_relaxed);
471 return p_buffer[read_index];
477 const_reference front()
const
479 size_type read_index =
read.load(etl::memory_order_relaxed);
481 return p_buffer[read_index];
491 if ETL_IF_CONSTEXPR (etl::is_trivially_destructible<T>::value)
510 iqueue_spsc_atomic(T* p_buffer_, size_type reserved_)
512 , p_buffer(p_buffer_)
519 iqueue_spsc_atomic(
const iqueue_spsc_atomic&) ETL_DELETE;
520 iqueue_spsc_atomic& operator=(
const iqueue_spsc_atomic&) ETL_DELETE;
523 iqueue_spsc_atomic(iqueue_spsc_atomic&&) =
delete;
524 iqueue_spsc_atomic& operator=(iqueue_spsc_atomic&&) =
delete;
539 template <
typename T,
size_t Size, const
size_t Memory_Model = etl::memory_model::MEMORY_MODEL_LARGE>
540 class queue_spsc_atomic :
public iqueue_spsc_atomic<T, Memory_Model>
544 typedef typename etl::iqueue_spsc_atomic<T, Memory_Model> base_t;
548 typedef typename base_t::size_type size_type;
552 static ETL_CONSTANT size_type Reserved_Size = size_type(Size + 1);
556 ETL_STATIC_ASSERT((Size <= (etl::integral_limits<size_type>::max - 1)),
"Size too large for memory model");
558 static ETL_CONSTANT size_type MAX_SIZE = size_type(Size);
564 : base_t(reinterpret_cast<T*>(&buffer[0]), Reserved_Size)
579 typename etl::aligned_storage<
sizeof(T), etl::alignment_of<T>::value>::type buffer[Reserved_Size];
582 template <
typename T,
size_t Size, const
size_t Memory_Model>
583 ETL_CONSTANT
typename queue_spsc_atomic<T, Size, Memory_Model>::size_type queue_spsc_atomic<T, Size, Memory_Model>::MAX_SIZE;
bitset_ext
Definition absolute.h:40
etl::optional< T > read(etl::bit_stream_reader &stream)
Read a checked type from a stream.
Definition bit_stream.h:1377
ETL_CONSTEXPR TContainer::size_type size(const TContainer &container)
Definition iterator.h:1192
bool write(etl::bit_stream_writer &stream, bool value)
Definition bit_stream.h:995