1 #include "hmbdc/Copyright.hpp" 5 #include "hmbdc/app/StuckClientPurger.hpp" 6 #include "hmbdc/Config.hpp" 7 #include "hmbdc/numeric/BitMath.hpp" 8 #include "hmbdc/time/Time.hpp" 10 #include <boost/interprocess/allocators/allocator.hpp> 16 namespace hmbdc {
namespace app {
namespace context_property {

/// Context template parameter indicating each message is sent to all Clients
/// within the Context (broadcast)
template <uint16_t max_parallel_consumer = DEFAULT_HMBDC_CAPACITY>
struct broadcast {
    static_assert(max_parallel_consumer >= 4u
        , "max_parallel_consumer must be at least 4");   // assert message reconstructed
};

// Also declared in this namespace (declarations elided in this excerpt):
// a partition property (each message is sent to one and only one of the
// Clients), an ipc property (the Context can create or be attached to an ipc
// transport), and a device property for processes distributed across a PCIe
// board and a host PC.

} // namespace context_property
}} // namespace hmbdc::app
120 #include "hmbdc/app/ContextDetail.hpp" 121 namespace hmbdc {
namespace app {
123 namespace context_detail {
124 HMBDC_CLASS_HAS_DECLARE(hmbdc_ctx_queued_ts);
125 HMBDC_CLASS_HAS_DECLARE(hmbdcIpcFrom);
126 HMBDC_CLASS_HAS_DECLARE(ibmaProc);
/// covers the inter-thread and ipc communication facade
template <size_t MaxMessageSize, typename... ContextProperties>
struct ThreadCommBase {
    // the cpa (aggregated ContextProperties traits) and Buffer aliases are
    // defined on lines elided from this excerpt
    using Allocator = typename cpa::Allocator;

    enum {
        MAX_MESSAGE_SIZE = MaxMessageSize,
        BUFFER_VALUE_SIZE = MaxMessageSize + sizeof(MessageHead),
    };

    /// max message size (bytes) this Context supports; falls back to the
    /// runtime value when the MaxMessageSize template parameter is 0
    size_t maxMessageSize() const {
        if (MaxMessageSize == 0) return maxMessageSizeRuntime_;
        return MaxMessageSize;
    }
    /// send a batch of messages to the Context or attached ipc Contexts
    template <MessageC M0, MessageC M1, typename... Messages
        , typename Enabled = typename std::enable_if<!std::is_integral<M1>::value, void>::type>
    void send(M0&& m0, M1&& m1, Messages&&... msgs) {
        auto n = sizeof...(msgs) + 2;
        auto it = buffer_.claim(n);
        sendRecursive(it, std::forward<M0>(m0), std::forward<M1>(m1), std::forward<Messages>(msgs)...);
        buffer_.commit(it, n);
    }
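    // Batch-send sketch (hypothetical Tick type; assumes the usual
    // hmbdc::app::hasTag message base and a started Context ctx):
    //
    //   struct Tick : hmbdc::app::hasTag<1001> { int volume; };
    //   ctx.send(Tick{}, Tick{}, Tick{});   // one 3-slot claim, one commit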
    /// try to send a batch of messages to the Context or attached ipc
    /// Contexts; returns false if the slots cannot be claimed without blocking
    template <MessageC M0, MessageC M1, typename... Messages
        , typename Enabled = typename std::enable_if<!std::is_integral<M1>::value, void>::type>
    bool trySend(M0&& m0, M1&& m1, Messages&&... msgs) {
        auto n = sizeof...(msgs) + 2;
        auto it = buffer_.tryClaim(n);
        if (hmbdc_unlikely(!it)) return false;
        sendRecursive(it, std::forward<M0>(m0), std::forward<M1>(m1), std::forward<Messages>(msgs)...);
        buffer_.commit(it, n);
        return true;
    }
    /// send a range of messages to the Context or attached ipc Contexts
    template <MessageForwardIterC ForwardIt>
    void send(ForwardIt begin, size_t n) {
        if (hmbdc_likely(n)) {
            auto bit = buffer_.claim(n);
            auto it = bit;
            for (auto i = 0ul; i < n; i++) {
                using Message = typename std::iterator_traits<ForwardIt>::value_type;
                static_assert(std::is_trivially_destructible<Message>::value
                    , "cannot send message with dtor");
                static_assert(!std::is_base_of<hasMemoryAttachment, Message>::value
                    , "hasMemoryAttachment Messages cannot be sent in group");
                auto wrap = new (*it++) MessageWrap<Message>(*begin++);   // reconstruction of an elided line
                if constexpr (has_hmbdc_ctx_queued_ts<Message>::value) {
                    wrap->template get<Message>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
                }
            }
            buffer_.commit(bit, n);
        }
    }
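    // Range-send sketch (Tick as above):
    //
    //   std::vector<Tick> ticks(8);
    //   ctx.send(ticks.begin(), ticks.size());   // 8 slots claimed, one commit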
    /// try to send a range of messages to the Context or attached ipc
    /// Contexts; returns false if n slots cannot be claimed without blocking
    template <MessageForwardIterC ForwardIt>
    bool trySend(ForwardIt begin, size_t n) {
        if (hmbdc_likely(n)) {
            auto bit = buffer_.tryClaim(n);
            if (hmbdc_unlikely(!bit)) return false;
            auto it = bit;
            for (auto i = 0ul; i < n; i++) {
                using Message = typename std::iterator_traits<ForwardIt>::value_type;
                static_assert(std::is_trivially_destructible<Message>::value
                    , "cannot send message with dtor");
                static_assert(!std::is_base_of<hasMemoryAttachment, Message>::value
                    , "hasMemoryAttachment Messages cannot be sent in group");
                auto wrap = new (*it++) MessageWrap<Message>(*begin++);   // reconstruction of an elided line
                if constexpr (has_hmbdc_ctx_queued_ts<Message>::value) {
                    wrap->template get<Message>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
                }
            }
            buffer_.commit(bit, n);
        }
        return true;
    }
    /// send a message (including a hasMemoryAttachment message) to the Context
    /// or attached ipc Contexts
    template <MessageC Message>
    void send(Message&& m) {
        using M = typename std::decay<Message>::type;
        static_assert(std::is_trivially_destructible<M>::value, "cannot send message with dtor");
        static_assert(MAX_MESSAGE_SIZE == 0 || sizeof(MessageWrap<M>) <= BUFFER_VALUE_SIZE
            , "message too big");
        if constexpr (!std::is_base_of<hasMemoryAttachment, M>::value) {
            if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
                && sizeof(MessageWrap<M>) > buffer_.maxItemSize())) {
                HMBDC_THROW(std::out_of_range, "message too big, typeTag=" << m.getTypeTag());
            }
            if constexpr (has_hmbdc_ctx_queued_ts<M>::value) {
                auto it = buffer_.claim();
                auto wrap = new (*it) MessageWrap<M>(std::forward<Message>(m));   // reconstruction
                wrap->template get<M>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
                buffer_.commit(it);
            } else {
                // ... (plain non-timestamped path elided in this excerpt)
            }
        } else {
            // zero-copy path: if the attachment already lives in the ipc shm
            // attachment pool, pass a handle instead of copying the bytes
            if constexpr (cpa::ipc && has_hmbdcShmRefCount<M>::value) {
                if (m.template holdShmHandle<M>()) {
                    if (0 >= m.hmbdcShmRefCount) {
                        // ... (error handling elided in this excerpt)
                    }
                    auto it = buffer_.claim(1);
                    auto wrap = new (*it) MessageWrap<M>(std::forward<Message>(m));   // reconstruction
                    wrap->payload.shmConvert(*shmAttAllocator_);
                    if constexpr (has_hmbdc_ctx_queued_ts<M>::value) {
                        wrap->template get<M>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
                    }
                    buffer_.commit(it, 1);
                    m.hasMemoryAttachment::release();
                    return;
                }
            }
            // in-band path: the attachment bytes ride in the buffer as
            // InBandMemorySeg segments following the message itself
            if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
                && sizeof(MessageWrap<M>) > buffer_.maxItemSize())) {
                HMBDC_THROW(std::out_of_range, "message too big, typeTag=" << m.getTypeTag());
            }
            auto& att = (hasMemoryAttachment&)m;   // reconstruction of an elided line
            size_t segSize = buffer_.maxItemSize() - sizeof(MessageHead);
            auto n = (att.len + segSize - 1) / segSize + 1;   // message slot + attachment segments
            if (hmbdc_unlikely(n > buffer_.capacity())) {
                HMBDC_THROW(std::out_of_range
                    , "hasMemoryAttachment message too big, typeTag=" << m.getTypeTag());
            }
            auto bit = buffer_.claim(n);
            auto it = bit;
            auto wrap = new (*it++) MessageWrap<InBandHasMemoryAttachment<M>>(
                std::forward<Message>(m));   // reconstruction, following the pattern below
            if constexpr (has_hmbdc_ctx_queued_ts<M>::value) {
                wrap->template get<M>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
            }
            auto segStart = (char*)att.attachment;
            auto remaining = (size_t)att.len;
            while (remaining) {   // copy the attachment into in-band segments
                auto segWrap = new (*it++) MessageWrap<InBandMemorySeg>();
                auto& ms = segWrap->get<InBandMemorySeg>();
                auto bytes = std::min(segSize, (size_t)remaining);
                segWrap->scratchpad().ipc.inbandPayloadLen = bytes;
                memcpy(ms.seg, segStart, bytes);
                segStart += bytes;
                remaining -= bytes;
            }
            buffer_.commit(bit, it - bit);
        }
    }
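    // Single-send sketch (hypothetical types): a plain message takes the fast
    // path; a hasMemoryAttachment message is either shm-converted (zero-copy)
    // or copied in-band behind the message, as above:
    //
    //   ctx.send(Tick{});
    //   ctx.send(std::move(blobMsg));   // hypothetical attachment-carrying message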
    /// preallocate n consecutive buffers that can be filled and later sent out
    /// via commitForSend()
    template <MessageC Message>
    auto allocateForSend(size_t n) {
        struct IteratorAdaptor {
            typename Buffer::iterator it;
            Message& operator*() { return ((MessageWrap<Message>*)(*it))->payload; }   // cast reconstructed
            auto& operator++() { ++it; return *this; }
            auto operator++(int) {
                auto tmp = IteratorAdaptor{it++};
                return tmp;
            }
        };
        auto it = buffer_.claim(n);
        auto res = IteratorAdaptor{it};
        // ... (per-slot MessageWrap construction elided in this excerpt)
        return res;
    }

    /// commit all the filled buffers allocated by allocateForSend and send them out
    template <typename IteratorAdaptor>
    void commitForSend(IteratorAdaptor itA) {
        buffer_.commit(itA.it);
    }
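    // Preallocate-then-commit sketch (Tick as above; assumes the adaptor is
    // advanced past every filled slot before being handed to commitForSend):
    //
    //   auto out = ctx.allocateForSend<Tick>(4);
    //   for (int i = 0; i < 4; ++i, ++out) (*out).volume = i;
    //   ctx.commitForSend(out);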
    /// try to send a message (including a hasMemoryAttachment message) to the
    /// Context or attached ipc Contexts; returns false if it would block
    template <MessageC Message>
    bool trySend(Message&& m) {
        using M = typename std::decay<Message>::type;
        static_assert(std::is_trivially_destructible<M>::value, "cannot send message with dtor");
        static_assert(MAX_MESSAGE_SIZE == 0 || sizeof(MessageWrap<M>) <= BUFFER_VALUE_SIZE
            , "message too big");
        if constexpr (!std::is_base_of<hasMemoryAttachment
            , typename std::decay<Message>::type>::value) {
            if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
                && sizeof(MessageWrap<M>) > buffer_.maxItemSize())) {
                HMBDC_THROW(std::out_of_range, "message too big");
            }
            // ... (non-attachment try path elided in this excerpt)
        } else {
            if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
                && sizeof(MessageWrap<M>) > buffer_.maxItemSize())) {
                HMBDC_THROW(std::out_of_range, "message too big, typeTag=" << m.getTypeTag());
            }
            auto& att = (hasMemoryAttachment&)m;   // reconstruction of an elided line
            size_t segSize = buffer_.maxItemSize() - sizeof(MessageHead);
            auto n = (att.len + segSize - 1) / segSize + 1;
            if (hmbdc_unlikely(n > buffer_.capacity())) {
                HMBDC_THROW(std::out_of_range,
                    "hasMemoryAttachment message too big, typeTag=" << m.getTypeTag());
            }
            auto bit = buffer_.tryClaim(n);
            if (!bit) return false;
            auto it = bit;
            auto wrap = new (*it++) MessageWrap<InBandHasMemoryAttachment<M>>(
                std::forward<Message>(m));   // reconstruction of an elided line
            wrap->scratchpad().inbandUnderlyingTypeTag = m.getTypeTag();
            auto segStart = (char*)att.attachment;
            auto remaining = (size_t)att.len;
            while (remaining) {
                auto segWrap = new (*it++) MessageWrap<InBandMemorySeg>();
                auto& ms = segWrap->get<InBandMemorySeg>();
                auto bytes = std::min(segSize, (size_t)remaining);
                segWrap->scratchpad().ipc.inbandPayloadLen = bytes;
                memcpy(ms.seg, segStart, bytes);
                segStart += bytes;
                remaining -= bytes;
            }
            buffer_.commit(bit, it - bit);
        }
        return true;
    }
    /// send a message to all Clients in the Context or attached ipc Contexts
    /// by constructing it in place in the buffer (no intermediate copy)
    template <MessageC Message, typename... Args>
    void sendInPlace(Args&&... args) {
        static_assert(!std::is_base_of<JustBytes, Message>::value
            , "use sendJustBytesInPlace");
        static_assert(std::is_trivially_destructible<Message>::value,
            "cannot send message with dtor");
        static_assert(MAX_MESSAGE_SIZE == 0 || sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE
            , "message too big");
        static_assert(!std::is_base_of<hasMemoryAttachment, Message>::value
            , "hasMemoryAttachment Messages cannot be sent in place");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
            && sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range
                , "message too big buffer_.maxItemSize()=" << buffer_.maxItemSize());
        }
        buffer_.template putInPlace<MessageWrap<Message>>(std::forward<Args>(args)...);
    }
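    // In-place sketch (Tick as above): the message is constructed directly in
    // its buffer slot, skipping the intermediate copy:
    //
    //   ctx.sendInPlace<Tick>();   // ctor args are forwarded into the slot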
    template <typename JustBytesType, typename... Args>
    void sendJustBytesInPlace(uint16_t tag, void const* bytes, size_t len
        , hasMemoryAttachment* att, Args&&... args) {   // att parameter reconstructed from the uses below
        if (hmbdc_unlikely(len > maxMessageSize())) {
            HMBDC_THROW(std::out_of_range, "message too big, typeTag=" << tag);
        }
        if (!att || !cpa::ipc) {
            auto it = buffer_.claim();
            new (*it) MessageWrap<JustBytesType>(tag, bytes, len, att
                , std::forward<Args>(args)...);   // reconstruction of an elided line
            buffer_.commit(it);
        } else {
            size_t segSize = buffer_.maxItemSize() - sizeof(MessageHead);
            auto n = (att->len + segSize - 1) / segSize + 1;
            if (hmbdc_unlikely(n > buffer_.capacity())) {
                HMBDC_THROW(std::out_of_range
                    , "hasMemoryAttachment message too big, typeTag=" << tag);
            }
            auto bit = buffer_.claim(n);
            auto it = bit;
            auto wrap = new (*it++) MessageWrap<InBandHasMemoryAttachment<JustBytesType>>(
                tag, bytes, len, att, std::forward<Args>(args)...); (void)wrap;
            auto segStart = (char*)att->attachment;
            auto remaining = (size_t)att->len;
            while (remaining) {
                auto segWrap = (new (*it++) MessageWrap<InBandMemorySeg>());
                auto& ms = segWrap->get<InBandMemorySeg>();
                auto bytes = std::min(segSize, (size_t)remaining);
                segWrap->scratchpad().ipc.inbandPayloadLen = bytes;
                memcpy(ms.seg, segStart, bytes);
                segStart += bytes;
                remaining -= bytes;
            }
            buffer_.commit(bit, it - bit);
        }
    }
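    // Type-erased sketch (hypothetical tag/payload; a nullptr att selects the
    // non-attachment path above):
    //
    //   char payload[8] = {0};
    //   ctx.sendJustBytesInPlace<JustBytes>(1001, payload, sizeof(payload), nullptr);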
    /// try to send a message to all Clients in the Context or attached ipc
    /// Contexts by constructing it in place; returns false if it would block
    template <MessageC Message, typename... Args>
    bool trySendInPlace(Args&&... args) {
        static_assert(std::is_trivially_destructible<Message>::value,
            "cannot send message with dtor");
        static_assert(MAX_MESSAGE_SIZE == 0 || sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE
            , "message too big");
        static_assert(!std::is_base_of<hasMemoryAttachment, Message>::value,
            "hasMemoryAttachment Messages cannot be sent in place");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
            && sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        return buffer_.template tryPutInPlace<MessageWrap<Message>>(std::forward<Args>(args)...);
    }
    /// accessor - mostly used internally
    Buffer& buffer() { return buffer_; }

    size_t dispatchingStartedCount() const {
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        return *pDispStartCount_;
    }
    /// allocate a trivially destructible T in the ipc shm attachment pool; the
    /// deleter returns the block to the pool once the 0cpy ref count drops to 0
    template <typename T, typename... Args>
    std::shared_ptr<T> allocateInShm(size_t actualSize, Args&&... args) {
        static_assert(std::is_trivially_destructible<T>::value);
        auto ptr = shmAttAllocator_->allocate(actualSize);
        auto ptrT = new (ptr) T{std::forward<Args>(args)...};
        return std::shared_ptr<T>(
            ptrT
            , [this](T* t) {   // deleter capture list reconstructed; elided in the excerpt
                if (0 == __atomic_sub_fetch(&t->hmbdc0cpyShmRefCount, 1, __ATOMIC_RELEASE)) {
                    shmAttAllocator_->deallocate((uint8_t*)t);
                }
            });
    }
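    // Shm-allocation sketch (assumes an ipc Context constructed with a nonzero
    // ipcShmForAttPoolSize, and a hypothetical Pod type that carries the
    // hmbdc0cpyShmRefCount counter used by the deleter above):
    //
    //   auto p = ctx.allocateInShm<Pod>(sizeof(Pod));
    //   // p's deleter returns the block to the pool when the ref count hits 0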
    template <typename IntLvOrRv>
    ThreadCommBase(uint32_t messageQueueSizePower2Num
        , size_t maxMessageSizeRuntime
        , char const* shmName
        , size_t ipcTransportDeviceOffset   // reconstruction of an elided parameter
        , IntLvOrRv&& ownership
        , size_t ipcShmForAttPoolSize)
        : allocator_(shmName
            , Buffer::footprint(maxMessageSizeRuntime + sizeof(MessageHead)
                , messageQueueSizePower2Num) + SMP_CACHE_BYTES + sizeof(*pDispStartCount_)
            /* , remaining allocator args elided in this excerpt */)
        , pDispStartCount_(allocator_.template allocate<size_t>(SMP_CACHE_BYTES, 0))
        , bufferptr_(allocator_.template allocate<Buffer>(SMP_CACHE_BYTES
            , maxMessageSizeRuntime + sizeof(MessageHead), messageQueueSizePower2Num
            /* , remaining buffer args elided in this excerpt */))
        , buffer_(*bufferptr_) {
        if (messageQueueSizePower2Num < 2) {
            HMBDC_THROW(std::out_of_range
                , "messageQueueSizePower2Num needs to be >= 2");
        }
        if (MaxMessageSize && maxMessageSizeRuntime != MAX_MESSAGE_SIZE) {
            HMBDC_THROW(std::out_of_range
                , "can only set maxMessageSizeRuntime when template value MaxMessageSize is 0");
        }
        maxMessageSizeRuntime_ = maxMessageSizeRuntime;
        if (((cpa::ipc && ownership > 0) || !cpa::ipc) && cpa::has_pool) {
            markDeadFrom(buffer_, 0);
        }
        if (cpa::ipc && ipcShmForAttPoolSize) {
            auto name = std::string(shmName) + "-att-pool";
            for (auto retry = 3 /* retry count assumed */;;) {
                try {
                    if (ownership > 0) {
                        shm_unlink(name.c_str());
                        shmAttAllocator_.emplace(
                            ownership > 0, boost::interprocess::create_only
                            , name.c_str(), ipcShmForAttPoolSize);
                    } else {
                        shmAttAllocator_.emplace(ownership > 0
                            , boost::interprocess::open_only, name.c_str());
                    }
                    break;
                } catch (boost::interprocess::interprocess_exception const&) {
                    if (--retry == 0) throw;
                }
            }
        }
    }

    ~ThreadCommBase() {
        allocator_.unallocate(bufferptr_);
    }
    static void markDeadFrom(pattern::MonoLockFreeBuffer& buffer, uint16_t) {
        // nothing to do for the single-consumer (partition) buffer
    }

    template <typename BroadCastBuf>
    static void markDeadFrom(BroadCastBuf& buffer, uint16_t poolThreadCount) {
        for (uint16_t i = poolThreadCount;
            i < BroadCastBuf::max_parallel_consumer;
            ++i) {
            buffer.markDead(i);   // loop body reconstructed; elided in the excerpt
        }
    }

    static void markDead(pattern::MonoLockFreeBuffer& buffer, std::list<uint16_t> slots) {
        // nothing to do for the single-consumer (partition) buffer
    }

    template <typename BroadCastBuf>
    static void markDead(BroadCastBuf& buffer, std::list<uint16_t> slots) {
        for (auto s : slots) {
            buffer.markDead(s);   // loop body reconstructed; elided in the excerpt
        }
    }
    Allocator allocator_;
    size_t* pDispStartCount_;
    Buffer* HMBDC_RESTRICT bufferptr_;
    Buffer& HMBDC_RESTRICT buffer_;
    struct ShmAttAllocator {
        template <typename Arg, typename... Args>
        ShmAttAllocator(bool own, Arg&& arg, char const* name, Args&&... args)
            : managedShm_(std::forward<Arg>(arg), name, std::forward<Args>(args)...) {
            if (own) nameUnlink_ = name;   // reconstruction: remember the name to unlink at teardown
        }

        ~ShmAttAllocator() {
            if (nameUnlink_.size()) {
                shm_unlink(nameUnlink_.c_str());
            }
        }

        boost::interprocess::managed_shared_memory::handle_t
        getHandle(void* localAddr) const {
            return managedShm_.get_handle_from_address(localAddr);
        }

        uint8_t* getAddr(boost::interprocess::managed_shared_memory::handle_t h) const {
            return (uint8_t*)managedShm_.get_address_from_handle(h);
        }

        uint8_t* allocate(size_t len) {
            auto res = (uint8_t*)managedShm_.allocate(len);
            return res;   // additional bookkeeping elided in this excerpt
        }

        auto deallocate(uint8_t* p) {
            return managedShm_.deallocate(p);
        }

        boost::interprocess::managed_shared_memory managedShm_;
        std::string nameUnlink_;
    };
    std::optional<ShmAttAllocator> shmAttAllocator_;
    template <typename M, typename... Messages>
    void sendRecursive(typename Buffer::iterator it
        , M&& msg, Messages&&... msgs) {
        using Message = typename std::decay<M>::type;
        static_assert(std::is_trivially_destructible<Message>::value
            , "cannot send message with dtor");
        static_assert(MAX_MESSAGE_SIZE == 0 || sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE
            , "message too big");
        static_assert(!std::is_base_of<hasMemoryAttachment, Message>::value
            , "hasMemoryAttachment Messages cannot be sent in group");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
            && sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        auto wrap = new (*it) MessageWrap<Message>(std::forward<M>(msg));   // reconstruction of an elided line
        if constexpr (has_hmbdc_ctx_queued_ts<Message>::value) {
            wrap->template get<Message>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
        }
        sendRecursive(++it, std::forward<Messages>(msgs)...);
    }
    void sendRecursive(typename Buffer::iterator) {}

    size_t maxMessageSizeRuntime_;
};
/// A Context is like a media object that facilitates the communications
/// for the Clients that it is holding
template <size_t MaxMessageSize = 0, typename... ContextProperties>
struct Context
: ThreadCommBase<MaxMessageSize, ContextProperties...> {   // inheritance reconstructed from the Base:: uses below
    using Base = ThreadCommBase<MaxMessageSize, ContextProperties...>;
    // the cpa alias is defined on a line elided from this excerpt
    using Buffer = typename Base::Buffer;
    using Pool = typename std::conditional<cpa::pool_msgless
        , PoolMinus, PoolT<Buffer>>::type;   // alternatives partially reconstructed (PoolMinus per PoolMinus.hpp)

    /// ctor for constructing a local (non-ipc) Context
    Context(uint32_t messageQueueSizePower2Num = MaxMessageSize ? 20 : 2
        , size_t maxPoolClientCount = MaxMessageSize ? 128 : 0
        , size_t maxMessageSizeRuntime = MaxMessageSize)
        : Base(messageQueueSizePower2Num < 2 ? 2 : messageQueueSizePower2Num
            , MaxMessageSize ? MaxMessageSize : maxMessageSizeRuntime
            , nullptr, 0, false, 0)
        , usedHmbdcCapacity_(0)
        , pool_(createPool<cpa>(maxPoolClientCount))
        , poolThreadCount_(0) {
        static_assert(!cpa::ipc, "no name specified for ipc Context");
    }
    /// ctor for constructing a local ipc Context
    template <typename IntRvOrLv
        , std::enable_if_t<std::is_same<int, typename std::decay<IntRvOrLv>::type>::value>*
            = nullptr>
    Context(IntRvOrLv&& ownership
        , char const* ipcTransportName
        , uint32_t messageQueueSizePower2Num = MaxMessageSize ? 20 : 0
        , size_t maxPoolClientCount = MaxMessageSize ? 128 : 0
        , size_t maxMessageSizeRuntime = MaxMessageSize
        , uint64_t purgerCpuAffinityMask = 0xfffffffffffffffful
        , size_t ipcTransportDeviceOffset = 0
        , size_t ipcShmForAttPoolSize = 0)
        : Base(messageQueueSizePower2Num
            , MaxMessageSize ? MaxMessageSize : maxMessageSizeRuntime
            , ipcTransportName   // reconstruction of an elided line
            , ipcTransportDeviceOffset, ownership, ipcShmForAttPoolSize)
        , usedHmbdcCapacity_(0)
        , pool_(createPool<cpa>(maxPoolClientCount))
        , poolThreadCount_(0)
        , secondsBetweenPurge_(60)
        , ifIpcCreator_(ownership > 0)
        , purgerCpuAffinityMask_(purgerCpuAffinityMask) {
        static_assert(cpa::ipc, "ctor can only be used with ipc turned on Context");
        // ... (remaining ctor body elided in this excerpt)
    }

    /// dtor
    ~Context() {
        // ... (shutdown steps elided in this excerpt)
        Base::markDeadFrom(this->buffer_, 0);
    }
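    // ipc sketch (assumes the ipc context property from context_property,
    // hypothetically named ipc_enabled here; ownership > 0 creates the shm
    // transport, 0 attaches to an existing one, both naming the same transport):
    //
    //   using IpcCtx = hmbdc::app::Context<128, context_property::ipc_enabled>;
    //   IpcCtx creator(1, "demo-transport");    // creates the transport
    //   IpcCtx attacher(0, "demo-transport");   // attaches to it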
    /// add a client to the Context's pool - the Client is run in pool mode
    template <typename Client>
    void addToPool(Client& client
        , uint64_t poolThreadAffinityIn = 0xfffffffffffffffful) {
        static_assert(cpa::has_pool, "pool is not supported in the Context type");
        if (std::is_base_of<single_thread_powered_client, Client>::value
            && hmbdc::numeric::setBitsCount(poolThreadAffinityIn) != 1
            && poolThreadCount_ != 1) {
            HMBDC_THROW(std::out_of_range
                , "cannot add a single thread powered client to the non-single "
                  "thread powered pool without specifying a single thread poolThreadAffinity");
        }
        primeForShmAtt(client);
        // ... (consumer stub creation elided in this excerpt)
        pool_->addConsumer(*stub, poolThreadAffinityIn);
    }
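    // Pool sketch (hypothetical MyClient derived from hmbdc::app::Client):
    //
    //   MyClient c;
    //   ctx.addToPool(c);        // c now runs in pool mode
    //   ctx.start(2, 0x3ul);     // 2 pool threads pinned to cpu 0 and 1
    //   ...
    //   ctx.stop(); ctx.join();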
    /// add a bunch of clients to the Context's pool - the Clients are run in pool mode
    template <typename Client, typename... Args>
    void addToPool(Client& client
        , uint64_t poolThreadAffinityIn, Args&&... args) {
        // ... (delegates to the single-Client overload, then recurses; body elided)
    }

    template <typename Client, typename Client2, typename... Args
        , typename Enabled = typename std::enable_if<!std::is_integral<Client2>::value, void>::type>
    void addToPool(Client& client, Client2& client2, Args&&... args) {
        addToPool(client);   // reconstruction of an elided line
        addToPool(client2, std::forward<Args>(args)...);
    }

    /// return the number of clients added into the pool
    size_t clientCountInPool() const {
        static_assert(cpa::has_pool, "pool is not supported in the Context type");
        return pool_->consumerSize();
    }

    /// how many parallel consumers are alive
    size_t parallelConsumerAlive() const {
        return this->buffer_.parallelConsumerAlive();
    }
    /// start the Context: specify its Pool and/or direct Clients with paired-up cpu affinities
    template <typename... Args>
    void start(Args&&... args) {
        startWithContextProperty<cpa>(true, std::forward<Args>(args)...);
    }

    /// like start(), but the Client threads are not kicked off - drive them via runOnce()
    template <typename... Args>
    void registerToRun(Args&&... args) {
        startWithContextProperty<cpa>(false, std::forward<Args>(args)...);
    }

    /// stop the message dispatching - asynchronously
    void stop() { stopWithContextProperty<cpa>(); }

    /// wait until all threads (Pool threads too, if applicable) of the Context exit
    void join() { joinWithContextProperty<cpa>(); }

    /// set how often the ipc-creator Context purges crashed (or slow, stuck, ...)
    /// Clients from the ipc transport
    void setSecondsBetweenPurge(uint32_t s) { secondsBetweenPurge_ = s; }

    /// drive one round of pool dispatching - only for running your own message loop
    void runOnce(uint16_t threadSerialNumberInPool) {
        static_assert(cpa::has_pool, "pool is not supported in the Context type");
        pool_->runOnce(threadSerialNumberInPool);
    }
    /// drive one round of dispatching for a directly-registered Client - only
    /// for running your own message loop
    template <typename Client>
    bool runOnce(Client&& c) {
        uint16_t tn = cpa::broadcast_msg ? c.hmbdcNumber : 0;
        // ... (setup elided in this excerpt)
        return context_detail::runOnceImpl(tn
            , stopped_, this->buffer_
            , c);   // trailing argument reconstructed
    }
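    // Self-driven loop sketch: register the Client via registerToRun() above
    // (no thread is kicked off), then pump dispatching manually:
    //
    //   ctx.registerToRun(myClient, /*cpuAffinity*/ 0x1ul);
    //   while (!done) ctx.runOnce(myClient);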
    template <typename cpa>
    typename std::enable_if<cpa::has_pool && !cpa::pool_msgless, typename Pool::ptr>::type
    createPool(size_t maxPoolClientCount) {
        return Pool::create(this->buffer(), maxPoolClientCount);
    }

    template <typename cpa>
    typename std::enable_if<cpa::pool_msgless, typename Pool::ptr>::type
    createPool(size_t maxPoolClientCount) {
        return Pool::create(maxPoolClientCount);
    }

    template <typename cpa>
    typename std::enable_if<!cpa::has_pool && !cpa::pool_msgless, typename Pool::ptr>::type
    createPool(size_t) {
        return typename Pool::ptr();
    }
    template <typename cpa>
    typename std::enable_if<cpa::has_pool, void>::type
    stopWithContextProperty() {
        if (pool_) pool_->stop();
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        stopped_ = true;   // reconstruction of an elided line
    }

    template <typename cpa>
    typename std::enable_if<!cpa::has_pool, void>::type
    stopWithContextProperty() {
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        stopped_ = true;   // reconstruction of an elided line
    }

    template <typename cpa>
    typename std::enable_if<cpa::has_pool, void>::type
    joinWithContextProperty() {
        if (pool_) pool_->join();
        for (auto& t : threads_) {
            t.join();   // loop body reconstructed; elided in the excerpt
        }
    }

    template <typename cpa>
    typename std::enable_if<!cpa::has_pool, void>::type
    joinWithContextProperty() {
        for (auto& t : threads_) {
            t.join();   // loop body reconstructed; elided in the excerpt
        }
    }
    template <typename cpa>
    void reserveSlots(std::list<uint16_t>&) {   // recursion terminator; enable_if return type elided
    }

    template <typename cpa, typename... Args>
    typename std::enable_if<cpa::broadcast_msg && !cpa::pool_msgless, void>::type
    reserveSlots(std::list<uint16_t>& slots, uint16_t poolThreadCount, uint64_t, Args&&... args) {
        auto available = this->buffer_.unusedConsumerIndexes();
        if (available.size() < poolThreadCount) {
            HMBDC_THROW(std::out_of_range
                , "Context remaining capacity = " << available.size()
                << ", consider increasing max_parallel_consumer");
        }
        for (uint16_t i = 0; i < poolThreadCount; ++i) {
            slots.push_back(available[i]);
            this->buffer_.reset(available[i]);
        }
        reserveSlots<cpa>(slots, std::forward<Args>(args)...);
    }

    template <typename cpa, typename... Args>
    typename std::enable_if<!cpa::broadcast_msg || cpa::pool_msgless, void>::type
    reserveSlots(std::list<uint16_t>& slots, uint16_t poolThreadCount, uint64_t, Args&&... args) {
        reserveSlots<cpa>(slots, std::forward<Args>(args)...);
    }
    template <typename cpa, typename CcClient, typename... Args>
    typename std::enable_if<cpa::broadcast_msg && !std::is_integral<CcClient>::value, void>::type
    reserveSlots(std::list<uint16_t>& slots, CcClient& c, uint64_t, Args&&... args) {
        const bool clientParticipateInMessaging =
            std::decay<CcClient>::type::INTERESTS_SIZE != 0;
        if (clientParticipateInMessaging) {
            auto available = this->buffer_.unusedConsumerIndexes();
            if (!available.size()) {
                HMBDC_THROW(std::out_of_range
                    , "Context reached capacity, consider increasing max_parallel_consumer");
            }
            this->buffer_.reset(available[0]);
            slots.push_back(available[0]);
        }
        reserveSlots<cpa>(slots, std::forward<Args>(args)...);
    }

    template <typename cpa, typename CcClient, typename... Args>
    typename std::enable_if<!cpa::broadcast_msg && !std::is_integral<CcClient>::value, void>::type
    reserveSlots(std::list<uint16_t>& slots, CcClient& c, uint64_t, Args&&... args) {
        reserveSlots<cpa>(slots, std::forward<Args>(args)...);   // reconstruction; body elided in the excerpt
    }
    template <typename cpa, typename... Args>
    typename std::enable_if<cpa::ipc, void>::type
    startWithContextProperty(bool kickoffThread, Args&&... args) {
        auto& lock = this->allocator_.fileLock();
        std::lock_guard<decltype(lock)> g(lock);
        std::list<uint16_t> slots;
        try {
            reserveSlots<cpa>(slots, args...);
            auto sc = slots;   // reconstruction of an elided line
            startWithContextPropertyImpl<cpa>(kickoffThread, sc, std::forward<Args>(args)...);
        } catch (std::out_of_range const&) {
            Base::markDead(this->buffer_, slots);
            throw;   // reconstruction of an elided line
        }
    }

    template <typename cpa, typename... Args>
    typename std::enable_if<!cpa::ipc, void>::type
    startWithContextProperty(bool kickoffThread, Args&&... args) {
        std::list<uint16_t> slots;
        try {
            reserveSlots<cpa>(slots, args...);
            auto sc = slots;   // reconstruction of an elided line
            startWithContextPropertyImpl<cpa>(kickoffThread, sc, std::forward<Args>(args)...);
        } catch (std::out_of_range const&) {
            Base::markDead(this->buffer_, slots);
            throw;   // reconstruction of an elided line
        }
    }
    template <typename cpa>
    typename std::enable_if<cpa::broadcast_msg && cpa::ipc, void>::type
    startWithContextPropertyImpl(bool kickoffThread, std::list<uint16_t>& slots) {
        if (ifIpcCreator_ && !purger_ && secondsBetweenPurge_) {
            purger_.reset(   // reconstruction of an elided line
                new StuckClientPurger<Buffer>(secondsBetweenPurge_, this->buffer_));
            startWithContextPropertyImpl<cpa>(kickoffThread, slots, *purger_, purgerCpuAffinityMask_);
        }
    }

    template <typename cpa>
    typename std::enable_if<!cpa::broadcast_msg || !cpa::ipc, void>::type
    startWithContextPropertyImpl(bool kickoffThread, std::list<uint16_t>& slots) {
    }
    template <typename cpa, typename... Args>
    typename std::enable_if<cpa::has_pool, void>::type
    startWithContextPropertyImpl(bool kickoffThread, std::list<uint16_t>& slots
        , uint16_t poolThreadCount, uint64_t poolThreadsCpuAffinityMask
        , Args&&... args) {
        if (poolThreadCount_) {
            HMBDC_THROW(std::out_of_range, "Context pool already started");
        }
        std::vector<uint16_t> sc(slots.begin(), slots.end());
        if (!poolThreadsCpuAffinityMask) {
            auto cpuCount = std::thread::hardware_concurrency();
            poolThreadsCpuAffinityMask =
                ((1ul << poolThreadCount) - 1u) << (hmbdcNumbers_.size() % cpuCount);
        }
        pool_->startAt(poolThreadCount, poolThreadsCpuAffinityMask, sc);
        poolThreadCount_ = poolThreadCount;
        while (poolThreadCount--) {
            if (!cpa::pool_msgless) {
                hmbdcNumbers_.push_back(*slots.begin());
                slots.pop_front();   // reconstruction of an elided line
            }
        }
        startWithContextPropertyImpl<cpa>(kickoffThread, slots, std::forward<Args>(args)...);
    }
    template <typename cpa, typename Client, typename... Args>
    typename std::enable_if<!std::is_integral<Client>::value, void>::type
    startWithContextPropertyImpl(bool kickoffThread, std::list<uint16_t>& slots
        , Client& c, uint64_t cpuAffinity
        , Args&&... args) {
        auto clientParticipateInMessaging =
            std::decay<Client>::type::INTERESTS_SIZE;
        uint16_t hmbdcNumber = 0xffffu;
        if (clientParticipateInMessaging && cpa::broadcast_msg) {
            hmbdcNumber = *slots.begin();
            c.hmbdcNumber = hmbdcNumber;
            slots.pop_front();   // reconstruction of an elided line
        }
        if (kickoffThread) {
            auto thrd = kickOffClientThread(   // reconstruction of an elided line
                c, cpuAffinity, hmbdcNumber, hmbdcNumbers_.size());
            threads_.push_back(std::move(thrd));
        }
        hmbdcNumbers_.push_back(hmbdcNumber);
        startWithContextPropertyImpl<cpa>(kickoffThread, slots, std::forward<Args>(args)...);
    }
    template <typename Client>
    auto kickOffClientThread(
        Client& c, uint64_t mask, uint16_t hmbdcNumber, uint16_t threadSerialNumber) {
        // the thread body below is reconstructed around the surviving
        // fragments of this excerpt
        auto thrd = std::thread([this, &c, mask, h = hmbdcNumber
            , threadSerialNumber]() {
            auto hmbdcNumber = h;
            std::string name;
            char const* schedule;
            int priority;
            auto clientParticipateInMessaging =
                std::decay<Client>::type::INTERESTS_SIZE;
            if (clientParticipateInMessaging) {
                name = "hmbdc" + std::to_string(hmbdcNumber);
            } else {
                name = c.hmbdcName();   // reconstruction; see Client::hmbdcName()
            }
            auto cpuAffinityMask = mask;
            std::tie(schedule, priority) = c.schedSpec();
            if (!schedule) schedule = "SCHED_OTHER";
            if (!cpuAffinityMask) {   // reconstruction of an elided condition
                auto cpuCount = std::thread::hardware_concurrency();
                cpuAffinityMask = 1ul << (threadSerialNumber % cpuCount);
            }
            hmbdc::os::configureCurrentThread(name.c_str(), cpuAffinityMask
                , schedule, priority);
            hmbdcNumber = clientParticipateInMessaging ? hmbdcNumber : 0xffffu;
            __atomic_add_fetch(this->pDispStartCount_, 1, __ATOMIC_RELEASE);
            try {
                // ... (client startup callback elided in this excerpt)
            } catch (std::exception const& e) {
                c.stopped(e);   // reconstruction; see Client::stopped()
            } catch (int code) {
                // ... (exit-code handling elided in this excerpt)
            }
            while (!this->stopped_ &&
                context_detail::runOnceImpl(hmbdcNumber, this->stopped_, this->buffer_, c)) {
            }
            if (this->stopped_) {
                if (clientParticipateInMessaging) {
                    // drain whatever remains so other parties do not block
                    typename Buffer::iterator begin, end;
                    size_t count;
                    if constexpr (cpa::broadcast_msg) {
                        count = this->buffer_.peek(hmbdcNumber, begin, end);
                        this->buffer_.wasteAfterPeek(hmbdcNumber, count);
                    } else {
                        count = this->buffer_.peek(begin, end);
                        this->buffer_.wasteAfterPeek(begin, count);
                    }
                }
            }
            if (clientParticipateInMessaging) context_detail::unblock(this->buffer_, hmbdcNumber);
        });
        return thrd;
    }
    template <typename Client>
    void primeForShmAtt(Client& c) {
        if constexpr (cpa::ipc && context_detail::has_ibmaProc<Client>::value) {
            if constexpr (!std::is_same<std::nullptr_t, decltype(c.ibmaProc)>::value) {
                if (!c.ibmaProc.hmbdcShmHandleToAddr) {
                    c.ibmaProc.hmbdcShmHandleToAddr = [&alloc = this->shmAttAllocator_]
                        (boost::interprocess::managed_shared_memory::handle_t h) {
                        return alloc->getAddr(h);
                    };
                    c.ibmaProc.hmbdcShmDeallocator = [&alloc = this->shmAttAllocator_]
                        (uint8_t* addr) {   // lambda parameter reconstructed
                        return alloc->deallocate(addr);
                    };
                }
            }
        }
    }
    uint16_t usedHmbdcCapacity_;
    std::vector<uint16_t> hmbdcNumbers_;
    bool stopped_ = false;   // reconstruction: flag used by the stop()/runOnce paths
    typename Pool::ptr pool_;
    using Threads = std::vector<std::thread>;
    Threads threads_;        // reconstruction: joined in joinWithContextProperty
    size_t poolThreadCount_;
    uint32_t secondsBetweenPurge_;
    bool const ifIpcCreator_ = false;
    uint64_t purgerCpuAffinityMask_;
    typename std::conditional<cpa::broadcast_msg && cpa::ipc
        , std::unique_ptr<StuckClientPurger<Buffer>>, uint32_t>::type purger_;
};

}} // namespace hmbdc::app