1 #include "hmbdc/Copyright.hpp" 3 #include "hmbdc/tips/DefaultUserConfig.hpp" 4 #include "hmbdc/tips/Messages.hpp" 5 #include "hmbdc/tips/TypeTagSet.hpp" 6 #include "hmbdc/tips/Node.hpp" 7 #include "hmbdc/app/BlockingContext.hpp" 8 #include "hmbdc/app/Context.hpp" 9 #include "hmbdc/app/Config.hpp" 10 #include "hmbdc/Exception.hpp" 11 #include "hmbdc/pattern/GuardedSingleton.hpp" 12 #include "hmbdc/MetaUtils.hpp" 16 #include <type_traits> 20 namespace hmbdc {
namespace tips {
31 template <
typename Protocol,
size_t MaxMessageSize = 1000>
34 max_message_size = MaxMessageSize
36 using protocol = Protocol;
46 using SendTransportEngine =
void*;
47 template <
typename Buffer,
typename AttachmentAllocator>
48 using RecvTransportEngine =
void*;
58 return cfg.
getExt<std::string>(
"tipsDomainNonNet");
70 using NoNet = net_property<NoProtocol, 0>;
82 template <u
int16_t IpcCapacity = 64,
size_t MaxMessageSize = 1000>
85 capacity = IpcCapacity,
86 max_message_size = MaxMessageSize
96 namespace domain_detail {
98 template <MessageC Message, bool canSerialize = has_toHmbdcSerialized<Message>::value>
100 using type = std::result_of_t<decltype(&Message::toHmbdcSerialized)(Message)>;
103 template <MessageC Message>
105 using type = Message;
108 template <MessageTupleC MessageTuple>
113 using type = std::tuple<>;
116 template <MessageC Message>
119 value = (std::is_trivially_destructible<Message>::value
120 || has_toHmbdcSerialized<Message>::value) ? 1:0,
124 template <MessageC Message, MessageC ...Messages>
127 using type =
typename std::conditional<is_ipcable<Message>::value
133 template <MessageC Message>
135 ipc_from(pid_t from, Message
const& m)
137 , hmbdcIpcFrom(from) {}
158 att->attachment = ::malloc(att->len);
160 ::free(hasAtt->attachment);
161 hasAtt->attachment =
nullptr;
163 return att->attachment;
194 template <MessageTupleC RecvMessageTupleIn
195 ,
typename IpcProp = NoIpc
196 ,
typename NetProp = NoNet
198 ,
typename AttachmentAllocator = DefaultAttachmentAllocator >
200 using IpcProperty = IpcProp;
201 using NetProperty = NetProp;
202 using NetProtocol =
typename NetProperty::protocol;
204 using RecvMessageTuple =
typename hmbdc::remove_duplicate<RecvMessageTupleIn>::type;
205 using ThreadCtx = NodeContext<RecvMessageTuple>;
207 ThreadCtx threadCtx_;
210 using NetableRecvMessages = IpcableRecvMessages;
213 run_pump_in_ipc_portal = IpcProperty::capacity != 0,
214 run_pump_in_thread_ctx = run_pump_in_ipc_portal
215 ? 0 : !std::is_same<NoNet, NetProperty>::value,
216 has_a_pump = run_pump_in_ipc_portal || run_pump_in_thread_ctx,
217 has_net_send_eng = !std::is_same<NoNet, NetProperty>::value,
218 has_net_recv_eng = !std::is_same<NoNet, NetProperty>::value
219 && std::tuple_size<NetableRecvMessages>::value,
224 using IpcSubscribeMessagesPossible = IpcableRecvMessages;
225 using IpcTransport =
typename std::conditional<
226 IpcProperty::capacity != 0
228 IpcProperty::max_message_size
235 std::optional<IpcTransport> ipcTransport_;
236 std::optional<os::ShmBasePtrAllocator> allocator_;
237 TypeTagSet* pOutboundSubscriptions_ =
nullptr;
240 ThreadCtx& threadCtx;
241 size_t dispCount = 0;
244 OneBuffer(ThreadCtx& threadCtx,
size_t maxItemSizeIn)
245 : threadCtx(threadCtx)
246 , maxItemSize_(maxItemSizeIn) {
248 size_t maxItemSize()
const {
252 template <MessageC Message>
255 if constexpr (has_toHmbdcUnserialized<Message>::value) {
256 static_assert(std::is_same<Message
257 , decltype(msg.toHmbdcUnserialized().toHmbdcSerialized())>::value
258 ,
"mising or wrong toHmbdcSerialized() func - cannot serialize");
259 threadCtx.send(msg.toHmbdcUnserialized());
263 if constexpr(std::is_base_of<hasMemoryAttachment, Message>::value) {
265 msg.hasMemoryAttachment::attachment =
nullptr;
266 msg.hasMemoryAttachment::len = 0;
271 threadCtx.sendJustBytesInPlace(tag, bytes, maxItemSize_, att);
274 att->attachment =
nullptr;
279 void put(
void* item,
size_t) {
281 , NetableRecvMessages>::type;
284 h.scratchpad().desc.flag = 0;
285 if (disp(*
this, h)) {
292 , NetableRecvMessages>::type;
294 item->scratchpad().desc.flag = app::hasMemoryAttachment::flag;
295 if (disp(*
this, *item)) {
299 item->payload.release();
303 template <
typename T>
void put(T& item) {put(&item,
sizeof(T));}
304 template <
typename T>
void putSome(T& item) {
305 put(&item, std::min(
sizeof(item), maxItemSize()));
311 std::optional<typename NetProtocol::SendTransportEngine> sendEng_;
312 using RecvTransportEngine
313 =
typename NetProtocol::template RecvTransportEngine<OneBuffer, AttachmentAllocator>;
314 std::optional<RecvTransportEngine> recvEng_;
317 std::string hmbdcName_;
320 typename ThreadCtx::ClientRegisterHandle handleInCtx;
325 , NetProperty::max_message_size != 0
326 ? NetProperty::max_message_size
327 : cfg.
getExt<uint32_t>(
"netMaxMessageSizeRuntime")
329 , hmbdcName_(cfg.
getExt<std::string>(
"pumpHmbdcName")) {
330 if constexpr (has_net_send_eng) {
331 uint32_t limit = NetProperty::max_message_size;
333 limit = cfg.
getExt<uint32_t>(
"netMaxMessageSizeRuntime");
335 sendEng_.emplace(cfg, limit);
337 if constexpr (has_net_recv_eng) {
338 recvEng_.emplace(cfg, netBuffer_);
342 template <
typename CcNode>
343 void subscribeFor(CcNode
const& node, uint16_t mod, uint16_t res) {
344 if constexpr (has_net_recv_eng) {
346 ,
typename CcNode::RecvMessageTuple>::type;
347 recvEng_->template subscribeFor<Messages>(node, mod, res);
351 size_t ipcSubscribingPartyCount(uint16_t tag)
const {
355 size_t netSubscribingPartyCount(uint16_t tag)
const {
356 auto res =
size_t{0};
357 if constexpr (has_net_send_eng) {
358 res += sendEng_->subscribingPartyDetectedCount(tag);
363 template <
typename CcNode>
364 void advertiseFor(CcNode
const& node, uint16_t mod, uint16_t res) {
365 if constexpr (has_net_send_eng) {
367 ,
typename CcNode::SendMessageTuple>::type;
368 sendEng_->template advertiseFor<Messages>(node, mod, res);
372 size_t netSendingPartyDetectedCount()
const {
373 if constexpr (has_net_recv_eng) {
374 return recvEng_->sessionsRemainingActive();
379 size_t netRecvingPartyDetectedCount()
const {
380 if constexpr (has_net_send_eng) {
381 return sendEng_->sessionsRemainingActive();
386 template <MessageC Message>
387 void send(Message&& message) {
388 using M =
typename std::decay<Message>::type;
389 using Mnet =
typename domain_detail::matching_ipcable<M>::type;
391 if constexpr (std::is_trivially_destructible<Mnet>::value) {
392 if constexpr (has_tipsDisableSendMask<M>::value) {
393 if (M::tipsDisableSendMask() & OVER_NETWORK)
return;
395 if constexpr (has_net_send_eng) {
396 if constexpr(has_toHmbdcSerialized<M>::value) {
397 static_assert(std::is_same<M
398 , decltype(message.toHmbdcSerialized().toHmbdcUnserialized())>::value
399 ,
"mising or wrong toHmbdcUnserialized() func - cannot convertback");
400 static_assert(NetProperty::max_message_size == 0
401 ||
sizeof(decltype(message.toHmbdcSerialized())) <= NetProperty::max_message_size
402 ,
"NetProperty::max_message_size is too small");
403 sendEng_->queue(message.toHmbdcSerialized());
405 static_assert(NetProperty::max_message_size == 0
406 ||
sizeof(message) <= NetProperty::max_message_size
407 ,
"NetProperty::max_message_size is too small");
408 sendEng_->queue(message);
414 void sendJustBytes(uint16_t tag,
void const* bytes,
size_t len
416 if constexpr (has_net_send_eng) {
417 sendEng_->queueJustBytes(tag, bytes, len, att);
421 char const* hmbdcName()
const {
422 return hmbdcName_.c_str();
425 void invokedCb(
size_t previousBatch)
override {
426 bool layback = !previousBatch;
427 if constexpr (has_net_send_eng) {
428 layback = layback && !sendEng_->bufferedMessageCount();
431 std::this_thread::yield();
434 if constexpr (has_net_recv_eng) {
435 netBuffer_.dispCount = 0;
438 if constexpr (has_net_send_eng) {
444 if constexpr (has_net_send_eng) {
450 template <
size_t MAX_MEMORY_ATTACHMENT>
454 uint8_t underlyingMessage[MAX_MEMORY_ATTACHMENT];
455 AttachmentAllocator attAllocator;
457 template <MessageC Message>
459 static_assert(
sizeof(Message) <= MAX_MEMORY_ATTACHMENT,
"");
460 if (hmbdc_unlikely(att->attachment)) {
461 HMBDC_THROW(std::logic_error,
"previous InBandMemoryAttachment not concluded");
464 if constexpr(Message::justBytes) {
465 memcpy(underlyingMessage, &ibma.underlyingMessage,
sizeof(underlyingMessage));
467 memcpy(underlyingMessage, &ibma.underlyingMessage,
sizeof(Message));
470 if (hmbdc_unlikely(!att->len)) {
471 att->attachment =
nullptr;
472 att->afterConsumedCleanupFunc =
nullptr;
475 }
else if (att->holdShmHandle<Message>()) {
476 if constexpr (app::has_hmbdcShmRefCount<Message>::value) {
477 auto shmAddr = hmbdcShmHandleToAddr(att->
shmHandle);
478 att->attachment = shmAddr;
479 att->
clientData[0] = (uint64_t)&hmbdcShmDeallocator;
480 static_assert(
sizeof(ibma.underlyingMessage.hmbdcShmRefCount) ==
sizeof(
size_t));
481 att->
clientData[1] = (uint64_t)&ibma.underlyingMessage.hmbdcShmRefCount;
482 if constexpr (Message::is_att_0cpyshm) {
484 auto hmbdcShmRefCount = (
size_t*)h->attachment;
486 && 0 == __atomic_sub_fetch(hmbdcShmRefCount, 1, __ATOMIC_RELEASE)) {
487 auto& hmbdcShmDeallocator
488 = *(std::function<void (uint8_t*)>*)h->clientData[0];
489 hmbdcShmDeallocator((uint8_t*)h->attachment);
492 }
else if constexpr (app::has_hmbdcShmRefCount<Message>::value) {
494 auto hmbdcShmRefCount = (
size_t*)h->clientData[1];
496 && 0 == __atomic_sub_fetch(hmbdcShmRefCount, 1, __ATOMIC_RELEASE)) {
497 auto& hmbdcShmDeallocator
498 = *(std::function<void (uint8_t*)>*)h->clientData[0];
499 hmbdcShmDeallocator((uint8_t*)h->attachment);
507 attAllocator(typeTagIn, att);
516 if (accSize < att->len) {
517 auto attBytes = ibms.seg;
518 auto copySize = std::min(n, att->len - accSize);
519 memcpy((
char*)att->attachment + accSize, attBytes, copySize);
521 return accSize == att->len;
526 uint16_t typeTag = 0;
528 std::function<void* (boost::interprocess::managed_shared_memory::handle_t)>
529 hmbdcShmHandleToAddr;
530 std::function<void (uint8_t*)> hmbdcShmDeallocator;
535 std::optional<typename NetProtocol::SendTransportEngine> sendEng_;
536 using RecvTransportEngine
537 =
typename NetProtocol::template RecvTransportEngine<OneBuffer, AttachmentAllocator>;
538 std::optional<RecvTransportEngine> recvEng_;
539 IpcTransport& ipcTransport_;
544 std::string hmbdcName_;
545 uint32_t pumpMaxBlockingTimeSec_;
549 std::max((
size_t)PumpInIpcPortal::MAX_MEMORY_ATTACHMENT, (
size_t)(64 * 1024))> ibmaProc;
550 pid_t
const hmbdcAvoidIpcFrom = getpid();
554 : ipcTransport_(ipcTransport)
557 , NetProperty::max_message_size != 0
558 ? NetProperty::max_message_size
559 : cfg.
getExt<uint32_t>(
"netMaxMessageSizeRuntime")
561 , pOutboundSubscriptions_(pOutboundSubscriptions)
562 , hmbdcName_(cfg.
getExt<std::string>(
"pumpHmbdcName"))
563 , pumpMaxBlockingTimeSec_(cfg.
getHex<
double>(
"pumpMaxBlockingTimeSec") * 1000000) {
564 pumpMaxBlockingTimeSec_ = std::min(1000000u, pumpMaxBlockingTimeSec_);
565 static_assert(IpcTransport::MAX_MESSAGE_SIZE == 0 || IpcTransport::MAX_MESSAGE_SIZE
568 if constexpr (has_net_send_eng) {
569 uint32_t limit = NetProperty::max_message_size;
571 limit = cfg.
getExt<uint32_t>(
"netMaxMessageSizeRuntime");
573 sendEng_.emplace(cfg, limit);
575 if constexpr (has_net_recv_eng) {
576 recvEng_.emplace(cfg, netBuffer_);
580 template <
typename CcNode>
581 void subscribeFor(CcNode
const& node, uint16_t mod, uint16_t res) {
583 ,
typename CcNode::RecvMessageTuple>::type;
584 inboundSubscriptions_.markSubsFor<Messages>(node, mod, res
585 , [
this](uint16_t tag) {
586 pOutboundSubscriptions_->add(tag);
589 if constexpr (has_net_recv_eng) {
590 recvEng_->template subscribeFor<Messages>(node, mod, res);
594 size_t ipcSubscribingPartyCount(uint16_t tag)
const {
595 return pOutboundSubscriptions_->check(tag) - inboundSubscriptions_.check(tag);
598 size_t netSubscribingPartyCount(uint16_t tag)
const {
599 auto res =
size_t{0};
600 if constexpr (has_net_send_eng) {
601 res += sendEng_->subscribingPartyDetectedCount(tag);
606 template <
typename CcNode>
607 void advertiseFor(CcNode
const& node, uint16_t mod, uint16_t res) {
609 ,
typename CcNode::SendMessageTuple>::type;
610 if constexpr (has_net_send_eng) {
611 sendEng_->template advertiseFor<Messages>(node, mod, res);
615 size_t netSendingPartyDetectedCount()
const {
616 if constexpr (has_net_recv_eng) {
617 return recvEng_->sessionsRemainingActive();
622 size_t netRecvingPartyDetectedCount()
const {
623 if constexpr (has_net_send_eng) {
624 return sendEng_->sessionsRemainingActive();
630 template <MessageC Message>
631 void send(Message&& message) {
632 using M =
typename std::decay<Message>::type;
633 using Mipc =
typename domain_detail::matching_ipcable<M>::type;
635 if constexpr (std::is_trivially_destructible<Mipc>::value) {
636 bool disableInterProcess =
false;
637 if constexpr (has_tipsDisableSendMask<M>::value) {
638 if (M::tipsDisableSendMask() & INTER_PROCESS) {
639 disableInterProcess =
true;
642 bool disableNet =
false;
643 if constexpr (!has_net_send_eng) {
645 }
else if constexpr (has_tipsDisableSendMask<M>::value) {
646 if (M::tipsDisableSendMask() & OVER_NETWORK) disableNet =
true;
649 std::optional<Mipc> serializedCached;
650 app::hasMemoryAttachment::AfterConsumedCleanupFunc afterConsumedCleanupFuncKept =
nullptr;
651 (void)afterConsumedCleanupFuncKept;
652 if (!disableInterProcess) {
654 auto intDiff = pOutboundSubscriptions_->check(message.getTypeTag())
655 - inboundSubscriptions_.check(message.getTypeTag());
659 if constexpr (has_toHmbdcSerialized<M>::value) {
660 static_assert(std::is_same<M
661 , decltype(message.toHmbdcSerialized().toHmbdcUnserialized())>::value
662 ,
"mising or wrong toHmbdcUnserialized() func - cannot convertback");
663 serializedCached.emplace(message.toHmbdcSerialized());
666 std::swap(afterConsumedCleanupFuncKept, serializedCached->afterConsumedCleanupFunc);
668 auto toSend = ToSendType{hmbdcAvoidIpcFrom, *serializedCached};
670 if constexpr (app::has_hmbdcShmRefCount<ToSendType>::value) {
671 toSend.hmbdcShmRefCount = intDiff;
672 if (ToSendType::is_att_0cpyshm && toSend.app::hasMemoryAttachment::attachment) {
673 auto hmbdc0cpyShmRefCount = (
size_t*)toSend.app::hasMemoryAttachment::attachment;
674 __atomic_add_fetch(hmbdc0cpyShmRefCount, intDiff, __ATOMIC_RELEASE);
677 ipcTransport_.send(std::move(toSend));
678 }
else if constexpr(std::is_base_of<app::hasMemoryAttachment, M>::value) {
679 auto toSend = ToSendType{hmbdcAvoidIpcFrom, message};
680 if constexpr (app::has_hmbdcShmRefCount<ToSendType>::value) {
681 toSend.hmbdcShmRefCount = intDiff;
682 if (ToSendType::is_att_0cpyshm && toSend.app::hasMemoryAttachment::attachment) {
683 auto hmbdc0cpyShmRefCount = (
size_t*)toSend.app::hasMemoryAttachment::attachment;
684 __atomic_add_fetch(hmbdc0cpyShmRefCount, intDiff, __ATOMIC_RELEASE);
687 ipcTransport_.send(std::move(toSend));
689 ipcTransport_.template sendInPlace<ToSendType>(hmbdcAvoidIpcFrom, message);
694 if (disableNet)
return;
696 if constexpr (has_net_send_eng) {
697 if constexpr(has_toHmbdcSerialized<M>::value) {
699 static_assert(NetProperty::max_message_size == 0
700 ||
sizeof(Mipc) <= NetProperty::max_message_size
701 ,
"NetProperty::max_message_size is too small");
702 if (serializedCached) {
704 std::swap(afterConsumedCleanupFuncKept, serializedCached->afterConsumedCleanupFunc);
705 sendEng_->queue(*serializedCached);
707 sendEng_->queue(message.toHmbdcSerialized());
710 static_assert(NetProperty::max_message_size == 0
711 ||
sizeof(Mipc) <= NetProperty::max_message_size
712 ,
"NetProperty::max_message_size is too small");
713 sendEng_->queue(message);
719 void sendJustBytes(uint16_t tag,
void const* bytes,
size_t len
721 app::hasMemoryAttachment::AfterConsumedCleanupFunc afterConsumedCleanupFuncKept
722 = att ? att->afterConsumedCleanupFunc :
nullptr;
723 (void)afterConsumedCleanupFuncKept;
725 if (pOutboundSubscriptions_->check(tag) > inboundSubscriptions_.check(tag)) {
727 if (has_net_send_eng && att) {
729 att->afterConsumedCleanupFunc =
nullptr;
731 ipcTransport_.template sendJustBytesInPlace<ipc_from<app::JustBytes>>(
732 tag, bytes, len, att, hmbdcAvoidIpcFrom);
735 if constexpr (has_net_send_eng) {
737 att->afterConsumedCleanupFunc = afterConsumedCleanupFuncKept;
739 sendEng_->queueJustBytes(tag, bytes, len, att);
743 template <
typename Iterator>
744 size_t handleRangeImpl(Iterator it,
745 Iterator end, uint16_t threadId) {
747 for (;hmbdc_likely(!this->batchDone_ && it != end); ++it) {
749 auto tagInEffect = h.typeTag;
751 tagInEffect = h.scratchpad().ipc.hd.inbandUnderlyingTypeTag;
753 if (hmbdc_unlikely(tagInEffect > app::LastSystemMessage::typeTag
754 && !inboundSubscriptions_.check(tagInEffect))) {
757 ,
typename PumpInIpcPortal::Interests>()(
762 this->batchDone_ =
false;
766 char const* hmbdcName()
const {
767 return hmbdcName_.c_str();
770 void invokedCb(
size_t previousBatch)
override {
771 bool layback = !previousBatch;
772 if constexpr (has_net_send_eng) {
773 layback = layback && !sendEng_->bufferedMessageCount();
776 if (pumpMaxBlockingTimeSec_) {
777 usleep(pumpMaxBlockingTimeSec_);
779 std::this_thread::yield();
782 if constexpr (has_net_recv_eng) {
785 if constexpr (has_net_send_eng) {
790 template <MessageC Message>
791 void handleMessageCb(Message& m) {
794 if constexpr (has_toHmbdcUnserialized<Message>::value) {
795 static_assert(std::is_same<Message
796 , decltype(msg.toHmbdcUnserialized().toHmbdcSerialized())>::value
797 ,
"mising or wrong toHmbdcSerialized() func - cannot serialize");
798 outCtx_.send(msg.toHmbdcUnserialized());
802 if constexpr(std::is_base_of<hasMemoryAttachment, Message>::value) {
803 msg.hasMemoryAttachment::attachment =
nullptr;
804 msg.hasMemoryAttachment::len = 0;
809 outCtx_.sendJustBytesInPlace(tag, bytes, ipcTransport_.maxMessageSize(), att);
811 att->attachment =
nullptr;
816 bool droppedCb()
override {
817 inboundSubscriptions_.exportTo([
this](uint16_t tag, uint8_t) {
818 pOutboundSubscriptions_->sub(tag);
824 if constexpr (has_net_send_eng) {
831 using Pump =
typename std::conditional<
832 run_pump_in_ipc_portal
834 ,
typename std::conditional<
835 run_pump_in_thread_ctx
840 std::deque<Pump> pumps_;
842 bool ownIpcTransport_ =
false;
854 auto pumpRunMode = config_.
getExt<std::string>(
"pumpRunMode");
855 if constexpr (run_pump_in_ipc_portal) {
856 auto ownershipStr = config_.
getExt<std::string>(
"ipcTransportOwnership");
858 if (ownershipStr ==
"own") {
860 }
else if (ownershipStr ==
"attach") {
862 }
else if (ownershipStr ==
"optional") {
865 HMBDC_THROW(std::out_of_range,
"ipcTransportOwnership unsupported: " << ownershipStr);
867 ipcTransport_.emplace(ownership
868 , NetProtocol::instance().getTipsDomainName(config_).c_str()
869 , config_.
getExt<uint32_t>(
"ipcMessageQueueSizePower2Num")
871 , IpcTransport::MAX_MESSAGE_SIZE != 0
872 ? IpcTransport::MAX_MESSAGE_SIZE
873 : config_.
getExt<
size_t>(
"ipcMaxMessageSizeRuntime")
875 , 0xfffffffffffffffful
877 , config_.
getExt<
size_t>(
"ipcShmForAttPoolSize")
879 ownIpcTransport_ = ownership > 0;
881 allocator_.emplace((NetProtocol::instance().getTipsDomainName(config_) +
"-ipcsubs").c_str()
883 ,
sizeof(*pOutboundSubscriptions_)
885 pOutboundSubscriptions_ = allocator_->template allocate<TypeTagSet>(SMP_CACHE_BYTES);
887 ipcTransport_->setSecondsBetweenPurge(
888 config_.
getExt<uint32_t>(
"ipcPurgeIntervalSeconds"));
890 auto pumpCount = config_.
getExt<uint32_t>(
"pumpCount");
891 if (pumpCount > 64) {
892 HMBDC_THROW(std::out_of_range,
"pumpCount > 64 is not suppported");
896 allocator_.emplace((NetProtocol::instance().getTipsDomainName(config_) +
"-ipcsubs").c_str()
898 ,
sizeof(*pOutboundSubscriptions_)
901 pOutboundSubscriptions_ = allocator_->template allocate<TypeTagSet>(SMP_CACHE_BYTES);
903 for (
auto i = 0u; i < config_.
getExt<uint32_t>(
"pumpCount"); ++i) {
904 auto& pump = pumps_.emplace_back(*ipcTransport_, pOutboundSubscriptions_, threadCtx_, config_);
905 if (pumpRunMode ==
"auto") {
906 ipcTransport_->start(pump
907 , config_.
getHex<uint64_t>(
"pumpCpuAffinityHex"));
908 }
else if (pumpRunMode ==
"manual") {
909 ipcTransport_->registerToRun(pump
910 , config_.
getHex<uint64_t>(
"pumpCpuAffinityHex"));
911 }
else if (pumpRunMode ==
"delayed") {
913 HMBDC_THROW(std::out_of_range,
"pumpRunMode=" << pumpRunMode <<
" not supported");
916 }
else if constexpr (run_pump_in_thread_ctx) {
917 for (
auto i = 0u; i < config_.
getExt<uint32_t>(
"pumpCount"); ++i) {
918 auto& pump = pumps_.emplace_back(threadCtx_, config_);
919 if (pumpRunMode ==
"auto") {
920 threadCtx_.start(pump, 0, 0
921 , config_.
getHex<uint64_t>(
"pumpCpuAffinityHex")
923 }
else if (pumpRunMode ==
"manual") {
924 pump.handleInCtx = threadCtx_.registerToRun(pump, 0, 0);
925 }
else if (pumpRunMode ==
"delayed") {
927 HMBDC_THROW(std::out_of_range,
"pumpRunMode=" << pumpRunMode <<
" not supported");
947 template <
typename... Args>
948 bool runOnce(
size_t pumpIndex, Args&& ...args) {
949 auto& pump = pumps_[pumpIndex];
950 if constexpr (run_pump_in_ipc_portal) {
951 return ipcTransport_->runOnce(pump, std::forward<Args>(args)...);
952 }
else if constexpr (run_pump_in_thread_ctx) {
953 return threadCtx_.runOnce(pump.handle, pump, std::forward<Args>(args)...);
966 template <
typename CcNode>
969 ,
"the node expecting messages Domain not covering");
970 if constexpr ((run_pump_in_ipc_portal || run_pump_in_thread_ctx)) {
971 for (uint16_t i = 0u; i < pumps_.size(); ++i) {
972 pumps_[i].template subscribeFor(node, (uint16_t)pumps_.size(), i);
973 pumps_[i].template advertiseFor(node, (uint16_t)pumps_.size(), i);
984 template <
typename SendMessageTuple>
1000 template <
typename Node>
1002 ,
size_t capacity = 1024
1004 , uint64_t cpuAffinity = 0
1006 if (std::tuple_size<typename Node::Interests>::value
1008 HMBDC_THROW(std::out_of_range,
"capacity cannot be 0 when receiving messages");
1010 node.updateSubscription();
1011 if constexpr (Node::manual_subscribe ==
false) {
1015 threadCtx_.start(node, capacity, node.maxMessageSize()
1016 , cpuAffinity, maxBlockingTime
1017 , [&node](
auto && ...args) {
1018 return node.ifDeliver(std::forward<decltype(args)>(args)...);
1028 auto pumpRunMode = config_.
getExt<std::string>(
"pumpRunMode");
1029 if (pumpRunMode ==
"delayed") {
1030 if constexpr (run_pump_in_ipc_portal) {
1031 for (
auto& pump : pumps_) {
1032 ipcTransport_->start(pump
1033 , config_.
getHex<uint64_t>(
"pumpCpuAffinityHex"));
1035 }
else if constexpr (run_pump_in_thread_ctx) {
1036 for (
auto& pump : pumps_) {
1037 threadCtx_.start(pump, 0, 0
1038 , config_.
getHex<uint64_t>(
"pumpCpuAffinityHex")
1042 HMBDC_THROW(std::runtime_error,
"pumpRunMode=" << pumpRunMode);
1061 template <
typename LoadSharingNodePtrIt>
1062 void startPool(LoadSharingNodePtrIt begin, LoadSharingNodePtrIt end
1063 ,
size_t capacity = 1024
1065 , uint64_t cpuAffinity = 0) {
1066 using Node =
typename std::decay<decltype(**LoadSharingNodePtrIt())>::type;
1067 auto maxItemSize = (*begin)->maxMessageSize();
1069 if (std::tuple_size<typename Node::Interests>::value
1071 HMBDC_THROW(std::out_of_range,
"capacity cannot be 0 when receiving messages");
1073 for (
auto it = begin; it != end; it++) {
1075 node.updateSubscription();
1076 if constexpr (Node::manual_subscribe ==
false) {
1081 threadCtx_.start(begin, end, capacity, maxItemSize, cpuAffinity, maxBlockingTime
1082 , [&node = **begin](
auto && ...args) {
1083 return node.ifDeliver(std::forward<decltype(args)>(args)...);
1094 if constexpr (run_pump_in_ipc_portal) {
1095 return ipcTransport_->dispatchingStartedCount();
1106 if constexpr (has_a_pump) {
1108 for (
auto& pump : pumps_) {
1109 res += pump.netSendingPartyDetectedCount();
1123 if constexpr (has_a_pump) {
1125 for (
auto& pump : pumps_) {
1126 res += pump.netRecvingPartyDetectedCount();
1141 return pumps_[tag % pumps_.size()].ipcSubscribingPartyCount(tag);
1152 return pumps_[tag % pumps_.size()].netSubscribingPartyCount(tag);
1169 template <MessageC Message>
1171 bool disableInterThread =
false;
1172 using M =
typename std::decay<Message>::type;
1173 static_assert((
int)M::typeSortIndex > (
int)app::LastSystemMessage::typeTag);
1175 if constexpr (has_tipsDisableSendMask<M>::value) {
1176 disableInterThread = M::tipsDisableSendMask() & INTER_THREAD;
1178 if (!disableInterThread) {
1181 if constexpr (run_pump_in_ipc_portal || run_pump_in_thread_ctx) {
1182 pumps_[m.getTypeTag() % pumps_.size()].send(std::forward<Message>(m));
1205 if (tag > app::LastSystemMessage::typeTag) {
1206 threadCtx_.sendJustBytesInPlace(tag, bytes, len, att);
1207 if constexpr (run_pump_in_ipc_portal || run_pump_in_thread_ctx) {
1208 pumps_[tag % pumps_.size()].sendJustBytes(tag, bytes, len, att);
1226 template <MessageForwardIterC ForwardIt>
1228 using M =
typename std::decay<decltype(*(ForwardIt()))>::type;
1229 bool disableInterThread =
false;
1230 if constexpr (has_tipsDisableSendMask<M>::value) {
1231 disableInterThread = M::tipsDisableSendMask() & INTER_THREAD;
1233 if (!disableInterThread) {
1234 threadCtx_.send(begin, n);
1236 if constexpr (run_pump_in_ipc_portal || run_pump_in_thread_ctx) {
1239 pumps_[m.getTypeTag() % pumps_.size()]->send(*begin++);
1256 template <MessageC Message,
typename T,
typename ...Args>
1258 ,
size_t actualSize, Args&& ...args) {
1259 static_assert(offsetof(T, hmbdc0cpyShmRefCount) == 0);
1260 static_assert(std::is_same<decltype(T::hmbdc0cpyShmRefCount),
size_t>::value);
1261 if (actualSize <
sizeof(T)) {
1262 HMBDC_THROW(std::out_of_range,
"too small size for type " << actualSize);
1264 att.attachmentSp = ipcTransport_->template allocateInShm<T>(
1265 actualSize, std::forward<Args>(args)...);
1266 att.attachmentSp->hmbdc0cpyShmRefCount = 1;
1267 att.len = actualSize;
1277 return ownIpcTransport_;
1285 if constexpr (run_pump_in_ipc_portal) {
1286 ipcTransport_->stop();
1290 if constexpr (run_pump_in_ipc_portal || run_pump_in_thread_ctx) {
1291 for (
auto& pump : pumps_) {
1302 if constexpr (run_pump_in_ipc_portal) {
1303 ipcTransport_->join();
1315 template <MessageC Message>
1317 template <
typename ...Args>
1320 , payload(std::forward<Args>(args)...) {
1321 if constexpr (std::is_base_of<hasMemoryAttachment, Message>::value) {
1322 this->scratchpad().desc.flag = hasMemoryAttachment::flag;
1324 if constexpr (Message::hasRange) {
1325 this->typeTag = payload.getTypeTag();
1327 this->typeTag = Message::typeTag;
1329 this->scratchpad().ipc.hd.from = from;
1335 if constexpr (std::is_base_of<hasMemoryAttachment, Message>::value) {
1336 this->scratchpad().desc.flag = hasMemoryAttachment::flag;
1338 this->scratchpad().ipc.hd.from = m.hmbdcIpcFrom;
1343 std::ostream& operator << (std::ostream& os,
MessageWrap const & w) {
1344 return os << static_cast<MessageHead const&>(w) <<
' ' << w.payload;
1348 template <MessageC Message>
1350 tips::domain_detail::ipc_from<Message>>> :
MessageHead {
1359 this->scratchpad().ipc.hd.from = m.hmbdcIpcFrom;
1360 this->scratchpad().ipc.hd.inbandUnderlyingTypeTag = m.getTypeTag();
1365 std::ostream& operator << (std::ostream& os,
MessageWrap const & w) {
1366 return os << static_cast<MessageHead const&>(w) <<
' ' << w.payload;
1373 , pid_t hmbdcIpcFrom)
1375 , payload(bytes, len) {
1377 this->scratchpad().desc.flag = hasMemoryAttachment::flag;
1379 this->scratchpad().ipc.hd.from = hmbdcIpcFrom;
1384 std::ostream& operator << (std::ostream& os,
MessageWrap const & w) {
1385 return os << static_cast<MessageHead const&>(w) <<
" *";
1391 tips::domain_detail::ipc_from<JustBytes>>> :
MessageHead {
1393 , pid_t hmbdcIpcFrom)
1395 , payload(bytes, len) {
1400 this->scratchpad().ipc.hd.from = hmbdcIpcFrom;
1401 this->scratchpad().ipc.hd.inbandUnderlyingTypeTag = tag;
1406 std::ostream& operator << (std::ostream& os,
MessageWrap const & w) {
1407 return os << static_cast<MessageHead const&>(w) <<
' ' << w.payload;
T getExt(const path_type ¶m, bool throwIfMissing=true) const
get a value from the config
Definition: Config.hpp:238
Definition: Domain.hpp:533
std::string getTipsDomainName(app::Config const &cfg)
construct the host-wide unique TIPS domain name from a configure file
Definition: Domain.hpp:57
Definition: MetaUtils.hpp:252
Definition: Domain.hpp:451
class to hold an hmbdc configuration
Definition: Config.hpp:44
void setAdditionalFallbackConfig(Config const &c)
set additional defaults
Definition: Config.hpp:153
void startDelayedPumping()
if pumpRunMode is set to be delayed this function start all the pumps
Definition: Domain.hpp:1027
boost::interprocess::managed_shared_memory ::handle_t shmHandle
Definition: Message.hpp:169
void publish(Message &&m)
publish a message through this Domain, all the Nodes in the TIPS domain could get it if it subscribed...
Definition: Domain.hpp:1170
Definition: TypedString.hpp:84
Placeholder for the Protocol within net_property that turns off network communication at compile time...
Definition: Domain.hpp:44
bool runOnce(size_t pumpIndex, Args &&...args)
manually drive the domain's pumps
Definition: Domain.hpp:948
size_t netSubscribingPartyCount(uint16_t tag) const
how many processes connecting thru network have the message tag marked as subscribed ...
Definition: Domain.hpp:1151
Definition: Domain.hpp:109
base for the Singleton that works with SingletonGuardian
Definition: GuardedSingleton.hpp:53
RAII representing the lifespan of the underlying Singleton which also guarantees the singularity of u...
Definition: GuardedSingleton.hpp:20
size_t netSendingPartyDetectedCount() const
how many network parties (processes) are ready to send messages to this Domain
Definition: Domain.hpp:1105
Definition: Domain.hpp:99
Context template parameter indicating the Context is ipc enabled and it can create or be attached to ...
Definition: Context.hpp:108
void publishJustBytes(uint16_t tag, void const *bytes, size_t len, app::hasMemoryAttachment *att)
publish a message using byte format on this Domain - not recommended
Definition: Domain.hpp:1203
void addPubSubFor(CcNode const &node)
configure the subscriptions and advertisement for a Node
Definition: Domain.hpp:967
Definition: MetaUtils.hpp:295
void startPool(LoadSharingNodePtrIt begin, LoadSharingNodePtrIt end, size_t capacity=1024, time::Duration maxBlockingTime=time::Duration::seconds(1), uint64_t cpuAffinity=0)
start a group of Nodes as a thread pool within the Domain that collectively processing messages in a ...
Definition: Domain.hpp:1062
Definition: MessageDispacher.hpp:184
template that Domain uses for the IPC communication properties
Definition: Domain.hpp:83
void join()
wait for all the Node threads to stop
Definition: Domain.hpp:1301
Definition: MetaUtils.hpp:239
Definition: Domain.hpp:309
A BlockingContext is like a media object that facilitates the communications for the Clients that it ...
Definition: BlockingContext.hpp:203
when Domain (its pumps) receives a hasMemoryAttachment, there is a need to allocate desired memory t...
Definition: Domain.hpp:148
Definition: Domain.hpp:239
Definition: Message.hpp:212
size_t ipcPartyDetectedCount() const
how many IPC parties (processes) have been detected by this context
Definition: Domain.hpp:1093
bool cache(app::InBandHasMemoryAttachment< Message > const &ibma, uint16_t typeTagIn)
Definition: Domain.hpp:458
void handleMessageCb(Message &msg)
Definition: Domain.hpp:253
A special type of message only used on the receiving side.
Definition: Message.hpp:341
Messages published on a TIPS pub/sub domain reach all the Nodes within that domain based on their sub...
Definition: Domain.hpp:199
Definition: Domain.hpp:134
void * operator()(uint16_t typeTag, app::hasMemoryAttachment *att)
fill in hasMemoryAttachment so it holds the desired memory and the incoming attachment can be held ...
Definition: Domain.hpp:157
Definition: Client.hpp:294
size_t ipcSubscribingPartyCount(uint16_t tag) const
how many nodes on local machine have the message tag marked as subscribed excluding Nodes managed by ...
Definition: Domain.hpp:1140
Context template parameter indicating each message is sent to all clients within the Context...
Definition: Context.hpp:57
Definition: Message.hpp:459
Domain(app::Config const &cfg)
Construct a new Domain object.
Definition: Domain.hpp:851
void putAtt(app::MessageWrap< app::hasMemoryAttachment > *item, size_t)
Definition: Domain.hpp:290
void allocateInShmFor0cpy(hasSharedPtrAttachment< Message, T > &att, size_t actualSize, Args &&...args)
allocate in shm for a hasSharedPtrAttachment to be published later The release of it is auto handled...
Definition: Domain.hpp:1257
A Context is like a media object that facilitates the communications for the Clients that it is holdi...
Definition: Context.hpp:765
Definition: MetaUtils.hpp:67
Definition: Message.hpp:263
a Node is a thread of execution that can subscribe and receive Messages
Definition: Node.hpp:51
Definition: MetaUtils.hpp:100
template that Domain uses for the network communication properties
Definition: Domain.hpp:32
void publish(ForwardIt begin, size_t n)
publish a sequence of message of the same type through this Domain, all the Nodes in the TIPS domain ...
Definition: Domain.hpp:1227
Definition: MetaUtils.hpp:226
bool ownIpcTransport() const
if the Domain object owns the IPC transport
Definition: Domain.hpp:1276
void stop()
stop the Domain and its Message delivery functions
Definition: Domain.hpp:1284
void start(Node &node, size_t capacity=1024, time::Duration maxBlockingTime=time::Duration::seconds(1), uint64_t cpuAffinity=0)
start a Node within this Domain as a thread - handles its subscribing here too
Definition: Domain.hpp:1001
void handleJustBytesCb(uint16_t tag, void const *bytes, app::hasMemoryAttachment *att)
Definition: Domain.hpp:270
size_t netRecvingPartyDetectedCount() const
how many network parties (processes) are ready to receive messages from this Domain ...
Definition: Domain.hpp:1122
void addPub()
configure the advertisement with message types directly This is when you do not want to involve a Nod...
Definition: Domain.hpp:985
if a specific hmbdc network transport (for example tcpcast, rmcast, and rnetmap) supports message wit...
Definition: Message.hpp:125
T getHex(boost::property_tree::ptree::path_type const ¶m) const
get a number value in hex format
Definition: Config.hpp:302
uint64_t clientData[2]
byte size of the above
Definition: Message.hpp:183
Definition: Message.hpp:430
internal use
Definition: Messages.hpp:42
Definition: Domain.hpp:117