hmbdc
simplify-high-performance-messaging-programming
Domain.hpp
1 #include "hmbdc/Copyright.hpp"
2 #pragma once
3 #include "hmbdc/tips/DefaultUserConfig.hpp"
4 #include "hmbdc/tips/Messages.hpp"
5 #include "hmbdc/tips/TypeTagSet.hpp"
6 #include "hmbdc/tips/Node.hpp"
7 #include "hmbdc/app/BlockingContext.hpp"
8 #include "hmbdc/app/Context.hpp"
9 #include "hmbdc/app/Config.hpp"
10 #include "hmbdc/Exception.hpp"
11 #include "hmbdc/pattern/GuardedSingleton.hpp"
12 #include "hmbdc/MetaUtils.hpp"
13 
14 #include <deque>
15 #include <optional>
16 #include <type_traits>
17 #include <unistd.h>
18 #include <stdlib.h>
19 
20 namespace hmbdc { namespace tips {
21 
22 /**
23  * @brief template that Domain uses for the network communication properties
24  *
25  * @tparam Protocol the TIPS network protocol in use - tcpcast, rmcast etc
 26  * @tparam MaxMessageSize the compile-time max size of a network-transferred
 27  * message; this does not include the attachment size. It can simply be set to
 28  * something like hmbdc::max_size_in_tuple<AllSendMessagesTuple>.
 29  * If set to 0, the value becomes runtime configured.
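 *
 * For example (a sketch; tcpcast::Protocol and AllSendMessagesTuple stand in
 * for the protocol and send-message list actually in use):
 * @code
 * using MyNetProp = net_property<tcpcast::Protocol
 *     , hmbdc::max_size_in_tuple<AllSendMessagesTuple>::value>;
 * @endcode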
30  */
31 template <typename Protocol, size_t MaxMessageSize = 1000>
32 struct net_property {
33  enum {
34  max_message_size = MaxMessageSize
35  };
36  using protocol = Protocol;
37 };
38 
39 /**
40  * @brief Placeholder for the Protocol within net_property
41  * that turns off network communication at compile time
42  *
43  */
44 struct NoProtocol
45 : pattern::GuardedSingleton<NoProtocol> {
46  using SendTransportEngine = void*;
47  template <typename Buffer, typename AttachmentAllocator>
48  using RecvTransportEngine = void*;
49 
50  /**
51  * @brief construct the host-wide unique TIPS domain name
 52  * from a configuration
 53  *
 54  * @param cfg the configuration that contains network specifics
55  * @return std::string
56  */
57  std::string getTipsDomainName(app::Config const& cfg) {
58  return cfg.getExt<std::string>("tipsDomainNonNet");
59  }
60 
 61  private:
 62  friend pattern::SingletonGuardian<NoProtocol>;
 63  NoProtocol(){}
64 };
65 
66 /**
67  * @brief net_property that turns off network communication at compile time
68  *
69  */
70 using NoNet = net_property<NoProtocol, 0>;
71 
72 /**
73  * @brief template that Domain uses for the IPC communication properties
74  *
 75  * @tparam IpcCapacity power of 2 value starting from 4 (4, 8, 16 ... 256).
 76  * It specifies up to how many IPC parties (processes) are communicating
 77  * @tparam MaxMessageSize the compile-time max size of an IPC-transferred
 78  * message; this does not include the attachment size. It can simply be set to
 79  * something like hmbdc::max_size_in_tuple<AllSendMessagesTuple>.
 80  * If set to 0, the value becomes runtime configured.
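 *
 * For example (a sketch): allow up to 64 IPC processes exchanging messages of
 * up to 1000 bytes each:
 * @code
 * using MyIpcProp = ipc_property<64, 1000>;
 * @endcode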
81  */
82 template <uint16_t IpcCapacity = 64, size_t MaxMessageSize = 1000>
83 struct ipc_property {
84  enum {
85  capacity = IpcCapacity,
86  max_message_size = MaxMessageSize
87  };
88 };
89 
90 /**
91  * @brief ipc_property that turns off IPC communication at compile time
92  *
93  */
 94 using NoIpc = ipc_property<0, 0>;
 95 
96 namespace domain_detail {
97 // HMBDC_CLASS_HAS_DECLARE(upgradeToHasSharedPtrAttachment);
 98 template <MessageC Message, bool canSerialize = has_toHmbdcSerialized<Message>::value>
 99 struct matching_ipcable {
 100  using type = std::result_of_t<decltype(&Message::toHmbdcSerialized)(Message)>;
101 };
102 
103 template <MessageC Message>
104 struct matching_ipcable<Message, false> {
105  using type = Message;
106 };
107 
108 template <MessageTupleC MessageTuple>
 109 struct recv_ipc_converted;
 110 
111 template <>
112 struct recv_ipc_converted<std::tuple<>> {
113  using type = std::tuple<>;
114 };
115 
116 template <MessageC Message>
117 struct is_ipcable {
118  enum {
119  value = (std::is_trivially_destructible<Message>::value
120  || has_toHmbdcSerialized<Message>::value) ? 1:0,
121  };
122 };
123 
124 template <MessageC Message, MessageC ...Messages>
125 struct recv_ipc_converted<std::tuple<Message, Messages...>> {
126  using next = typename recv_ipc_converted<std::tuple<Messages...>>::type;
127  using type = typename std::conditional<is_ipcable<Message>::value
 128  , typename add_to_tuple<typename matching_ipcable<Message>::type, next>::type
 129  , next
130  >::type;
131 };
132 
133 template <MessageC Message>
134 struct ipc_from : Message {
135  ipc_from(pid_t from, Message const& m)
136  : Message(m)
137  , hmbdcIpcFrom(from) {}
138  pid_t hmbdcIpcFrom;
139 };
140 
141 } //domain_detail
142 
143 /**
 144  * @brief when Domain (its pumps) receives a hasMemoryAttachment, there is a need
145  * to allocate desired memory to hold the attachment bytes and release it after consumed.
146  * This is the policy type dictating how that is done by default - using malloc/free
147  */
 148 struct DefaultAttachmentAllocator {
 149  /**
150  * @brief fill in hasMemoryAttachment so it holds the desired
 151  * memory so that the incoming attachment can be held
152  *
153  * @param typeTag the hasMemoryAttachment message type tag
154  * @param att the hasMemoryAttachment struct to be filled in
155  * @return the allocated memory
156  */
157  void* operator()(uint16_t typeTag, app::hasMemoryAttachment* att) {
158  att->attachment = ::malloc(att->len);
159  att->afterConsumedCleanupFunc = [](app::hasMemoryAttachment* hasAtt) {
160  ::free(hasAtt->attachment);
161  hasAtt->attachment = nullptr;
162  };
163  return att->attachment;
164  }
165 };
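
/* An AttachmentAllocator policy is any type callable like the above; a sketch
   of a hypothetical alternative using new[]/delete[] instead of malloc/free:

   struct ArrayNewAttachmentAllocator {
       void* operator()(uint16_t, app::hasMemoryAttachment* att) {
           att->attachment = new uint8_t[att->len];
           att->afterConsumedCleanupFunc = [](app::hasMemoryAttachment* h) {
               delete [] static_cast<uint8_t*>(h->attachment);
               h->attachment = nullptr;
           };
           return att->attachment;
       }
   };
*/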
166 
167 /**
168  * @brief Messages published on a TIPS pub/sub domain reach all the Nodes
169  * within that domain based on their subscriptions.
 170  * This class represents a TIPS domain's handle / interface / facade in its process.
171  * Typically - by recommendation - there is just a single Domain object for a specific TIPS domain
172  * within a process.
173  *
 174  * @details A Domain object also manages a group of Nodes and their communication
 175  * across the TIPS domain that this Domain object maps to.
 176  * The TIPS domain is determined by the network configurations,
 177  * such as which NIC interface and multicast addresses are configured -
 178  * see getTipsDomainName of the network transport protocol
179  *
180  * A Node can only be managed (be started) within a single Domain.
181  * A Domain instance can only be configured to map to a single TIPS domain
182  * Multiple Domain objects could be mapped to a single TIPS domain - in one (not recommended) or
183  * multiple processes on the same or different networked hosts.
184  *
185  * @tparam RecvMessageTupleIn a std tuple that lists all the receiving message types
186  * Any inbound messages (to the Domain) that are not within this list are dropped without
 187  * being delivered to the Nodes in this Domain
 188  * @tparam IpcProp IPC property - see ipc_property template
189  * @tparam NetProp Network communication properties - see net_property template
190  * @tparam NodeContext the template that manages the nodes and accepts the inter thread
191  * messages
192  * @tparam AttachmentAllocator the memory allocation policy for attachment - see DefaultAttachmentAllocator
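 *
 * A minimal usage sketch (assuming the tcpcast protocol and hypothetical
 * MyNode / MyMsg types declared elsewhere; the SingletonGuardian provides the
 * RAII scope hmbdc singletons such as the protocol require):
 * @code
 * pattern::SingletonGuardian<tcpcast::Protocol> g;
 * app::Config cfg; // populated from the user's JSON configuration
 * Domain<std::tuple<MyMsg>
 *     , ipc_property<>
 *     , net_property<tcpcast::Protocol>> domain{cfg};
 * MyNode node;
 * domain.start(node);       // node starts receiving its subscribed messages
 * domain.publish(MyMsg{});
 * domain.stop();
 * domain.join();
 * @endcode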
193  */
194 template <MessageTupleC RecvMessageTupleIn
195  , typename IpcProp = NoIpc
196  , typename NetProp = NoNet
197  , template <class...> class NodeContext = app::BlockingContext
198  , typename AttachmentAllocator = DefaultAttachmentAllocator >
199 struct Domain {
200  using IpcProperty = IpcProp;
201  using NetProperty = NetProp;
202  using NetProtocol = typename NetProperty::protocol;
203 private:
204  using RecvMessageTuple = typename hmbdc::remove_duplicate<RecvMessageTupleIn>::type;
205  using ThreadCtx = NodeContext<RecvMessageTuple>;
206 
207  ThreadCtx threadCtx_;
208 
209  using IpcableRecvMessages = typename domain_detail::recv_ipc_converted<RecvMessageTuple>::type;
210  using NetableRecvMessages = IpcableRecvMessages;
211 
212  enum {
213  run_pump_in_ipc_portal = IpcProperty::capacity != 0,
214  run_pump_in_thread_ctx = run_pump_in_ipc_portal
215  ? 0 : !std::is_same<NoNet, NetProperty>::value,
216  has_a_pump = run_pump_in_ipc_portal || run_pump_in_thread_ctx,
217  has_net_send_eng = !std::is_same<NoNet, NetProperty>::value,
218  has_net_recv_eng = !std::is_same<NoNet, NetProperty>::value
219  && std::tuple_size<NetableRecvMessages>::value,
220  };
221 
222  template <MessageC Message> using ipc_from = domain_detail::ipc_from<Message>;
223 
224  using IpcSubscribeMessagesPossible = IpcableRecvMessages;
225  using IpcTransport = typename std::conditional<
226  IpcProperty::capacity != 0
227  , app::Context<
 228  IpcProperty::max_message_size
 229  , app::context_property::broadcast<IpcProperty::capacity>
 230  , app::context_property::ipc_enabled
 231  >
232  , void*
233  >::type;
234 
235  std::optional<IpcTransport> ipcTransport_;
236  std::optional<os::ShmBasePtrAllocator> allocator_;
237  TypeTagSet* pOutboundSubscriptions_ = nullptr;
238 
239  struct OneBuffer {
240  ThreadCtx& threadCtx;
241  size_t dispCount = 0;
242  size_t maxItemSize_;
243 
244  OneBuffer(ThreadCtx& threadCtx, size_t maxItemSizeIn)
245  : threadCtx(threadCtx)
246  , maxItemSize_(maxItemSizeIn) {
247  }
248  size_t maxItemSize() const {
249  return maxItemSize_ + sizeof(app::MessageHead);
250  }
251 
252  template <MessageC Message>
253  void handleMessageCb(Message& msg) {
254  using hasMemoryAttachment = app::hasMemoryAttachment;
255  if constexpr (has_toHmbdcUnserialized<Message>::value) {
256  static_assert(std::is_same<Message
257  , decltype(msg.toHmbdcUnserialized().toHmbdcSerialized())>::value
258  , "mising or wrong toHmbdcSerialized() func - cannot serialize");
259  threadCtx.send(msg.toHmbdcUnserialized());
260  } else {
261  threadCtx.send(msg);
262  }
263  if constexpr(std::is_base_of<hasMemoryAttachment, Message>::value) {
264  /// do not let MD release the data
265  msg.hasMemoryAttachment::attachment = nullptr;
266  msg.hasMemoryAttachment::len = 0;
267  }
268  }
269 
270  void handleJustBytesCb(uint16_t tag, void const* bytes, app::hasMemoryAttachment* att) {
271  threadCtx.sendJustBytesInPlace(tag, bytes, maxItemSize_, att);
272  if (att) {
273  /// do not let MD release the data
274  att->attachment = nullptr;
275  att->len = 0;
276  }
277  }
278 
279  void put(void* item, size_t) {
280  using NoAttRecv = typename filter_out_tuple_by_base<app::hasMemoryAttachment
 281  , NetableRecvMessages>::type;
 282  app::MessageDispacher<OneBuffer, NoAttRecv> disp;
 283  auto& h = *(app::MessageHead*)(item);
284  h.scratchpad().desc.flag = 0;
285  if (disp(*this, h)) {
286  dispCount++;
287  }
288  }
 289 
 290  void putAtt(app::MessageWrap<app::hasMemoryAttachment>* item, size_t) {
 291  using AttRecv = typename filter_in_tuple_by_base<app::hasMemoryAttachment
 292  , NetableRecvMessages>::type;
 293  app::MessageDispacher<OneBuffer, AttRecv> disp;
294  item->scratchpad().desc.flag = app::hasMemoryAttachment::flag;
295  if (disp(*this, *item)) {
296  dispCount++;
297  } else {
298  /// MD did not release the data, do it here
299  item->payload.release();
300  }
301  }
302 
303  template <typename T> void put(T& item) {put(&item, sizeof(T));}
304  template <typename T> void putSome(T& item) {
305  put(&item, std::min(sizeof(item), maxItemSize()));
306  }
307  };
308 
 309  struct PumpInThreadCtx
 310  : public app::client_using_tuple<PumpInThreadCtx, std::tuple<>>::type {
311  std::optional<typename NetProtocol::SendTransportEngine> sendEng_;
312  using RecvTransportEngine
313  = typename NetProtocol::template RecvTransportEngine<OneBuffer, AttachmentAllocator>;
314  std::optional<RecvTransportEngine> recvEng_;
315  ThreadCtx& outCtx_;
316  OneBuffer netBuffer_;
317  std::string hmbdcName_;
318 
319  public:
320  typename ThreadCtx::ClientRegisterHandle handleInCtx;
321 
322  PumpInThreadCtx(ThreadCtx& outCtx, app::Config const& cfg)
323  : outCtx_(outCtx)
324  , netBuffer_{outCtx
325  , NetProperty::max_message_size != 0
326  ? NetProperty::max_message_size
327  : cfg.getExt<uint32_t>("netMaxMessageSizeRuntime")
328  }
329  , hmbdcName_(cfg.getExt<std::string>("pumpHmbdcName")) {
330  if constexpr (has_net_send_eng) {
331  uint32_t limit = NetProperty::max_message_size;
332  if (limit == 0) {
333  limit = cfg.getExt<uint32_t>("netMaxMessageSizeRuntime");
334  }
335  sendEng_.emplace(cfg, limit);
336  }
337  if constexpr (has_net_recv_eng) {
338  recvEng_.emplace(cfg, netBuffer_);
339  }
340  }
341 
342  template <typename CcNode>
343  void subscribeFor(CcNode const& node, uint16_t mod, uint16_t res) {
344  if constexpr (has_net_recv_eng) {
345  using Messages = typename filter_in_tuple<domain_detail::is_ipcable
346  , typename CcNode::RecvMessageTuple>::type;
347  recvEng_->template subscribeFor<Messages>(node, mod, res);
348  }
349  }
350 
351  size_t ipcSubscribingPartyCount(uint16_t tag) const {
352  return 0;
353  }
354 
355  size_t netSubscribingPartyCount(uint16_t tag) const {
356  auto res = size_t{0};
357  if constexpr (has_net_send_eng) {
358  res += sendEng_->subscribingPartyDetectedCount(tag);
359  }
360  return res;
361  }
362 
363  template <typename CcNode>
364  void advertiseFor(CcNode const& node, uint16_t mod, uint16_t res) {
365  if constexpr (has_net_send_eng) {
366  using Messages = typename filter_in_tuple<domain_detail::is_ipcable
367  , typename CcNode::SendMessageTuple>::type;
368  sendEng_->template advertiseFor<Messages>(node, mod, res);
369  }
370  }
371 
372  size_t netSendingPartyDetectedCount() const {
373  if constexpr (has_net_recv_eng) {
374  return recvEng_->sessionsRemainingActive();
375  }
376  return 0;
377  }
378 
379  size_t netRecvingPartyDetectedCount() const {
380  if constexpr (has_net_send_eng) {
381  return sendEng_->sessionsRemainingActive();
382  }
383  return 0;
384  }
385 
386  template <MessageC Message>
387  void send(Message&& message) {
388  using M = typename std::decay<Message>::type;
389  using Mnet = typename domain_detail::matching_ipcable<M>::type;
390 
391  if constexpr (std::is_trivially_destructible<Mnet>::value) {
392  if constexpr (has_tipsDisableSendMask<M>::value) {
393  if (M::tipsDisableSendMask() & OVER_NETWORK) return;
394  }
395  if constexpr (has_net_send_eng) {
396  if constexpr(has_toHmbdcSerialized<M>::value) {
397  static_assert(std::is_same<M
398  , decltype(message.toHmbdcSerialized().toHmbdcUnserialized())>::value
399  , "mising or wrong toHmbdcUnserialized() func - cannot convertback");
400  static_assert(NetProperty::max_message_size == 0
401  || sizeof(decltype(message.toHmbdcSerialized())) <= NetProperty::max_message_size
402  , "NetProperty::max_message_size is too small");
403  sendEng_->queue(message.toHmbdcSerialized());
404  } else {
405  static_assert(NetProperty::max_message_size == 0
406  || sizeof(message) <= NetProperty::max_message_size
407  , "NetProperty::max_message_size is too small");
408  sendEng_->queue(message);
409  }
410  }
411  }
412  }
413 
414  void sendJustBytes(uint16_t tag, void const* bytes, size_t len
415  , app::hasMemoryAttachment* att) {
416  if constexpr (has_net_send_eng) {
417  sendEng_->queueJustBytes(tag, bytes, len, att);
418  }
419  }
420 
421  char const* hmbdcName() const {
422  return hmbdcName_.c_str();
423  }
424 
425  void invokedCb(size_t previousBatch) override {
426  bool layback = !previousBatch;
427  if constexpr (has_net_send_eng) {
428  layback = layback && !sendEng_->bufferedMessageCount();
429  }
430  if (layback) {
431  std::this_thread::yield();
432  }
433 
434  if constexpr (has_net_recv_eng) {
435  netBuffer_.dispCount = 0;
436  recvEng_->rotate();
437  }
438  if constexpr (has_net_send_eng) {
439  sendEng_->rotate();
440  }
441  }
442 
443  void stop() {
444  if constexpr (has_net_send_eng) {
445  sendEng_->stop();
446  }
447  }
448  };
449 
450  template <size_t MAX_MEMORY_ATTACHMENT>
 451  struct InBandMemoryAttachmentProcessor {
 452  InBandMemoryAttachmentProcessor()
 453  : att(new(underlyingMessage) app::hasMemoryAttachment){}
454  uint8_t underlyingMessage[MAX_MEMORY_ATTACHMENT];
455  AttachmentAllocator attAllocator;
456 
457  template <MessageC Message>
458  bool cache(app::InBandHasMemoryAttachment<Message> const& ibma, uint16_t typeTagIn) {
459  static_assert(sizeof(Message) <= MAX_MEMORY_ATTACHMENT, "");
460  if (hmbdc_unlikely(att->attachment)) {
461  HMBDC_THROW(std::logic_error, "previous InBandMemoryAttachment not concluded");
462  }
463  typeTag = typeTagIn;
464  if constexpr(Message::justBytes) {
465  memcpy(underlyingMessage, &ibma.underlyingMessage, sizeof(underlyingMessage));
466  } else {
467  memcpy(underlyingMessage, &ibma.underlyingMessage, sizeof(Message));
468  }
469 
470  if (hmbdc_unlikely(!att->len)) {
471  att->attachment = nullptr;
472  att->afterConsumedCleanupFunc = nullptr;
473  accSize = 0;
474  return true;
475  } else if (att->holdShmHandle<Message>()) {
476  if constexpr (app::has_hmbdcShmRefCount<Message>::value) {
477  auto shmAddr = hmbdcShmHandleToAddr(att->shmHandle);
478  att->attachment = shmAddr;
479  att->clientData[0] = (uint64_t)&hmbdcShmDeallocator;
480  static_assert(sizeof(ibma.underlyingMessage.hmbdcShmRefCount) == sizeof(size_t));
481  att->clientData[1] = (uint64_t)&ibma.underlyingMessage.hmbdcShmRefCount;
482  if constexpr (Message::is_att_0cpyshm) {
483  att->afterConsumedCleanupFunc = [](app::hasMemoryAttachment* h) {
484  auto hmbdcShmRefCount = (size_t*)h->attachment;
485  if (h->attachment
486  && 0 == __atomic_sub_fetch(hmbdcShmRefCount, 1, __ATOMIC_RELEASE)) {
487  auto& hmbdcShmDeallocator
488  = *(std::function<void (uint8_t*)>*)h->clientData[0];
489  hmbdcShmDeallocator((uint8_t*)h->attachment);
490  }
491  };
492  } else if constexpr (app::has_hmbdcShmRefCount<Message>::value) {
493  att->afterConsumedCleanupFunc = [](app::hasMemoryAttachment* h) {
494  auto hmbdcShmRefCount = (size_t*)h->clientData[1];
495  if (h->attachment
496  && 0 == __atomic_sub_fetch(hmbdcShmRefCount, 1, __ATOMIC_RELEASE)) {
497  auto& hmbdcShmDeallocator
498  = *(std::function<void (uint8_t*)>*)h->clientData[0];
499  hmbdcShmDeallocator((uint8_t*)h->attachment);
500  }
501  };
502  }
503  accSize = att->len;
504  } /// no else
505  return true;
506  } else {
507  attAllocator(typeTagIn, att);
508  // att->attachment = malloc(att->len);
509  // att->afterConsumedCleanupFunc = hasMemoryAttachment::free;
510  accSize = 0;
511  return false;
512  }
513  }
514 
515  bool accumulate(app::InBandMemorySeg const& ibms, size_t n) {
516  if (accSize < att->len) {
517  auto attBytes = ibms.seg;
518  auto copySize = std::min(n, att->len - accSize);
519  memcpy((char*)att->attachment + accSize, attBytes, copySize);
520  accSize += copySize;
521  return accSize == att->len;
522  }
523  return true;
524  }
525  app::hasMemoryAttachment * const att;
526  uint16_t typeTag = 0;
527  size_t accSize = 0;
528  std::function<void* (boost::interprocess::managed_shared_memory::handle_t)>
529  hmbdcShmHandleToAddr;
530  std::function<void (uint8_t*)> hmbdcShmDeallocator;
531  };
532 
 533  struct PumpInIpcPortal
 534  : public app::client_using_tuple<PumpInIpcPortal, IpcSubscribeMessagesPossible>::type {
535  std::optional<typename NetProtocol::SendTransportEngine> sendEng_;
536  using RecvTransportEngine
537  = typename NetProtocol::template RecvTransportEngine<OneBuffer, AttachmentAllocator>;
538  std::optional<RecvTransportEngine> recvEng_;
539  IpcTransport& ipcTransport_;
540  ThreadCtx& outCtx_;
541  OneBuffer netBuffer_;
542  TypeTagSet* pOutboundSubscriptions_;
543  TypeTagSet inboundSubscriptions_;
544  std::string hmbdcName_;
545  uint32_t pumpMaxBlockingTimeSec_;
546 
547  public:
 548  InBandMemoryAttachmentProcessor<
 549  std::max((size_t)PumpInIpcPortal::MAX_MEMORY_ATTACHMENT, (size_t)(64 * 1024))> ibmaProc;
550  pid_t const hmbdcAvoidIpcFrom = getpid();
551  PumpInIpcPortal(IpcTransport& ipcTransport
552  , TypeTagSet* pOutboundSubscriptions
553  , ThreadCtx& outCtx, app::Config const& cfg)
554  : ipcTransport_(ipcTransport)
555  , outCtx_(outCtx)
556  , netBuffer_{outCtx
557  , NetProperty::max_message_size != 0
558  ? NetProperty::max_message_size
559  : cfg.getExt<uint32_t>("netMaxMessageSizeRuntime")
560  }
561  , pOutboundSubscriptions_(pOutboundSubscriptions)
562  , hmbdcName_(cfg.getExt<std::string>("pumpHmbdcName"))
563  , pumpMaxBlockingTimeSec_(cfg.getHex<double>("pumpMaxBlockingTimeSec") * 1000000) {
564  pumpMaxBlockingTimeSec_ = std::min(1000000u, pumpMaxBlockingTimeSec_);
565  static_assert(IpcTransport::MAX_MESSAGE_SIZE == 0 || IpcTransport::MAX_MESSAGE_SIZE
567 
568  if constexpr (has_net_send_eng) {
569  uint32_t limit = NetProperty::max_message_size;
570  if (limit == 0) {
571  limit = cfg.getExt<uint32_t>("netMaxMessageSizeRuntime");
572  }
573  sendEng_.emplace(cfg, limit);
574  }
575  if constexpr (has_net_recv_eng) {
576  recvEng_.emplace(cfg, netBuffer_);
577  }
578  }
579 
580  template <typename CcNode>
581  void subscribeFor(CcNode const& node, uint16_t mod, uint16_t res) {
582  using Messages = typename filter_in_tuple<domain_detail::is_ipcable
583  , typename CcNode::RecvMessageTuple>::type;
584  inboundSubscriptions_.markSubsFor<Messages>(node, mod, res
585  , [this](uint16_t tag) {
586  pOutboundSubscriptions_->add(tag);
587  });
588 
589  if constexpr (has_net_recv_eng) {
590  recvEng_->template subscribeFor<Messages>(node, mod, res);
591  }
592  }
593 
594  size_t ipcSubscribingPartyCount(uint16_t tag) const {
595  return pOutboundSubscriptions_->check(tag) - inboundSubscriptions_.check(tag);
596  }
597 
598  size_t netSubscribingPartyCount(uint16_t tag) const {
599  auto res = size_t{0};
600  if constexpr (has_net_send_eng) {
601  res += sendEng_->subscribingPartyDetectedCount(tag);
602  }
603  return res;
604  }
605 
606  template <typename CcNode>
607  void advertiseFor(CcNode const& node, uint16_t mod, uint16_t res) {
608  using Messages = typename filter_in_tuple<domain_detail::is_ipcable
609  , typename CcNode::SendMessageTuple>::type;
610  if constexpr (has_net_send_eng) {
611  sendEng_->template advertiseFor<Messages>(node, mod, res);
612  }
613  }
614 
615  size_t netSendingPartyDetectedCount() const {
616  if constexpr (has_net_recv_eng) {
617  return recvEng_->sessionsRemainingActive();
618  }
619  return 0;
620  }
621 
622  size_t netRecvingPartyDetectedCount() const {
623  if constexpr (has_net_send_eng) {
624  return sendEng_->sessionsRemainingActive();
625  }
626  return 0;
627  }
628 
629 
630  template <MessageC Message>
631  void send(Message&& message) {
632  using M = typename std::decay<Message>::type;
633  using Mipc = typename domain_detail::matching_ipcable<M>::type;
634 
635  if constexpr (std::is_trivially_destructible<Mipc>::value) {
636  bool disableInterProcess = false;
637  if constexpr (has_tipsDisableSendMask<M>::value) {
638  if (M::tipsDisableSendMask() & INTER_PROCESS) {
639  disableInterProcess = true;
640  }
641  }
642  bool disableNet = false;
643  if constexpr (!has_net_send_eng) {
644  disableNet = true;
645  } else if constexpr (has_tipsDisableSendMask<M>::value) {
646  if (M::tipsDisableSendMask() & OVER_NETWORK) disableNet = true;
647  }
648 
649  std::optional<Mipc> serializedCached;
650  app::hasMemoryAttachment::AfterConsumedCleanupFunc afterConsumedCleanupFuncKept = nullptr;
651  (void)afterConsumedCleanupFuncKept;
652  if (!disableInterProcess) {
653  //only send when others have interests
654  auto intDiff = pOutboundSubscriptions_->check(message.getTypeTag())
655  - inboundSubscriptions_.check(message.getTypeTag());
656  if (intDiff) {
657  //ipc message should not come back based on hmbdcAvoidIpcFrom
658  using ToSendType = ipc_from<Mipc>;
659  if constexpr (has_toHmbdcSerialized<M>::value) {
660  static_assert(std::is_same<M
661  , decltype(message.toHmbdcSerialized().toHmbdcUnserialized())>::value
662  , "mising or wrong toHmbdcUnserialized() func - cannot convertback");
663  serializedCached.emplace(message.toHmbdcSerialized());
664  if (!disableNet) {
665  //keep the att around
666  std::swap(afterConsumedCleanupFuncKept, serializedCached->afterConsumedCleanupFunc);
667  }
668  auto toSend = ToSendType{hmbdcAvoidIpcFrom, *serializedCached};
669 
670  if constexpr (app::has_hmbdcShmRefCount<ToSendType>::value) {
671  toSend.hmbdcShmRefCount = intDiff;
672  if (ToSendType::is_att_0cpyshm && toSend.app::hasMemoryAttachment::attachment) {
673  auto hmbdc0cpyShmRefCount = (size_t*)toSend.app::hasMemoryAttachment::attachment;
674  __atomic_add_fetch(hmbdc0cpyShmRefCount, intDiff, __ATOMIC_RELEASE);
675  }
676  }
677  ipcTransport_.send(std::move(toSend));
678  } else if constexpr(std::is_base_of<app::hasMemoryAttachment, M>::value) {
679  auto toSend = ToSendType{hmbdcAvoidIpcFrom, message};
680  if constexpr (app::has_hmbdcShmRefCount<ToSendType>::value) {
681  toSend.hmbdcShmRefCount = intDiff;
682  if (ToSendType::is_att_0cpyshm && toSend.app::hasMemoryAttachment::attachment) {
683  auto hmbdc0cpyShmRefCount = (size_t*)toSend.app::hasMemoryAttachment::attachment;
684  __atomic_add_fetch(hmbdc0cpyShmRefCount, intDiff, __ATOMIC_RELEASE);
685  }
686  }
687  ipcTransport_.send(std::move(toSend));
688  } else {
689  ipcTransport_.template sendInPlace<ToSendType>(hmbdcAvoidIpcFrom, message);
690  }
691  }
692  }
693 
694  if (disableNet) return;
695 
696  if constexpr (has_net_send_eng) {
697  if constexpr(has_toHmbdcSerialized<M>::value) {
698 
699  static_assert(NetProperty::max_message_size == 0
700  || sizeof(Mipc) <= NetProperty::max_message_size
701  , "NetProperty::max_message_size is too small");
702  if (serializedCached) {
703  // restore
704  std::swap(afterConsumedCleanupFuncKept, serializedCached->afterConsumedCleanupFunc);
705  sendEng_->queue(*serializedCached);
706  } else {
707  sendEng_->queue(message.toHmbdcSerialized());
708  }
709  } else {
710  static_assert(NetProperty::max_message_size == 0
711  || sizeof(Mipc) <= NetProperty::max_message_size
712  , "NetProperty::max_message_size is too small");
713  sendEng_->queue(message);
714  }
715  }
716  }
717  }
718 
719  void sendJustBytes(uint16_t tag, void const* bytes, size_t len
720  , app::hasMemoryAttachment* att) {
721  app::hasMemoryAttachment::AfterConsumedCleanupFunc afterConsumedCleanupFuncKept
722  = att ? att->afterConsumedCleanupFunc : nullptr;
723  (void)afterConsumedCleanupFuncKept;
724  //only send when others have interests
725  if (pOutboundSubscriptions_->check(tag) > inboundSubscriptions_.check(tag)) {
726  //ipc message should not come back based on hmbdcAvoidIpcFrom
727  if (has_net_send_eng && att) {
728  //keep the att around
729  att->afterConsumedCleanupFunc = nullptr;
730  }
731  ipcTransport_.template sendJustBytesInPlace<ipc_from<app::JustBytes>>(
732  tag, bytes, len, att, hmbdcAvoidIpcFrom);
733  }
734 
735  if constexpr (has_net_send_eng) {
736  if (att) {
737  att->afterConsumedCleanupFunc = afterConsumedCleanupFuncKept;
738  }
739  sendEng_->queueJustBytes(tag, bytes, len, att);
740  }
741  }
742 
743  template <typename Iterator>
744  size_t handleRangeImpl(Iterator it,
745  Iterator end, uint16_t threadId) {
746  size_t res = 0;
747  for (;hmbdc_likely(!this->batchDone_ && it != end); ++it) {
748  auto& h = *static_cast<app::MessageHead*>(*it);
 749  auto tagInEffect = h.typeTag;
 750  if (h.scratchpad().ipc.hd.inbandUnderlyingTypeTag) {
 751  tagInEffect = h.scratchpad().ipc.hd.inbandUnderlyingTypeTag;
752  }
753  if (hmbdc_unlikely(tagInEffect > app::LastSystemMessage::typeTag
754  && !inboundSubscriptions_.check(tagInEffect))) {
 755  continue;
 756  } else if (app::MessageDispacher<PumpInIpcPortal
 757  , typename PumpInIpcPortal::Interests>()(
758  *this, *static_cast<app::MessageHead*>(*it))) {
759  res++;
760  }
761  }
762  this->batchDone_ = false;
763  return res;
764  }
765 
766  char const* hmbdcName() const {
767  return hmbdcName_.c_str();
768  }
769 
770  void invokedCb(size_t previousBatch) override {
771  bool layback = !previousBatch;
772  if constexpr (has_net_send_eng) {
773  layback = layback && !sendEng_->bufferedMessageCount();
774  }
775  if (layback) {
776  if (pumpMaxBlockingTimeSec_) {
777  usleep(pumpMaxBlockingTimeSec_);
778  } else {
779  std::this_thread::yield();
780  }
781  }
782  if constexpr (has_net_recv_eng) {
783  recvEng_->rotate();
784  }
785  if constexpr (has_net_send_eng) {
786  sendEng_->rotate();
787  }
788  }
789 
790  template <MessageC Message>
791  void handleMessageCb(Message& m) {
792  Message& msg = m;
793  using hasMemoryAttachment = app::hasMemoryAttachment;
794  if constexpr (has_toHmbdcUnserialized<Message>::value) {
795  static_assert(std::is_same<Message
796  , decltype(msg.toHmbdcUnserialized().toHmbdcSerialized())>::value
797  , "mising or wrong toHmbdcSerialized() func - cannot serialize");
798  outCtx_.send(msg.toHmbdcUnserialized());
799  } else {
800  outCtx_.send(msg);
801  }
802  if constexpr(std::is_base_of<hasMemoryAttachment, Message>::value) {
803  msg.hasMemoryAttachment::attachment = nullptr;
804  msg.hasMemoryAttachment::len = 0;
805  }
806  }
807 
808  void handleJustBytesCb(uint16_t tag, uint8_t* bytes, app::hasMemoryAttachment* att) {
809  outCtx_.sendJustBytesInPlace(tag, bytes, ipcTransport_.maxMessageSize(), att);
810  if (att) {
811  att->attachment = nullptr;
812  att->len = 0;
813  }
814  }
815 
816  bool droppedCb() override {
817  inboundSubscriptions_.exportTo([this](uint16_t tag, uint8_t) {
818  pOutboundSubscriptions_->sub(tag);
819  });
820  return true;
821  };
822 
823  void stop() {
824  if constexpr (has_net_send_eng) {
825  sendEng_->stop();
826  }
827  }
828  }; //end of Pumps
829 
830  app::Config config_;
831  using Pump = typename std::conditional<
 832  run_pump_in_ipc_portal
 833  , PumpInIpcPortal
 834  , typename std::conditional<
 835  run_pump_in_thread_ctx
 836  , PumpInThreadCtx
 837  , void*
 838  >::type
839  >::type;
840  std::deque<Pump> pumps_;
841 
842  bool ownIpcTransport_ = false;
843 
844 public:
845  /**
846  * @brief Construct a new Domain object
847  *
 848  * @param cfg the JSON configuration to specify the IPC and network transport details
849  * See the DefaultUserConfiguration.hpp files for each transport type
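 *
 * For example (a sketch; assuming app::Config can be built from a JSON string
 * as in the hmbdc examples - keys not set here fall back to DefaultUserConfig):
 * @code
 * app::Config cfg(R"|(
 * {
 *     "pumpRunMode"           : "auto",
 *     "ipcTransportOwnership" : "optional",
 *     "tipsDomainNonNet"      : "my-domain"
 * }
 * )|");
 * Domain<std::tuple<MyMsg>> domain{cfg};
 * @endcode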
850  */
851  Domain(app::Config const& cfg)
852  : config_(cfg) {
853  config_.setAdditionalFallbackConfig(app::Config(DefaultUserConfig));
854  auto pumpRunMode = config_.getExt<std::string>("pumpRunMode");
855  if constexpr (run_pump_in_ipc_portal) {
856  auto ownershipStr = config_.getExt<std::string>("ipcTransportOwnership");
857  int ownership;
858  if (ownershipStr == "own") {
859  ownership = 1;
860  } else if (ownershipStr == "attach") {
861  ownership = -1;
862  } else if (ownershipStr == "optional") {
863  ownership = 0;
864  } else {
865  HMBDC_THROW(std::out_of_range, "ipcTransportOwnership unsupported: " << ownershipStr);
866  }
867  ipcTransport_.emplace(ownership
868  , NetProtocol::instance().getTipsDomainName(config_).c_str()
869  , config_.getExt<uint32_t>("ipcMessageQueueSizePower2Num")
870  , 0 //no pool
871  , IpcTransport::MAX_MESSAGE_SIZE != 0
872  ? IpcTransport::MAX_MESSAGE_SIZE
873  : config_.getExt<size_t>("ipcMaxMessageSizeRuntime")
874 
875  , 0xfffffffffffffffful
876  , 0
877  , config_.getExt<size_t>("ipcShmForAttPoolSize")
878  );
879  ownIpcTransport_ = ownership > 0;
880 
881  allocator_.emplace((NetProtocol::instance().getTipsDomainName(config_) + "-ipcsubs").c_str()
882  , 0
883  , sizeof(*pOutboundSubscriptions_)
884  , ownership);
885  pOutboundSubscriptions_ = allocator_->template allocate<TypeTagSet>(SMP_CACHE_BYTES);
886 
887  ipcTransport_->setSecondsBetweenPurge(
888  config_.getExt<uint32_t>("ipcPurgeIntervalSeconds"));
889 
890  auto pumpCount = config_.getExt<uint32_t>("pumpCount");
891  if (pumpCount > 64) {
892  HMBDC_THROW(std::out_of_range, "pumpCount > 64 is not suppported");
893  }
894  auto ownIpcTransport = ownIpcTransport_;
895 
896  allocator_.emplace((NetProtocol::instance().getTipsDomainName(config_) + "-ipcsubs").c_str()
897  , 0
898  , sizeof(*pOutboundSubscriptions_)
899  , ownIpcTransport);
900 
901  pOutboundSubscriptions_ = allocator_->template allocate<TypeTagSet>(SMP_CACHE_BYTES);
902 
903  for (auto i = 0u; i < config_.getExt<uint32_t>("pumpCount"); ++i) {
904  auto& pump = pumps_.emplace_back(*ipcTransport_, pOutboundSubscriptions_, threadCtx_, config_);
905  if (pumpRunMode == "auto") {
906  ipcTransport_->start(pump
907  , config_.getHex<uint64_t>("pumpCpuAffinityHex"));
908  } else if (pumpRunMode == "manual") {
909  ipcTransport_->registerToRun(pump
910  , config_.getHex<uint64_t>("pumpCpuAffinityHex"));
911  } else if (pumpRunMode == "delayed") {
912  } else {
913  HMBDC_THROW(std::out_of_range, "pumpRunMode=" << pumpRunMode << " not supported");
914  }
915  }
916  } else if constexpr (run_pump_in_thread_ctx) {
917  for (auto i = 0u; i < config_.getExt<uint32_t>("pumpCount"); ++i) {
918  auto& pump = pumps_.emplace_back(threadCtx_, config_);
919  if (pumpRunMode == "auto") {
920  threadCtx_.start(pump, 0, 0
921  , config_.getHex<uint64_t>("pumpCpuAffinityHex")
922  , config_.getHex<time::Duration>("pumpMaxBlockingTimeSec"));
923  } else if (pumpRunMode == "manual") {
924  pump.handleInCtx = threadCtx_.registerToRun(pump, 0, 0);
925  } else if (pumpRunMode == "delayed") {
926  } else {
927  HMBDC_THROW(std::out_of_range, "pumpRunMode=" << pumpRunMode << " not supported");
928  }
929  }
930  } // else no pump needed
931  }
932 
933  /**
934  * @brief manually drive the domain's pumps
935  *
936  * @tparam Args the arguments used for pump
937  * @param pumpIndex which pump - see pumpCount configure
938  * @param args the arguments used for pump
939  * - if IPC is enabled:
940  * empty
941  * - else
942  * time::Duration maxBlockingTime
943  *
944  * @return true if pump runs fine
 945  * @return false if the Domain stopped or an exception was thrown
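 *
 * For example (a sketch, with "pumpRunMode" : "manual" and a single pump):
 * @code
 * while (domain.runOnce(0)) {
 *     // interleave other application work with message pumping here
 * }
 * @endcode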
946  */
947  template <typename... Args>
948  bool runOnce(size_t pumpIndex, Args&& ...args) {
949  auto& pump = pumps_[pumpIndex];
950  if constexpr (run_pump_in_ipc_portal) {
951  return ipcTransport_->runOnce(pump, std::forward<Args>(args)...);
952  } else if constexpr (run_pump_in_thread_ctx) {
 953  return threadCtx_.runOnce(pump.handleInCtx, pump, std::forward<Args>(args)...);
954  }
955  }
956 
957  /**
958  * @brief configure the subscriptions and advertisement for a Node
 959  * @details typically this is automatically done when the Node is started;
 960  * if the Node wants to do this step at an earlier or later stage, this can be
 961  * called manually at that time. Threadsafe call.
962  *
963  * @tparam CcNode the Node type
964  * @param node the Node
965  */
966  template <typename CcNode>
 967  void addPubSubFor(CcNode const& node) {
 968  static_assert(is_subset_of<typename CcNode::RecvMessageTuple, RecvMessageTuple>::value
 969  , "the node expecting messages Domain not covering");
970  if constexpr ((run_pump_in_ipc_portal || run_pump_in_thread_ctx)) {
971  for (uint16_t i = 0u; i < pumps_.size(); ++i) {
972  pumps_[i].template subscribeFor(node, (uint16_t)pumps_.size(), i);
973  pumps_[i].template advertiseFor(node, (uint16_t)pumps_.size(), i);
974  }
975  }
976  }
977 
978  /**
 979  * @brief configure the advertisement with message types directly.
 980  * This is for when you do not want to involve a Node for publishing; otherwise
 981  * use addPubSubFor
982  * @tparam SendMessageTuple
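 *
 * For example (a sketch): advertise MyMsg without involving a publishing Node:
 * @code
 * domain.addPub<std::tuple<MyMsg>>();
 * @endcode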
983  */
984  template <typename SendMessageTuple>
985  void addPub() {
986  addPubSubFor(RegistrationNode<SendMessageTuple, std::tuple<>>{});
987  }
988 
989  /**
990  * @brief start a Node within this Domain as a thread - handles its subscribing here too
991  *
992  * @tparam Node a concrete Node type that send and/or recv Messages
993  * @param node the instance of the node - the Domain does not manage the object lifespan
 994  * @param capacity the inbound message buffer depth - if the buffer is full the delivery mechanism
 995  * is blocked until buffer space becomes available
 996  * @param maxBlockingTime the Node wakes up periodically even if there are no messages for it
 997  * so its thread can respond to Domain status changes - like stopping
 998  * @param cpuAffinity the CPU affinity mask that the Node's thread is assigned to
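 *
 * For example (a sketch; MyNode is a hypothetical Node type):
 * @code
 * MyNode node;
 * domain.start(node, 1024
 *     , time::Duration::seconds(1)
 *     , 0x01ul); // pin the Node's thread to core 0
 * @endcode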
999  */
1000  template <typename Node>
1001  void start(Node& node
1002  , size_t capacity = 1024
1003  , time::Duration maxBlockingTime = time::Duration::seconds(1)
1004  , uint64_t cpuAffinity = 0
1005  ) {
1006  if (std::tuple_size<typename Node::Interests>::value
1007  && capacity == 0) {
1008  HMBDC_THROW(std::out_of_range, "capacity cannot be 0 when receiving messages");
1009  }
1010  node.updateSubscription();
1011  if constexpr (Node::manual_subscribe == false) {
1012  addPubSubFor(node);
1013  }
1014 
1015  threadCtx_.start(node, capacity, node.maxMessageSize()
1016  , cpuAffinity, maxBlockingTime
1017  , [&node](auto && ...args) {
1018  return node.ifDeliver(std::forward<decltype(args)>(args)...);
1019  }
1020  );
1021  }
1022 
1023  /**
1024  * @brief if pumpRunMode is set to be delayed
 1025  * this function starts all the pumps
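 *
 * For example (a sketch):
 * @code
 * // with "pumpRunMode" : "delayed" in the configuration, pumps are
 * // created but not run at construction time:
 * domain.startDelayedPumping(); // pumps start only now
 * @endcode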
1026  */
 1027  void startDelayedPumping() {
 1028  auto pumpRunMode = config_.getExt<std::string>("pumpRunMode");
1029  if (pumpRunMode == "delayed") {
1030  if constexpr (run_pump_in_ipc_portal) {
1031  for (auto& pump : pumps_) {
1032  ipcTransport_->start(pump
1033  , config_.getHex<uint64_t>("pumpCpuAffinityHex"));
1034  }
1035  } else if constexpr (run_pump_in_thread_ctx) {
1036  for (auto& pump : pumps_) {
1037  threadCtx_.start(pump, 0, 0
1038  , config_.getHex<uint64_t>("pumpCpuAffinityHex")
1039  , config_.getHex<time::Duration>("pumpMaxBlockingTimeSec"));
1040  }
1041  } else {
1042  HMBDC_THROW(std::runtime_error, "pumpRunMode=" << pumpRunMode);
1043  }
1044  }
1045  }
1046 
1047  /**
 1048  * @brief start a group of Nodes as a thread pool within the Domain that collectively process
1049  * messages in a load sharing manner. Each Node is powered by a single OS thread.
1050  *
1051  * @tparam LoadSharingNodePtrIt iterator to a Node pointer
1052  *
1053  * @param begin an iterator, **begin should produce a Node& for the first Node
1054  * @param end an end iterator, [begin, end) is the range for Nodes
1055  * @param capacity the maximum messages this Node can buffer
1056  * @param cpuAffinity cpu affinity mask for this Node's thread
1057  * @param maxBlockingTime it is recommended to limit the duration a Node blocks due to
1058  * no messages to handle, so it can respond to things like Domain is stopped, or generate
1059  * heartbeats if applicable.
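 *
 * For example (a sketch; MyWorker is a hypothetical Node type):
 * @code
 * MyWorker w0, w1;
 * std::vector<MyWorker*> pool{&w0, &w1};
 * domain.startPool(pool.begin(), pool.end()); // w0 and w1 share the load
 * @endcode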
1060  */
1061  template <typename LoadSharingNodePtrIt>
1062  void startPool(LoadSharingNodePtrIt begin, LoadSharingNodePtrIt end
1063  , size_t capacity = 1024
1064  , time::Duration maxBlockingTime = time::Duration::seconds(1)
1065  , uint64_t cpuAffinity = 0) {
1066  using Node = typename std::decay<decltype(**LoadSharingNodePtrIt())>::type;
1067  auto maxItemSize = (*begin)->maxMessageSize();
1068 
1069  if (std::tuple_size<typename Node::Interests>::value
1070  && capacity == 0) {
1071  HMBDC_THROW(std::out_of_range, "capacity cannot be 0 when receiving messages");
1072  }
1073  for (auto it = begin; it != end; it++) {
1074  auto& node = **it;
1075  node.updateSubscription();
1076  if constexpr (Node::manual_subscribe == false) {
1077  addPubSubFor(node);
1078  }
1079  }
1080 
1081  threadCtx_.start(begin, end, capacity, maxItemSize, cpuAffinity, maxBlockingTime
1082  , [&node = **begin](auto && ...args) {
1083  return node.ifDeliver(std::forward<decltype(args)>(args)...);
1084  });
1085  }
1086 
1087 
1088  /**
1089  * @brief how many IPC parties (processes) have been detected by this context
1090  *
1091  * @return size_t
1092  */
1093  size_t ipcPartyDetectedCount() const {
1094  if constexpr (run_pump_in_ipc_portal) {
1095  return ipcTransport_->dispatchingStartedCount();
1096  }
1097  return 0;
1098  }
1099 
1100  /**
1101  * @brief how many network parties (processes) are ready to send messages to this Domain
1102  *
1103  * @return size_t
1104  */
1106  if constexpr (has_a_pump) {
1107  size_t res = 0;
1108  for (auto& pump : pumps_) {
1109  res += pump.netSendingPartyDetectedCount();
1110  }
1111  return res;
1112  } else {
1113  return 0;
1114  }
1115  }
1116 
1117  /**
1118  * @brief how many network parties (processes) are ready to receive messages from this Domain
1119  *
1120  * @return size_t
1121  */
1123  if constexpr (has_a_pump) {
1124  size_t res = 0;
1125  for (auto& pump : pumps_) {
1126  res += pump.netRecvingPartyDetectedCount();
1127  }
1128  return res;
1129  }
1130  return 0;
1131  }
1132 
1133  /**
 1134  * @brief how many nodes on the local machine have the message tag
1135  * marked as subscribed excluding Nodes managed by this Domain instance
1136  *
1137  * @param tag
1138  * @return size_t
1139  */
1140  size_t ipcSubscribingPartyCount(uint16_t tag) const {
1141  return pumps_[tag % pumps_.size()].ipcSubscribingPartyCount(tag);
1142  }
1143 
1144  /**
1145  * @brief how many processes connecting thru network have the message tag
1146  * marked as subscribed
1147  *
1148  * @param tag
1149  * @return size_t
1150  */
1151  size_t netSubscribingPartyCount(uint16_t tag) const {
1152  return pumps_[tag % pumps_.size()].netSubscribingPartyCount(tag);
1153  }
1154 
1155  /**
1156  * @brief publish a message through this Domain, all the Nodes in the TIPS domain
 1157  * can get it if they subscribe to the Message type
 1158  * @details Messages that are not trivially destructible are not delivered outside
 1159  * of this process (no IPC and network paths); delivery is still attempted among
 1160  * the Nodes within the local process only.
1161  * enum of DisableSendMask could also be used to disable local, IPC or network paths.
1162  *
1163  * @tparam Message The type that need to fit into the max_message_size specified by the
1164  * ipc_property and net_property. Its copy ctor is used to push it to the outgoing buffer.
 1165  * With that in mind, the user can do a partial copy in the copy ctor to publish just
 1166  * the starting N bytes
1167  * @param m
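 *
 * For example (a sketch; MyMsg is a hypothetical message type):
 * @code
 * domain.publish(MyMsg{}); // delivered to all subscribing Nodes in the TIPS domain
 * @endcode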
1168  */
1169  template <MessageC Message>
1170  void publish(Message&& m) {
1171  bool disableInterThread = false;
1172  using M = typename std::decay<Message>::type;
1173  static_assert((int)M::typeSortIndex > (int)app::LastSystemMessage::typeTag);
1174 
1175  if constexpr (has_tipsDisableSendMask<M>::value) {
1176  disableInterThread = M::tipsDisableSendMask() & INTER_THREAD;
1177  }
1178  if (!disableInterThread) {
1179  threadCtx_.send(m);
1180  }
1181  if constexpr (run_pump_in_ipc_portal || run_pump_in_thread_ctx) {
1182  pumps_[m.getTypeTag() % pumps_.size()].send(std::forward<Message>(m));
1183  }
1184  }
1185 
1186  /**
1187  * @brief publish a message using byte format on this Domain - not recommended
 1188  * @details Messages MUST be trivially destructible - no type based delivery features
1189  * such as DisableSendMask are supported.
 1190  * It is not recommended to publish with att while the tag is also subscribed locally
 1191  * because att->release() will be called once for each local recipient and
 1192  * 1 additional time in Domain. Publishing via hasSharedPtrAttachment is always preferred
1193  * @param tag - type tag
1194  * @param bytes - bytes of the message contents - must match the message already
1195  * constructed binary wise (hasMemoryAttachment, inTagRange must be considered
1196  * - user needs to add those leading bytes to match binary wise)
1197  * @param len - len of the above buffer
 1198  * @param att - if the message type is derived from hasMemoryAttachment, explicitly
1199  * provides the ptr here, otherwise must be nullptr
 1200  * it is the user's responsibility to make sure att->afterConsumedCleanupFunc works
 1201  * even if att->release() is called multiple times
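 *
 * For example (a sketch; assuming MyMsg is trivially destructible and not
 * derived from hasMemoryAttachment):
 * @code
 * MyMsg msg;
 * domain.publishJustBytes(msg.getTypeTag(), &msg, sizeof(msg), nullptr);
 * @endcode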
1202  */
1203  void publishJustBytes(uint16_t tag, void const* bytes, size_t len
1204  , app::hasMemoryAttachment* att) {
1205  if (tag > app::LastSystemMessage::typeTag) {
1206  threadCtx_.sendJustBytesInPlace(tag, bytes, len, att);
1207  if constexpr (run_pump_in_ipc_portal || run_pump_in_thread_ctx) {
1208  pumps_[tag % pumps_.size()].sendJustBytes(tag, bytes, len, att);
1209  }
1210  }
1211  }
1212 
1213  /**
1214  * @brief publish a sequence of message of the same type
1215  * through this Domain, all the Nodes in the TIPS domain
 1216  * can get it if they subscribe to the Message type
1217  * @details It is recommended to use this method if publishing an array
1218  * of Messages (particularly to a Node pool - see startPool() above) for performance
 1219  * Messages that are not trivially destructible are not delivered outside
 1220  * of this process (no IPC and network paths); delivery is still attempted among
 1221  * the Nodes within the local process though
1222  *
1223  * @tparam Message
1224  * @param m
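 *
 * For example (a sketch):
 * @code
 * MyMsg batch[8];
 * domain.publish(batch, 8); // the inter-thread path moves them in one pass
 * @endcode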
1225  */
1226  template <MessageForwardIterC ForwardIt>
1227  void publish(ForwardIt begin, size_t n) {
1228  using M = typename std::decay<decltype(*(ForwardIt()))>::type;
1229  bool disableInterThread = false;
1230  if constexpr (has_tipsDisableSendMask<M>::value) {
1231  disableInterThread = M::tipsDisableSendMask() & INTER_THREAD;
1232  }
1233  if (!disableInterThread) {
1234  threadCtx_.send(begin, n);
1235  }
1236  if constexpr (run_pump_in_ipc_portal || run_pump_in_thread_ctx) {
1237  while (n--) {
1238  auto& m = *begin;
 1239  pumps_[m.getTypeTag() % pumps_.size()].send(*begin++);
1240  }
1241  }
1242  }
1243 
1244  /**
 1245  * @brief allocate in shm for a hasSharedPtrAttachment to be published later
1246  * The release of it is auto handled in TIPS
1247  *
1248  * @tparam Message hasSharedPtrAttachment tparam
1249  * @tparam T hasSharedPtrAttachment tparam - it needs to contain "size_t hmbdc0cpyShmRefCount"
1250  * as the first data member - won't compile otherwise
1251  * @tparam Args args for T's ctor
1252  * @param att the message holds the shared memory
1253  * @param actualSize T's actual size in bytes - could be > sizeof(T) for open ended struct
1254  * @param args args for T's ctor
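 *
 * For example (a sketch; requires an IPC-enabled Domain; MyShmMsg is a
 * hypothetical message deriving from hasSharedPtrAttachment<MyShmMsg, MyShmData>
 * and MyShmData starts with size_t hmbdc0cpyShmRefCount):
 * @code
 * MyShmMsg msg;
 * domain.allocateInShmFor0cpy(msg, sizeof(MyShmData) + 4096);
 * // fill msg.attachmentSp->..., then:
 * domain.publish(msg);
 * @endcode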
1255  */
1256  template <MessageC Message, typename T, typename ...Args>
 1257  void allocateInShmFor0cpy(hasSharedPtrAttachment<Message, T>& att
 1258  , size_t actualSize, Args&& ...args) {
1259  static_assert(offsetof(T, hmbdc0cpyShmRefCount) == 0);
1260  static_assert(std::is_same<decltype(T::hmbdc0cpyShmRefCount), size_t>::value);
1261  if (actualSize < sizeof(T)) {
1262  HMBDC_THROW(std::out_of_range, "too small size for type " << actualSize);
1263  }
1264  att.attachmentSp = ipcTransport_->template allocateInShm<T>(
1265  actualSize, std::forward<Args>(args)...);
1266  att.attachmentSp->hmbdc0cpyShmRefCount = 1; /// sender
1267  att.len = actualSize;
1268  }
1269 
1270  /**
1271  * @brief if the Domain object owns the IPC transport
1272  *
1273  * @return true if yes
1274  * @return false if no
1275  */
1276  bool ownIpcTransport() const {
1277  return ownIpcTransport_;
1278  }
1279 
1280  /**
1281  * @brief stop the Domain and its Message delivery functions
 1282  * @details an async operation - use join() to see the effects
1283  */
1284  void stop() {
1285  if constexpr (run_pump_in_ipc_portal) {
1286  ipcTransport_->stop();
1287  }
1288  threadCtx_.stop();
1289 
1290  if constexpr (run_pump_in_ipc_portal || run_pump_in_thread_ctx) {
1291  for (auto& pump : pumps_) {
1292  pump.stop();
1293  }
1294  }
1295  }
1296 
1297  /**
1298  * @brief wait for all the Node threads to stop
1299  *
1300  */
1301  void join() {
1302  if constexpr (run_pump_in_ipc_portal) {
1303  ipcTransport_->join();
1304  }
1305  threadCtx_.join();
1306  }
1307 };
1308 
1309 }
1310 
1311 ///
1312 /// implementation details
1313 ///
1314 namespace app {
1315 template <MessageC Message>
1316 struct MessageWrap<tips::domain_detail::ipc_from<Message>> : MessageHead {
1317  template <typename ...Args>
1318  MessageWrap(pid_t from, Args&& ... args)
1319  : MessageHead(0) //delayed
1320  , payload(std::forward<Args>(args)...) {
1321  if constexpr (std::is_base_of<hasMemoryAttachment, Message>::value) {
1322  this->scratchpad().desc.flag = hasMemoryAttachment::flag;
1323  }
1324  if constexpr (Message::hasRange) {
1325  this->typeTag = payload.getTypeTag();
1326  } else {
1327  this->typeTag = Message::typeTag;
1328  }
1329  this->scratchpad().ipc.hd.from = from;
1330  }
1331 
 1332  MessageWrap(tips::domain_detail::ipc_from<Message> const& m)
 1333  : MessageHead(m.getTypeTag())
1334  , payload(m) {
1335  if constexpr (std::is_base_of<hasMemoryAttachment, Message>::value) {
1336  this->scratchpad().desc.flag = hasMemoryAttachment::flag;
1337  }
1338  this->scratchpad().ipc.hd.from = m.hmbdcIpcFrom;
1339  }
1340  Message payload;
1341 
1342  friend
1343  std::ostream& operator << (std::ostream& os, MessageWrap const & w) {
1344  return os << static_cast<MessageHead const&>(w) << ' ' << w.payload;
1345  }
1346 };
1347 
1348 template <MessageC Message>
 1349 struct MessageWrap<InBandHasMemoryAttachment<
 1350  tips::domain_detail::ipc_from<Message>>> : MessageHead {
 1351  MessageWrap(tips::domain_detail::ipc_from<Message> const& m)
 1352  : MessageHead(InBandHasMemoryAttachment<
 1353  tips::domain_detail::ipc_from<Message>>::typeTag)
 1354  , payload(m) {
1355  // if constexpr (std::is_base_of<hasMemoryAttachment, Message>::value) {
1356  // this->scratchpad().desc.flag = hasMemoryAttachment::flag;
1357  // } no use conflicting ipc.from and this type is wellknown not an attachment
1358  //
1359  this->scratchpad().ipc.hd.from = m.hmbdcIpcFrom;
1360  this->scratchpad().ipc.hd.inbandUnderlyingTypeTag = m.getTypeTag();
1361  }
 1362  InBandHasMemoryAttachment<tips::domain_detail::ipc_from<Message>> payload;
 1363 
1364  friend
1365  std::ostream& operator << (std::ostream& os, MessageWrap const & w) {
1366  return os << static_cast<MessageHead const&>(w) << ' ' << w.payload;
1367  }
1368 };
1369 
1370 template<>
1371 struct MessageWrap<tips::domain_detail::ipc_from<JustBytes>> : MessageHead {
1372  MessageWrap(uint16_t tag, void const* bytes, size_t len, hasMemoryAttachment* att
1373  , pid_t hmbdcIpcFrom)
1374  : MessageHead(tag)
1375  , payload(bytes, len) {
1376  if (att) {
1377  this->scratchpad().desc.flag = hasMemoryAttachment::flag;
1378  }
1379  this->scratchpad().ipc.hd.from = hmbdcIpcFrom;
1380  }
1381  JustBytes payload;
1382 
1383  friend
1384  std::ostream& operator << (std::ostream& os, MessageWrap const & w) {
1385  return os << static_cast<MessageHead const&>(w) << " *";
1386  }
1387 };
1388 
1389 template<>
 1390 struct MessageWrap<InBandHasMemoryAttachment<
 1391  tips::domain_detail::ipc_from<JustBytes>>> : MessageHead {
1392  MessageWrap(uint16_t tag, void const* bytes, size_t len, hasMemoryAttachment* att
1393  , pid_t hmbdcIpcFrom)
 1394  : MessageHead(InBandHasMemoryAttachment<tips::domain_detail::ipc_from<JustBytes>>::typeTag)
 1395  , payload(bytes, len) {
1396  // if constexpr (std::is_base_of<hasMemoryAttachment, Message>::value) {
1397  // this->scratchpad().desc.flag = hasMemoryAttachment::flag;
1398  // } no use conflicting ipc.from and this type is wellknown not an attachment
1399  //
1400  this->scratchpad().ipc.hd.from = hmbdcIpcFrom;
1401  this->scratchpad().ipc.hd.inbandUnderlyingTypeTag = tag;
1402  }
 1403  InBandHasMemoryAttachment<tips::domain_detail::ipc_from<JustBytes>> payload;
 1404 
1405  friend
1406  std::ostream& operator << (std::ostream& os, MessageWrap const & w) {
1407  return os << static_cast<MessageHead const&>(w) << ' ' << w.payload;
1408  }
1409 };
1410 
1411 } /// app
1412 } /// hmbdc
1413 
1414 