hmbdc
simplify-high-performance-messaging-programming
Context.hpp
1 #include "hmbdc/Copyright.hpp"
2 #pragma once
3 
4 
5 #include "hmbdc/app/StuckClientPurger.hpp"
6 #include "hmbdc/Config.hpp"
7 #include "hmbdc/numeric/BitMath.hpp"
8 #include "hmbdc/time/Time.hpp"
9 
10 #include <boost/interprocess/allocators/allocator.hpp>
11 #include <memory>
12 #include <vector>
13 #include <list>
14 #include <mutex>
15 
16 namespace hmbdc { namespace app {
17 
18 
19 /**
20 * @example hello-world.cpp
21 * @example hmbdc.cpp
22 * @example hmbdc-log.cpp
23 * @example ipc-market-data-propagate.cpp
24 */
25 
26 /**
27  * @namespace hmbdc::app::context_property
28  * contains the trait types that define how a Context behaves and its capabilities
29  */
30 namespace context_property {
31  /**
32  * @class broadcast
33  * @brief Context template parameter indicating each message is
34  * sent to all clients within the Context.
35  * This is the default property of a Context.
36  * @details each message is still subjected to Client's message type
37  * filtering. In the case of ipc Context
38  * it is also sent to all clients in the attached ipc Contexts.
39  * When this Context is specialized using this type, the context normally
40  * works with heterogeneous Clients and all Clients can talk to each
41  * other thru the Context. Load balance among Clients can be achieved by
42  * having participating Clients coordinate to select which messages to process.
43  * In addition to the direct mode Clients, a Client running pool is supported
44  * with the Context - see pool related functions in Context.
45  *
46  * Implicit usage in hello-world.cpp: @snippet hello-world.cpp broadcast as default
47  * Explicit usage in hmbdc.cpp @snippet hmbdc.cpp explicit using broadcast
48  * There is no hard coded limit on how many Clients can be added into a pool.
49  * Also, there is no limit on when you can add a Client into a pool.
50  * @tparam max_parallel_consumer max count of threads that process messages;
51  * that includes pool threads plus the count of direct mode Clients that
52  * register for messages within the Context
53  * supported values: 4 (default);
54  * 2,8,16,32,64,128,256 require an hmbdc license
55  */
56  template <uint16_t max_parallel_consumer = DEFAULT_HMBDC_CAPACITY>
57  struct broadcast{
 58  static_assert(max_parallel_consumer >= 4u
 59  , "unsupported max_parallel_consumer");
 60  };
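 /**
  * a minimal usage sketch (illustrative only, type and variable names are made up):
  * @code
  * // all Clients started in ctx see every message, up to 4 parallel consumers
  * using BcastCtx = hmbdc::app::Context<64
  *     , hmbdc::app::context_property::broadcast<4>>;
  * BcastCtx ctx;
  * @endcode
  */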
61 
62  /**
63  * @class partition
 64  * @brief Context template parameter indicating each message is
 65  * sent to one and only one of the clients within the Context
 66  * and its attached ipc Contexts if applicable.
67  * @details each message is still subjected to Client's message type
68  * filtering
69  * When this Context is specialized using this type, the context normally
70  * works with homogeneous Clients to achieve load balance thru threads. No
71  * coordination is needed between Clients.
72  * Only the direct mode Clients are supported, thread pool is NOT supported
73  * by this kind of Context - the pool related functions in Context are also disabled
74  *
75  * Example in server-cluster.cpp: @snippet server-cluster.cpp declare a partition context
76  */
77  struct partition{};
78 
79  /**
80  * @class msgless_pool
81  * @brief Context template parameter indicating the Context must contain a pool to run Clients
82  * and the Clients in the pool shall not receive messages - Unlike the default pool.
83  * @details msgless_pool performs better when its Clients don't need to receive messages from the Context.
 84  * This is useful when the Clients are network transport engines. By default, a partition Context
 85  * doesn't come with a pool for semantic reasons, but this Context property enables a pool that
86  * does not deliver messages.
87  */
88  struct msgless_pool{};
89 
90  /**
91  * @class ipc_enabled
92  * @brief Context template parameter indicating the Context is ipc enabled and
93  * it can create or be attached to an ipc transport thru a transport name.
94  * @details In addition to the normal Context functions, the Context acts either as
 95  * the creator (owner) of the named ipc transport or an attacher to the transport.
 96  * Since the creator performs a critical function to purge crashed or
 97  * stuck Clients to avoid buffer full for other well-behaving Clients, it is
 98  * expected to be running (started) for as long as the ipc transport functions.
99  * ipc transport uses persistent shared memory and if the dtor of Context is not called
100  * due to crashing, there will be stale shared memory in /dev/shm.
101  * It is very important that the Context is constructed exactly
102  * the same size (see constructor) and type as the ipc
103  * transport creator specified.
104  * All Contexts attaching to a single ipc transport collectively are subjected to the
 105  * max_parallel_consumer limits just like a single local (non-ipc) Context does.
106  * Example in ipc-market-data-propagate.cpp @snippet ipc-market-data-propagate.cpp declare an ipc context
107  */
108  struct ipc_enabled{};
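 /**
  * a minimal usage sketch (illustrative only, the transport name is made up):
  * one process creates and owns the named ipc transport, others attach to it;
  * all sides must use the same sizes as the creator
  * @code
  * using IpcCtx = hmbdc::app::Context<64
  *     , hmbdc::app::context_property::broadcast<>
  *     , hmbdc::app::context_property::ipc_enabled>;
  * IpcCtx owner(1, "my-ipc-transport");     // ownership > 0: create and own
  * IpcCtx attacher(-1, "my-ipc-transport"); // ownership < 0: attach only
  * @endcode
  */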
109 
110  /**
111  * @class pci_ipc
112  * @brief when processes are distributed on a PCIe board and host PC, add this property
113  * @details beta
114  *
115  */
116  struct pci_ipc{};
117 }
118 }}
119 
120 #include "hmbdc/app/ContextDetail.hpp"
121 namespace hmbdc { namespace app {
122 
123 namespace context_detail {
124 HMBDC_CLASS_HAS_DECLARE(hmbdc_ctx_queued_ts);
125 HMBDC_CLASS_HAS_DECLARE(hmbdcIpcFrom);
126 HMBDC_CLASS_HAS_DECLARE(ibmaProc);
127 
128 /**
129  * @class ThreadCommBase<>
 130  * @brief covers the inter-thread and ipc communication facade
131  * @details this type's interface is exposed thru Context and the type itself is
132  * not directly used by users
 133  * @tparam MaxMessageSize the max message size, needed at compile time;
 134  * if the value can only be determined at runtime, set this to 0. Things can still work
 135  * but will lose some compile time checking advantages, see maxMessageSizeRuntime below
136  * @tparam ContextProperties see types in context_property namespace
137  */
138 template <size_t MaxMessageSize, typename... ContextProperties>
139 struct ThreadCommBase
140  : private context_detail::context_property_aggregator<ContextProperties...> {
141  using cpa = context_property_aggregator<ContextProperties...>;
142  using Buffer = typename cpa::Buffer;
143  using Allocator = typename cpa::Allocator;
144 
145  enum {
146  MAX_MESSAGE_SIZE = MaxMessageSize,
147  BUFFER_VALUE_SIZE = MaxMessageSize + sizeof(MessageHead), //8bytes for wrap
148  };
149 
150  size_t maxMessageSize() const {
151  if (MaxMessageSize == 0) return maxMessageSizeRuntime_;
152  return MaxMessageSize;
153  }
154 
155  /**
 156  * @brief send a batch of messages to the Context or attached ipc Contexts
 157  * @details only the Clients that handle the Message will get it of course
158  * This function is threadsafe, which means you can call it anywhere in the code
159  *
160  * @param msgs messages
161  * @tparam Messages message types
162  */
163  template <MessageC M0, MessageC M1, typename ... Messages, typename Enabled
164  = typename std::enable_if<!std::is_integral<M1>::value, void>::type>
165  void
166  send(M0&& m0, M1&& m1, Messages&&... msgs) {
167  auto n = sizeof...(msgs) + 2;
168  auto it = buffer_.claim(n);
169  sendRecursive(it, std::forward<M0>(m0), std::forward<M1>(m1), std::forward<Messages>(msgs)...);
170  buffer_.commit(it, n);
171  }
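 /**
  * a minimal sketch of the batch send above, assuming MsgA and MsgB are
  * trivially destructible hmbdc message types (MessageC) defined elsewhere:
  * @code
  * MsgA a; MsgB b;
  * ctx.send(a, b); // both messages are claimed and committed as one batch
  * @endcode
  */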
172 
173  /**
 174  * @brief try to send a batch of messages to the Context or attached ipc Contexts
175  * @details this call does not block and it is transactional - send all or none
176  * This function is threadsafe, which means you can call it anywhere in the code
177  *
178  * @param msgs messages
179  * @tparam Messages message types
180  *
181  * @return true if send successfully
182  */
183  template <MessageC M0, MessageC M1, typename ... Messages, typename Enabled
184  = typename std::enable_if<!std::is_integral<M1>::value, void>::type>
185  bool
186  trySend(M0&& m0, M1&& m1, Messages&&... msgs) {
187  auto n = sizeof...(msgs) + 2;
188  auto it = buffer_.tryClaim(n);
189  if (it) {
190  sendRecursive(it, std::forward<M0>(m0), std::forward<M1>(m1), std::forward<Messages>(msgs)...);
191  buffer_.commit(it, n);
192  return true;
193  }
194 
195  return false;
196  }
197 
198  /**
199  * @brief send a range of messages to the Context or attached ipc Contexts
 200  * @details only the Clients that handle the Message will get it of course
201  * This function is threadsafe, which means you can call it anywhere in the code
202  *
 203  * @param begin a forward iterator pointing at the start of the range
204  * @param n length of the range
205  */
206  template <MessageForwardIterC ForwardIt>
207  void
208  send(ForwardIt begin, size_t n) {
209  if (hmbdc_likely(n)) {
210  auto bit = buffer_.claim(n);
211  auto it = bit;
212  for (auto i = 0ul; i < n; i++) {
213  using Message = typename std::iterator_traits<ForwardIt>::value_type;
214  static_assert(std::is_trivially_destructible<Message>::value
215  , "cannot send message with dtor");
216  static_assert(!std::is_base_of<hasMemoryAttachment, Message>::value
217  , "hasMemoryAttachment Messages cannot be sent in group");
218  auto wrap = new (*it++) MessageWrap<Message>(*begin++);
219  if constexpr (has_hmbdc_ctx_queued_ts<Message>::value) {
220  wrap->template get<Message>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
221  } else (void)wrap;
222  }
223  buffer_.commit(bit, n);
224  }
225  }
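 /**
  * a minimal sketch of the range send above, assuming MsgA is a trivially
  * destructible hmbdc message type:
  * @code
  * std::array<MsgA, 8> batch; // ... fill batch ...
  * ctx.send(batch.begin(), batch.size());
  * @endcode
  */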
226 
227  /**
228  * @brief try send a range of messages to the Context or attached ipc Contexts
229  * @details this call does not block and it is transactional - send all or none
230  * This function is threadsafe, which means you can call it anywhere in the code
231  *
 232  * @param begin a forward iterator pointing at the start of the range
233  * @param n length of the range
234  */
235  template <MessageForwardIterC ForwardIt>
236  bool
237  trySend(ForwardIt begin, size_t n) {
238  if (hmbdc_likely(n)) {
239  auto bit = buffer_.tryClaim(n);
240  if (hmbdc_unlikely(!bit)) return false;
241  auto it = bit;
242  for (auto i = 0ul; i < n; i++) {
243  using Message = typename std::iterator_traits<ForwardIt>::value_type;
244  static_assert(std::is_trivially_destructible<Message>::value
245  , "cannot send message with dtor");
246  static_assert(!std::is_base_of<hasMemoryAttachment, Message>::value
247  , "hasMemoryAttachment Messages cannot be sent in group");
248  auto wrap = new (*it++) MessageWrap<Message>(*begin++);
249  if constexpr (has_hmbdc_ctx_queued_ts<Message>::value) {
250  wrap->template get<Message>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
251  } else (void)wrap;
252  }
253  buffer_.commit(bit, n);
254  }
255  return true;
256  }
257 
258  /**
 259  * @brief send a message including a hasMemoryAttachment message to the Context or attached ipc Contexts
 260  * @details only the Clients that handle the Message will get it of course
 261  * This function is threadsafe, which means you can call it anywhere in the code
 262  * If sending a hasMemoryAttachment message, the size of the attachment is runtime checked and
263  * restricted by Context capacity
264  *
265  * @param m message
266  * @tparam Message type
267  */
268  template <MessageC Message>
269  void
270  send(Message&& m) {
271  using M = typename std::decay<Message>::type;
272  static_assert(std::is_trivially_destructible<M>::value, "cannot send message with dtor");
273  static_assert(MAX_MESSAGE_SIZE == 0 || sizeof(MessageWrap<M>) <= BUFFER_VALUE_SIZE
274  , "message too big");
275  if constexpr(!std::is_base_of<hasMemoryAttachment, M>::value
276  || !cpa::ipc) {
277  if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 && sizeof(MessageWrap<M>) > buffer_.maxItemSize())) {
278  HMBDC_THROW(std::out_of_range, "message too big, typeTag=" << m.getTypeTag());
279  }
280 
281  if constexpr (has_hmbdc_ctx_queued_ts<M>::value) {
282  auto it = buffer_.claim();
283  auto wrap = new (*it++) MessageWrap<M>(std::forward<Message>(m));
284  wrap->template get<M>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
285  buffer_.commit(it);
286  } else {
287  buffer_.put(MessageWrap<M>(std::forward<Message>(m)));
288  }
289  } else {
290  if constexpr (cpa::ipc && has_hmbdcShmRefCount<M>::value) {
291  if (m.template holdShmHandle<M>()) {
292  if (0 >= m.hmbdcShmRefCount) {
293  return; // no process is interested
294  }
295  auto it = buffer_.claim(1);
296  auto wrap = (new (*it) MessageWrap<InBandHasMemoryAttachment<M>>(m)); (void)wrap;
297  wrap->payload.shmConvert(*shmAttAllocator_);
298  if constexpr (has_hmbdc_ctx_queued_ts<M>::value) {
299  wrap->template get<Message>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
300  }
301  buffer_.commit(it, 1);
302  m.hasMemoryAttachment::release();
303  return;
304  }
305  }
306  if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
307  && sizeof(MessageWrap<InBandHasMemoryAttachment<M>>) > buffer_.maxItemSize())) {
308  HMBDC_THROW(std::out_of_range, "message too big, typeTag=" << m.getTypeTag());
309  }
310  auto att = reinterpret_cast<hasMemoryAttachment&>(m);
311  size_t segSize = buffer_.maxItemSize() - sizeof(MessageHead);
312  auto n = (att.len + segSize - 1) / segSize + 1;
313  if (hmbdc_unlikely(n > buffer_.capacity())) {
314  HMBDC_THROW(std::out_of_range
315  , "hasMemoryAttachment message too big, typeTag=" << m.getTypeTag());
316  }
317 
318  auto bit = buffer_.claim(n);
319  auto it = bit;
320  // InBandHasMemoryAttachment<M> ibm{m};
321  auto wrap = (new (*it++) MessageWrap<InBandHasMemoryAttachment<M>>(m)); (void)wrap;
322  // wrap->scratchpad().ipc.hd.inbandUnderlyingTypeTag = m.getTypeTag();
323 
324  if constexpr (has_hmbdc_ctx_queued_ts<M>::value) {
325  wrap->template get<Message>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
326  }
327  auto segStart = (char*)att.attachment;
328  auto remaining = (size_t)att.len;
329  while(--n) {
330  auto wrap = (new (*it++) MessageWrap<InBandMemorySeg>());
331  auto& ms = wrap->get<InBandMemorySeg>();
332  auto bytes = std::min(segSize, (size_t)remaining);
333  wrap->scratchpad().ipc.inbandPayloadLen = bytes;
334  memcpy(ms.seg, segStart, bytes);
335  segStart += bytes;
336  remaining -= bytes;
337  }
338  buffer_.commit(bit, it - bit);
339  att.release();
340  }
341  }
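 /**
  * a minimal sketch of the single message send above, assuming MsgA is a valid
  * hmbdc message type; the call blocks until buffer space is claimed:
  * @code
  * MsgA m; // ... fill m ...
  * ctx.send(m);
  * @endcode
  */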
342 
343  /**
 344  * @brief preallocate consecutive buffers so they can be sent out later
 345  *
 346  * @param n how many buffers to allocate - each buffer is sized to hold the max message size
347  * @return Buffer::iterator
348  */
349  template <MessageC Message>
350  auto allocateForSend(size_t n) {
351  struct IteratorAdaptor {
352  typename Buffer::iterator it;
353  Message& get() {
354  auto wrap = (MessageWrap<Message>*)(*it);
355  return wrap->payload;
356  }
357  auto& operator ++() {
358  ++it;
359  return *this;
360  }
361  auto operator ++(int) {
362  auto tmp = IteratorAdaptor{it++};
363  return tmp;
364  }
365  };
366 
367  auto it = buffer_.claim(n);
368  auto res = IteratorAdaptor{it};
369  while (n--) {
370  new (*it++) MessageWrap<Message>;
371  }
372  return res;
373  }
374 
375  /**
376  * @brief commit all the filled up buffers allocated by allocateForSend
377  * and send them out
378  *
379  * @tparam IteratorAdaptor the return value type of allocateForSend
 380  * @param itA the returned value of allocateForSend
381  */
382  template <typename IteratorAdaptor>
383  void commitForSend(IteratorAdaptor itA) {
384  buffer_.commit(itA.it);
385  }
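 /**
  * a minimal sketch combining allocateForSend and commitForSend, assuming MsgA
  * is a valid hmbdc message type and 'seqNo' is a made-up member of it:
  * @code
  * auto itA = ctx.allocateForSend<MsgA>(4);
  * auto fill = itA;               // keep itA pointing at the first slot
  * for (size_t i = 0; i < 4; ++i) {
  *     fill.get().seqNo = i;      // fill each preallocated message in place
  *     ++fill;
  * }
  * ctx.commitForSend(itA);        // publish the filled up buffers
  * @endcode
  */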
386 
387  /**
 388  * @brief try to send a message including a hasMemoryAttachment message to the Context or attached ipc Contexts
389  * if it wouldn't block
390  * @details this call does not block - return false when buffer is full
391  * This function is threadsafe, which means you can call it anywhere in the code
 392  * If sending a hasMemoryAttachment message, the size of the attachment is runtime checked and
393  * restricted by Context capacity
394  *
395  * @param m message
396  * @tparam Message type
397  * @return true if send successfully
398  */
399  template <MessageC Message>
400  bool trySend(Message&& m) {
401  using M = typename std::decay<Message>::type;
402  static_assert(std::is_trivially_destructible<M>::value, "cannot send message with dtor");
403  static_assert(MAX_MESSAGE_SIZE == 0 || sizeof(MessageWrap<M>) <= BUFFER_VALUE_SIZE
404  , "message too big");
405 
406  if constexpr(!std::is_base_of<hasMemoryAttachment, typename std::decay<Message>::type>::value) {
407  if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 && sizeof(MessageWrap<M>) > buffer_.maxItemSize())) {
408  HMBDC_THROW(std::out_of_range, "message too big");
409  }
410  return buffer_.tryPut(MessageWrap<M>(std::forward<Message>(m)));
411  } else {
412  if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 && sizeof(MessageWrap<InBandHasMemoryAttachment<M>>) > buffer_.maxItemSize())) {
413  HMBDC_THROW(std::out_of_range, "message too big, typeTag=" << m.getTypeTag());
414  }
415  auto att = reinterpret_cast<hasMemoryAttachment&>(m);
416  size_t segSize = buffer_.maxItemSize() - sizeof(MessageHead);
417  auto n = (att.len + segSize - 1) / segSize + 1;
418  if (hmbdc_unlikely(n > buffer_.capacity())) {
419  HMBDC_THROW(std::out_of_range, "hasMemoryAttachment message too big, typeTag=" << m.getTypeTag());
420  }
421 
422  auto bit = buffer_.tryClaim(n);
423  if (!bit) return false;
424  auto it = bit;
425  InBandHasMemoryAttachment<M> ibm{std::forward<Message>(m)};
426 
427  (new (*it++) MessageWrap<InBandHasMemoryAttachment<M>>(ibm))
428  ->scratchpad().inbandUnderlyingTypeTag = m.getTypeTag();
429  auto segStart = (char*)att.attachment;
430  auto remaining = (size_t)att.len;
431  while(--n) {
432  auto wrap = (new (*it++) MessageWrap<InBandMemorySeg>());
433  auto& ms = wrap->get<InBandMemorySeg>();
434  auto bytes = std::min(segSize, (size_t)remaining);
435  wrap->scratchpad().ipc.inbandPayloadLen = bytes;
436  memcpy(ms.seg, segStart, bytes);
437  segStart += bytes;
438  remaining -= bytes;
439  }
440  buffer_.commit(bit, it - bit);
441  att.release();
442  return true;
443  }
444  }
445 
446  /**
447  * @brief send a message to all Clients in the Context or attached ipc Contexts
448  * @details construct the Message in buffer directly
449  * This function is threadsafe, which means you can call it anywhere in the code
450  *
451  * @param args ctor args
452  * @tparam Message type
453  * @tparam typename ... Args args
454  */
455  template <MessageC Message, typename ... Args>
456  void sendInPlace(Args&&... args) {
457  static_assert(!std::is_base_of<JustBytes, Message>::value
458  , "use sendJustBytesInPlace");
459  static_assert(std::is_trivially_destructible<Message>::value, "cannot send message with dtor");
460  static_assert(MAX_MESSAGE_SIZE == 0 || sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE
461  , "message too big");
462  static_assert(!std::is_base_of<hasMemoryAttachment, Message>::value
463  , "hasMemoryAttachment Messages cannot be sent in place");
464  if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 && sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
465  HMBDC_THROW(std::out_of_range
466  , "message too big buffer_.maxItemSize()=" << buffer_.maxItemSize());
467  }
468  buffer_.template putInPlace<MessageWrap<Message>>(std::forward<Args>(args)...);
469  }
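 /**
  * a minimal sketch of sendInPlace, assuming MsgA has a constructor taking an int;
  * the Message is constructed directly in the Context buffer, avoiding a copy:
  * @code
  * ctx.sendInPlace<MsgA>(42);
  * @endcode
  */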
470 
471  template <typename JustBytesType, typename ... Args>
472  void sendJustBytesInPlace(uint16_t tag, void const* bytes, size_t len
473  , app::hasMemoryAttachment* att, Args&& ...args) {
474  if (hmbdc_unlikely(len > maxMessageSize())) {
475  HMBDC_THROW(std::out_of_range, "message too big, typeTag=" << tag
476  << " len=" << len);
477  }
478  if (!att || !cpa::ipc) {
479  auto it = buffer_.claim();
480  new (*it) MessageWrap<JustBytesType>(tag, bytes, len, att
481  , std::forward<Args>(args)...);
482  buffer_.commit(it);
483  } else {
484  size_t segSize = buffer_.maxItemSize() - sizeof(MessageHead);
485  auto n = (att->len + segSize - 1) / segSize + 1;
486  if (hmbdc_unlikely(n > buffer_.capacity())) {
487  HMBDC_THROW(std::out_of_range
488  , "hasMemoryAttachment message too big, typeTag=" << tag);
489  }
490 
491  auto bit = buffer_.claim(n);
492  auto it = bit;
493  auto wrap = new (*it++) MessageWrap<InBandHasMemoryAttachment<JustBytesType>>(
494  tag, bytes, len, att, std::forward<Args>(args)...); (void)wrap;
495  // wrap->scratchpad().ipc.hd.inbandUnderlyingTypeTag = tag;
496 
497  auto segStart = (char*)att->attachment;
498  auto remaining = (size_t)att->len;
499  while(--n) {
500  auto wrap = (new (*it++) MessageWrap<InBandMemorySeg>());
501  auto& ms = wrap->get<InBandMemorySeg>();
502  auto bytes = std::min(segSize, (size_t)remaining);
503  wrap->scratchpad().ipc.inbandPayloadLen = bytes;
504  memcpy(ms.seg, segStart, bytes);
505  segStart += bytes;
506  remaining -= bytes;
507  }
508  buffer_.commit(bit, it - bit);
509  att->release();
510  }
511  }
512 
513 
514  /**
515  * @brief try send a message to all Clients in the Context or attached ipc Contexts if it wouldn't block
516  * @details this call does not block - return false when buffer is full
 517  * constructs the Message in the buffer directly if it returns true
518  * This function is threadsafe, which means you can call it anywhere in the code
519  *
520  * @param args ctor args
521  * @tparam Message type
522  * @tparam typename ... Args args
523  * @return true if send successfully
524  */
525  template <MessageC Message, typename ... Args>
526  bool trySendInPlace(Args&&... args) {
527  static_assert(std::is_trivially_destructible<Message>::value, "cannot send message with dtor");
528  static_assert(MAX_MESSAGE_SIZE == 0 || sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE
529  , "message too big");
530  static_assert(!std::is_base_of<hasMemoryAttachment, Message>::value, "hasMemoryAttachment Messages cannot be sent in place");
531  if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 && sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
532  HMBDC_THROW(std::out_of_range, "message too big");
533  }
534  return buffer_.template tryPutInPlace<MessageWrap<Message>>(std::forward<Args>(args)...);
535  }
536 
537  /**
538  * @brief accessor - mostly used internally
539  * @return underlying buffer used in the Context
540  */
541  Buffer& buffer() {
542  return buffer_;
543  }
544 
545  size_t dispatchingStartedCount() const {
546  __atomic_thread_fence(__ATOMIC_ACQUIRE);
547  return *pDispStartCount_;
548  }
549 
550  template <typename T, typename ...Args>
551  std::shared_ptr<T> allocateInShm(size_t actualSize, Args&& ...args) {
552  static_assert(std::is_trivially_destructible<T>::value);
553  auto ptr = shmAttAllocator_->allocate(actualSize);
554  auto ptrT = new (ptr) T{std::forward<Args>(args)...};
555  // HMBDC_LOG_N(shmAttAllocator_->getHandle(ptr));
556  return std::shared_ptr<T>(
557  ptrT
558  , [this](T* t) {
559  if (0 == __atomic_sub_fetch(&t->hmbdc0cpyShmRefCount, 1, __ATOMIC_RELEASE)) {
560  shmAttAllocator_->deallocate((uint8_t*)t);
561  }
562  });
563  }
564 
565 protected:
566  template <typename IntLvOrRv>
567  ThreadCommBase(uint32_t messageQueueSizePower2Num
568  , size_t maxMessageSizeRuntime
569  , char const* shmName
570  , size_t offset
571  , IntLvOrRv&& ownership
572  , size_t ipcShmForAttPoolSize)
573  : allocator_(shmName
574  , offset
575  , Buffer::footprint(maxMessageSizeRuntime + sizeof(MessageHead)
576  , messageQueueSizePower2Num) + SMP_CACHE_BYTES + sizeof(*pDispStartCount_)
577  , ownership)
578  , pDispStartCount_(allocator_.template allocate<size_t>(SMP_CACHE_BYTES, 0))
579  , bufferptr_(allocator_.template allocate<Buffer>(SMP_CACHE_BYTES
580  , maxMessageSizeRuntime + sizeof(MessageHead), messageQueueSizePower2Num
581  , allocator_)
582  )
583  , buffer_(*bufferptr_) {
584  if (messageQueueSizePower2Num < 2) {
585  HMBDC_THROW(std::out_of_range
586  , "messageQueueSizePower2Num need >= 2");
587  }
588  if (MaxMessageSize && maxMessageSizeRuntime != MAX_MESSAGE_SIZE) {
589  HMBDC_THROW(std::out_of_range
590  , "can only set maxMessageSizeRuntime when template value MaxMessageSize is 0");
591  }
592  maxMessageSizeRuntime_ = maxMessageSizeRuntime;
593  // primeBuffer<(cpa::create_ipc || (!cpa::create_ipc && !cpa::attach_ipc)) && cpa::has_pool>();
594  if (((cpa::ipc && ownership > 0) || !cpa::ipc) && cpa::has_pool) {
595  markDeadFrom(buffer_, 0);
596  }
597 
598  if (cpa::ipc && ipcShmForAttPoolSize) {
599  size_t retry = 3;
600  while (true) {
601  try {
602  // using namespace boost::interprocess;
603  auto name = std::string(shmName) + "-att-pool";
604  if (ownership > 0) {
605  shm_unlink(name.c_str());
606  shmAttAllocator_.emplace(
607  ownership > 0, boost::interprocess::create_only
608  , name.c_str(), ipcShmForAttPoolSize);
609  } else {
610  shmAttAllocator_.emplace(ownership > 0
611  , boost::interprocess::open_only, name.c_str());
612  }
613  return;
614  } catch (boost::interprocess::interprocess_exception const&) {
615  if (--retry == 0) throw;
616  sleep(1);
617  }
618  }
619  }
620  // if (cpa::ipc) {
621  // sleep(2);
622  // }
623  }
624 
625  ~ThreadCommBase() {
626  allocator_.unallocate(bufferptr_);
627  }
628 
629  static
630  void markDeadFrom(pattern::MonoLockFreeBuffer& buffer, uint16_t) {
631  // does not apply
632  }
633 
634  template <typename BroadCastBuf>
635  static
636  void markDeadFrom(BroadCastBuf& buffer, uint16_t poolThreadCount) {
637  for (uint16_t i = poolThreadCount;
638  i < BroadCastBuf::max_parallel_consumer;
639  ++i) {
640  buffer.markDead(i);
641  }
642  }
643 
644 
645  static
646  void markDead(pattern::MonoLockFreeBuffer& buffer, std::list<uint16_t>slots) {
647  // does not apply
648  }
649 
650  template <typename BroadCastBuf>
651  static
652  void markDead(BroadCastBuf& buffer, std::list<uint16_t>slots) {
653  for (auto s : slots) {
654  buffer.markDead(s);
655  }
656  }
657 
658  Allocator allocator_;
659  size_t* pDispStartCount_;
660  Buffer* HMBDC_RESTRICT bufferptr_;
661  Buffer& HMBDC_RESTRICT buffer_;
 662 
 663  struct ShmAttAllocator {
 664  template <typename Arg, typename ...Args>
665  ShmAttAllocator(bool own, Arg&& arg, char const* name, Args&& ... args)
666  : managedShm_(std::forward<Arg>(arg), name, std::forward<Args>(args)...) {
667  if (own) {
668  nameUnlink_ = name;
669  }
670  }
671 
672  ~ShmAttAllocator() {
673  if (nameUnlink_.size()) {
674  shm_unlink(nameUnlink_.c_str());
675  }
676  }
677 
678  boost::interprocess::managed_shared_memory::handle_t
679  getHandle(void* localAddr) const {
680  return managedShm_.get_handle_from_address(localAddr);
681  }
682 
683  uint8_t*
684  getAddr(boost::interprocess::managed_shared_memory::handle_t h) const {
685  return (uint8_t*)managedShm_.get_address_from_handle(h);
686  }
687 
688  uint8_t* allocate(size_t len) {
689  auto res = (uint8_t*)managedShm_.allocate(len);
690  // HMBDC_LOG_N(getHandle(res));
691  return res;
692  }
693 
694  auto deallocate(uint8_t* p) {
695  // HMBDC_LOG_N(getHandle(p));
696  return managedShm_.deallocate(p);
697  }
698 
699  private:
700  boost::interprocess::managed_shared_memory managedShm_;
701  std::string nameUnlink_;
702  };
703  std::optional<ShmAttAllocator> shmAttAllocator_;
704 
705 private:
706 
707  template <typename M, typename... Messages>
708  void sendRecursive(typename Buffer::iterator it
709  , M&& msg, Messages&&... msgs) {
710  using Message = typename std::decay<M>::type;
711  static_assert(std::is_trivially_destructible<Message>::value
712  , "cannot send message with dtor");
713  static_assert(MAX_MESSAGE_SIZE == 0 || sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE
714  , "message too big");
715  static_assert(!std::is_base_of<hasMemoryAttachment, Message>::value
716  , "hasMemoryAttachment Messages cannot be sent in group");
717  if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
718  && sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
719  HMBDC_THROW(std::out_of_range, "message too big");
720  }
721  auto wrap = new (*it) MessageWrap<Message>(msg);
722  if constexpr (has_hmbdc_ctx_queued_ts<Message>::value) {
723  wrap->template get<Message>().hmbdc_ctx_queued_ts = hmbdc::time::SysTime::now();
724  } else (void)wrap;
725  sendRecursive(++it, std::forward<Messages>(msgs)...);
726  }
727  void sendRecursive(typename Buffer::iterator) {}
728 
729  size_t maxMessageSizeRuntime_;
730 };
731 
732 } //context_detail
733 
734 /**
735  * @example hmbdc.cpp
736  * @example server-cluster.cpp
 737  * a partition Context rightfully doesn't contain a thread pool and all its Clients
 738  * are in direct mode. Pool related interfaces are turned off at compile time
739  */
740 
741 /**
742  * @class Context<>
743  * @brief A Context is like a media object that facilitates the communications
744  * for the Clients that it is holding.
 745  * a Client can only be added to (or started within) a single Context once;
 746  * undefined behavior otherwise.
 747  * the communication model is determined by the context_property;
 748  * by default it is broadcast in nature within the local process, indicated
 749  * by broadcast<>
750  *
751  * @details a broadcast Context contains a thread Pool powered by a number of OS threads.
752  * a Client running in such a Context can either run in the pool mode or a direct mode
753  * (which means the Client has its own dedicated OS thread)
754  * direct mode provides faster responses, and pool mode provides more flexibility.
755  * It is recommended that the total number of threads (pool threads + direct threads)
 756  * not exceed the number of available CPUs.
757  * @tparam MaxMessageSize What is the max message size if known
758  * at compile time(compile time sized);
759  * if the value can only be determined at runtime (run time sized), set this to 0.
 760  * Things can still work but will lose some compile time checking advantages,
761  * see maxMessageSizeRuntime below
762  * @tparam ContextProperties see context_property namespace
763  */
764 template <size_t MaxMessageSize = 0, typename... ContextProperties>
765 struct Context
766 : context_detail::ThreadCommBase<MaxMessageSize, ContextProperties...> {
767  using Base = context_detail::ThreadCommBase<MaxMessageSize, ContextProperties...>;
768  using Buffer = typename Base::Buffer;
769  using cpa = typename Base::cpa;
 770  using Pool = typename std::conditional<cpa::pool_msgless
 771  , pattern::PoolMinus
 772  , pattern::PoolT<Buffer>>::type;
773  /**
 774  * @brief ctor for constructing a local non-ipc Context
 775  * @details won't compile if called for an ipc Context
 776  * @param messageQueueSizePower2Num value of 10 gives a message queue of size 1024
 777  * (messages, not bytes)
 778  * @param maxPoolClientCount up to how many Clients the pool is supposed to support,
779  * only used when
780  * pool supported in the Context with broadcast property
781  * @param maxMessageSizeRuntime if MaxMessageSize == 0, this value is used
782  * the context can manage
783  */
784  Context(uint32_t messageQueueSizePower2Num = MaxMessageSize?20:2
785  , size_t maxPoolClientCount = MaxMessageSize?128:0
786  , size_t maxMessageSizeRuntime = MaxMessageSize)
787  : Base(messageQueueSizePower2Num < 2?2:messageQueueSizePower2Num
788  , MaxMessageSize?MaxMessageSize:maxMessageSizeRuntime
789  , nullptr, 0, false, 0)
790  , usedHmbdcCapacity_(0)
791  , stopped_(false)
792  , pool_(createPool<cpa>(maxPoolClientCount))
793  , poolThreadCount_(0) {
794  static_assert(!cpa::ipc, "no name specified for ipc Context");
795  }
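 /**
  * a minimal construction sketch (values are illustrative): a 1024-slot queue
  * (2^10) and a pool sized for up to 16 Clients, message size fixed at compile time:
  * @code
  * hmbdc::app::Context<64> ctx(10, 16);
  * @endcode
  */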
796 
797  /**
 798  * @brief ctor for constructing an ipc Context
 799  * @details won't compile if called for a local non-ipc Context
800  *
801  * @param ownership input/output flag -
802  * < 0 attach only and not own it;
803  * 0 attach or create (input);
804  * > 0 recreate and own
 805  * this IPC Context - meaning the ctor recreates it and, when exiting, removes the IPC shm file.
806  * @param ipcTransportName the id to identify an ipc transport that supports
 807  * a group of Contexts attached together and their Clients
 808  * @param messageQueueSizePower2Num value of 10 gives a message queue of size
 809  * 1024 (messages, not bytes)
 810  * @param maxPoolClientCount up to how many Clients the pool is supposed to support,
811  * only used when pool supported in the Context with broadcast property
812  * @param maxMessageSizeRuntime if MaxMessageSize == 0, this value is used
813  * @param purgerCpuAffinityMask which CPUs to run the low profile (sleep mostly)
814  * thread in charge of purging crashed Clients. Used only for ipc_creator Contexts.
815  * @param ipcTransportDeviceOffset the offset in the ipcTransport dev for the use region
816  * the context can manage
817  */
818  template <typename IntRvOrLv
819  , std::enable_if_t<std::is_same<int, typename std::decay<IntRvOrLv>::type>::value>*
820  = nullptr
821  >
822  Context(IntRvOrLv&& ownership
823  , char const* ipcTransportName
824  , uint32_t messageQueueSizePower2Num = MaxMessageSize?20:0
825  , size_t maxPoolClientCount = MaxMessageSize?128:0
826  , size_t maxMessageSizeRuntime = MaxMessageSize
827  , uint64_t purgerCpuAffinityMask = 0xfffffffffffffffful
828  , size_t ipcTransportDeviceOffset = 0
829  , size_t ipcShmForAttPoolSize = 0)
830  : Base(messageQueueSizePower2Num
831  , MaxMessageSize?MaxMessageSize:maxMessageSizeRuntime
832  , ipcTransportName
833  , ipcTransportDeviceOffset, ownership, ipcShmForAttPoolSize)
834  , usedHmbdcCapacity_(0)
835  , stopped_(false)
836  , pool_(createPool<cpa>(maxPoolClientCount))
837  , poolThreadCount_(0)
838  , secondsBetweenPurge_(60)
839  , ifIpcCreator_(ownership > 0)
840  , purgerCpuAffinityMask_(purgerCpuAffinityMask) {
841  static_assert(cpa::ipc, "ctor can only be used with ipc turned on Context");
842  }
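 /**
  * a minimal sketch for a run time sized ipc Context (MaxMessageSize == 0, all
  * values illustrative); creator and attacher must agree on the sizes:
  * @code
  * using IpcCtx = hmbdc::app::Context<0
  *     , hmbdc::app::context_property::broadcast<>
  *     , hmbdc::app::context_property::ipc_enabled>;
  * IpcCtx creator(1, "md-transport", 20, 4, 1000);   // owns the shm transport
  * IpcCtx attacher(-1, "md-transport", 20, 4, 1000); // attaches with matching sizes
  * @endcode
  */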
843 
844  /**
845  * @brief dtor
846  * @details if this Context owns ipc transport, notify all attached processes
847  * that read from it that this tranport is dead
848  */
 849  ~Context() {
 850  if (ifIpcCreator_) {
851  Base::markDeadFrom(this->buffer_, 0);
852  }
853  stop();
854  join();
855  }
856 
857  /**
858  * @brief add a client to Context's pool - the Client is run in pool mode
 859  * @details if pool is already started, the client is to get current Messages immediately
860  * - might miss older messages.
861  * if the pool not started yet, the Client does not get messages or other callbacks until
862  * the Pool starts.
863  * This function is threadsafe, which means you can call it anywhere in the code
864  * @tparam Client client type
865  * @param client to be added into the Pool
866  * @param poolThreadAffinityIn pool is powered by a number of threads
867  * (thread in the pool is identified (by a number) in the mask starting from bit 0)
868  * it is possible to have a Client to use just some of the threads in the Pool
869  * - default to use all.
870  *
871  */
872  template <typename Client>
873  void addToPool(Client &client
874  , uint64_t poolThreadAffinityIn = 0xfffffffffffffffful) {
875  static_assert(cpa::has_pool, "pool is not support in the Context type");
876  if (std::is_base_of<single_thread_powered_client, Client>::value
877  && hmbdc::numeric::setBitsCount(poolThreadAffinityIn) != 1
878  && poolThreadCount_ != 1) {
879  HMBDC_THROW(std::out_of_range
880  , "cannot add a single thread powered client to the non-single"
881  "thread powered pool without specifying a single thread poolThreadAffinity"
882  );
883  }
884  primeForShmAtt(client);
885  auto stub = new context_detail::PoolConsumerProxy<Client>(client, this->pDispStartCount_);
886  pool_->addConsumer(*stub, poolThreadAffinityIn);
887 
888  }
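 /**
  * a minimal pool usage sketch, assuming MyClient is an hmbdc Client type defined
  * elsewhere; the pool threads themselves are started via Context::start:
  * @code
  * MyClient c0, c1;
  * ctx.addToPool(c0);          // c0 may run on any pool thread
  * ctx.addToPool(c1, 0x1ul);   // restrict c1 to pool thread 0
  * ctx.start(2, 0x3ul);        // 2 pool threads pinned to CPU 0 and CPU 1
  * @endcode
  */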
889 
890  /**
891  * @brief add a bunch of clients to Context's pool - the Clients are run in pool mode
 892  * @details if pool is already started, the client is to get current Messages immediately
893  * - might miss older messages.
894  * if the pool not started yet, the Client does not get messages or other callbacks until
895  * the Pool starts.
896  * This function is threadsafe, which means you can call it anywhere in the code
897  * @tparam Client client type
898  * @param client to be added into the Pool
899  * @param poolThreadAffinityIn pool is powered by a number of threads
900  * (thread in the pool is identified (by a number) in the mask starting from bit 0)
901  * it is possible to have a Client to use just some of the threads in the Pool
902  * - default to use all.
903  * @param args more client and poolThreadAffinityIn pairs can follow
904  */
905  template <typename Client, typename ... Args>
906  void addToPool(Client &client
907  , uint64_t poolThreadAffinityIn, Args&& ...args) {
908  addToPool(client, poolThreadAffinityIn);
909  addToPool(std::forward<Args>(args)...);
910  }
911 
912  /**
913  * @brief add a bunch of clients to Context's pool - the Clients are run in pool mode
 914  * @details the implementation tells all
915  * if the pool not started yet, the Client does not get messages or other callbacks until
916  * the Pool starts.
917  * This function is threadsafe, which means you can call it anywhere in the code
918  * @tparam Client client type
919  * @tparam Client2 client2 type
920  * @param client to be added into the Pool using default poolThreadAffinity
921  * @param client2 to be added into the Pool
 922  * @param args more client (and/or poolThreadAffinityIn) pairs can follow
923  */
924  template <typename Client, typename Client2, typename ... Args, typename Enabled
925  = typename std::enable_if<!std::is_integral<Client2>::value, void>::type>
926  void
927  addToPool(Client &client, Client2 &client2, Args&& ...args) {
928  addToPool(client);
929  addToPool(client2, std::forward<Args>(args)...);
930  }
931 
932  /**
 933  * @brief return the number of clients added into the pool
934  * @details the number could change since the clients could be added in another thread
935  * @return client count
936  */
937  size_t clientCountInPool() const {
938  static_assert(cpa::has_pool, "pool is not support in the Context type");
939  return pool_->consumerSize();
940  }
941 
942  /**
 943  * @brief how many parallel consumers are started
944  * @details the dynamic value could change after the call returns
945  * see max_parallel_consumer Context property
 946  * @return how many parallel consumers are started
947  */
948  size_t parallelConsumerAlive() const {
949  return this->buffer_.parallelConsumerAlive();
950  }
951  /**
952  * @brief start the context by specifying what are in it (Pool and/or direct Clients)
953  * and their paired up cpu affinities.
 954  * @details All direct mode Clients or Clients in a pool started by a single start
 955  * statement are dispatched starting from the same event
 956  * (subject to event filtering of each client).
 957  * many compile time and runtime checks are done, for example:
 958  * it won't compile if starting a pool in a Context that does not support one;
 959  * an exception is thrown if the Context capacity is reached or a second pool is started, etc.
960  *
961  * Usage example:
962  *
963  * @code
964  * // the following starts the pool powered by 3 threads that are affinitied to
965  * // the lower 8 CPUs; client0 affinitied to 4th CPU and client1 affinitied to 5th CPU
966  * ctx.start(3, 0xfful, client0, 0x8ul, client1, 0x10ul);
967  *
968  * // the following starts the pool powered by 3 threads that are affinitied to
 969  * // all existing CPUs; client0 affinitied to a rotating CPU and
970  * // client1 affinitied to 5th CPU
971  * ctx.start(3, 0, client0, 0, client1, 0x10ul);
972  *
973  * // the following starts 2 direct mode Clients (client2 and client3)
974  * ctx.start(client2, 0x3ul, client3, 0xful);
975  * @endcode
976  *
977  * @tparam typename ...Args types
978  *
979  * @param args paired up args in the form of (pool-thread-count|client, cpuAffinity)*.
980  * see examples above.
981  * If a cpuAffinity is 0, each thread's affinity rotates to one of the CPUs in the system.
982  */
983  template <typename ...Args>
984  void
985  start(Args&& ... args) {
986  startWithContextProperty<cpa>(true, std::forward<Args>(args) ...);
987  }
988 
989  /**
 990  * @brief similar to the start call in usage and parameters, except the Client thread
991  * is not started by this call and the user is expected to use runOnce() to
992  * manually power the Client externally
993  *
994  * @tparam Args see start above
995  * @param args see start above
996  */
997  template <typename ...Args>
998  void
999  registerToRun(Args&& ... args) {
1000  startWithContextProperty<cpa>(false, std::forward<Args>(args) ...);
1001  }
1002 
1003  /**
1004  * @brief stop the message dispatching - asynchronously
 1005  * @details asynchronously means message dispatching is not guaranteed to
 1006  * stop immediately after this non-blocking call
1007  */
1008  void
1009  stop() {
1010  stopWithContextProperty<cpa>();
1011  }
1012 
1013  /**
1014  * @brief wait until all threads (Pool threads too if apply) of the Context exit
1015  * @details blocking call
1016  */
1017  void
1018  join() {
1019  joinWithContextProperty<cpa>();
1020  }
1021 
1022  /**
 1023  * @brief ipc_creator Context runs a StuckClientPurger to purge crashed (or slow, stuck ...)
 1024  * Clients from the ipc transport to make the ipc transport healthy (avoiding buffer full).
1025  * It periodically looks for things to purge. This is to set the period (default is 60 seconds).
 1026  * @details If some Clients are known to
1027  * take long to process messages, increase it. If you need to remove slow Clients quickly
1028  * reduce it.
1029  * Only effective for ipc_creator Context.
1030  *
1031  * @param s seconds - if set zero, purger is disabled
1032  */
 1033  void
 1034  setSecondsBetweenPurge(uint32_t s) {
 1035  secondsBetweenPurge_ = s;
1036  }
1037 
1038  /**
1039  * @brief normally not used until you want to run your own message loop
1040  * @details call this function frequently to pump hmbdc message loop in its pool
1041  *
 1042  * @param threadSerialNumberInPool starting from 0, indicates which thread in the pool
1043  * is powering the loop
1044  */
1045  void
1046  runOnce(uint16_t threadSerialNumberInPool) {
1047  static_assert(cpa::has_pool, "pool is not support in the Context type");
1048  pool_->runOnce(threadSerialNumberInPool);
1049  }
1050 
1051  /**
1052  * @brief normally not used until you want to run your own message loop
1053  * @details call this function frequently to pump hmbdc message loop for a direct mode Client
1054  *
1055  * @param threadSerialNumber indicate which thread is powering the loop
1056  * @param c the Client
 1057  * @return true when the Client did not terminate itself by throwing an exception
1058  * @return false otherwise
1059  */
1060  template <typename Client>
1061  bool runOnce(Client&& c) {
1062  // c.messageDispatchingStarted(
1063  // hmbdcNumbers_[threadSerialNumber]); //lower level ensures executing only once
1064  uint16_t tn = cpa::broadcast_msg?c.hmbdcNumber:0;
1065  primeForShmAtt(c);
1066 
1067  return context_detail::runOnceImpl(tn
1068  , stopped_, this->buffer_
1069  , c);
1070  }
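 /**
  * a minimal sketch of powering a direct mode Client from a user-owned loop,
  * assuming MyClient is an hmbdc Client type and keepRunning is user-defined:
  * @code
  * MyClient c;
  * ctx.registerToRun(c, 0x1ul);    // reserve a slot, but start no thread
  * while (keepRunning) {
  *     if (!ctx.runOnce(c)) break; // pump message dispatching for c here
  * }
  * @endcode
  */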
1071 
1072 private:
1073  template <typename cpa>
1074  typename std::enable_if<cpa::has_pool && !cpa::pool_msgless, typename Pool::ptr>::type
1075  createPool(size_t maxPoolClientCount) {
1076  return Pool::create(this->buffer(), maxPoolClientCount);
1077  }
1078 
1079  template <typename cpa>
1080  typename std::enable_if<cpa::pool_msgless, typename Pool::ptr>::type
1081  createPool(size_t maxPoolClientCount) {
1082  return Pool::create(maxPoolClientCount);
1083  }
1084 
1085  template <typename cpa>
1086  typename std::enable_if<!cpa::has_pool && !cpa::pool_msgless, typename Pool::ptr>::type
1087  createPool(size_t) {
1088  return typename Pool::ptr();
1089  }
1090 
1091  template <typename cpa>
1092  typename std::enable_if<cpa::has_pool, void>::type
1093  stopWithContextProperty() {
1094  if (pool_) pool_->stop();
1095  __atomic_thread_fence(__ATOMIC_ACQUIRE);
1096  stopped_ = true;
1097  }
1098 
1099  template <typename cpa>
1100  typename std::enable_if<!cpa::has_pool, void>::type
1101  stopWithContextProperty() {
1102  __atomic_thread_fence(__ATOMIC_ACQUIRE);
1103  stopped_ = true;
1104  }
1105 
1106  template <typename cpa>
1107  typename std::enable_if<cpa::has_pool, void>::type
1108  joinWithContextProperty() {
1109  if (pool_) pool_->join();
1110  for (auto& t : threads_) {
1111  t.join();
1112  }
1113  threads_.clear();
1114  }
1115 
1116  template <typename cpa>
1117  typename std::enable_if<!cpa::has_pool, void>::type
1118  joinWithContextProperty() {
1119  for (auto& t : threads_) {
1120  t.join();
1121  }
1122  threads_.clear();
1123  }
1124 
1125  template <typename cpa>
1126  void
1127  reserveSlots(std::list<uint16_t>&) {
1128  }
1129 
1130  template <typename cpa, typename ...Args>
1131  typename std::enable_if<cpa::broadcast_msg && !cpa::pool_msgless, void>::type
1132  reserveSlots(std::list<uint16_t>& slots, uint16_t poolThreadCount, uint64_t, Args&& ... args) {
1133  auto available = this->buffer_.unusedConsumerIndexes();
1134  if (available.size() < poolThreadCount) {
1135  HMBDC_THROW(std::out_of_range
1136  , "Context remaining capacilty = " << available.size()
1137  << ", consider increasing max_parallel_consumer");
1138  }
1139  for (uint16_t i = 0; i < poolThreadCount; ++i) {
1140  slots.push_back(available[i]);
1141  this->buffer_.reset(available[i]);
1142  }
1143  reserveSlots<cpa>(slots, std::forward<Args>(args) ...);
1144  }
1145 
1146  template <typename cpa, typename ...Args>
1147  typename std::enable_if<!cpa::broadcast_msg || cpa::pool_msgless, void>::type
1148  reserveSlots(std::list<uint16_t>& slots, uint16_t poolThreadCount, uint64_t, Args&& ... args) {
1149  reserveSlots<cpa>(slots, std::forward<Args>(args) ...);
1150  }
1151 
1152  template <typename cpa, typename CcClient, typename ...Args>
1153  typename std::enable_if<cpa::broadcast_msg && !std::is_integral<CcClient>::value, void>::type
1154  reserveSlots(std::list<uint16_t>& slots, CcClient& c, uint64_t, Args&& ... args) {
1155  const bool clientParticipateInMessaging =
1156  std::decay<CcClient>::type::INTERESTS_SIZE != 0;
1157  if (clientParticipateInMessaging) {
1158  auto available = this->buffer_.unusedConsumerIndexes();
1159  if (!available.size()) {
1160  HMBDC_THROW(std::out_of_range
1161  , "Context reached capacity, consider increasing max_parallel_consumer");
1162  }
1163  this->buffer_.reset(available[0]);
1164  slots.push_back(available[0]);
1165  }
1166  reserveSlots<cpa>(slots, std::forward<Args>(args) ...);
1167  }
1168 
1169  template <typename cpa, typename CcClient, typename ...Args>
1170  typename std::enable_if<!cpa::broadcast_msg && !std::is_integral<CcClient>::value, void>::type
1171  reserveSlots(std::list<uint16_t>& slots, CcClient& c, uint64_t, Args&& ... args) {
1172  }
1173 
1174  template <typename cpa, typename ...Args>
1175  typename std::enable_if<cpa::ipc, void>::type
1176  startWithContextProperty(bool kickoffThread, Args&& ... args) {
1177  auto& lock = this->allocator_.fileLock();
1178  std::lock_guard<decltype(lock)> g(lock);
1179  std::list<uint16_t> slots;
1180  try {
1181  reserveSlots<cpa>(slots, args ...);
1182  auto sc = slots;
1183  startWithContextPropertyImpl<cpa>(kickoffThread, sc, std::forward<Args>(args) ...);
1184  } catch (std::out_of_range const&) {
1185  Base::markDead(this->buffer_, slots);
1186  throw;
1187  }
1188  }
1189 
1190  template <typename cpa, typename ...Args>
1191  typename std::enable_if<!cpa::ipc, void>::type
1192  startWithContextProperty(bool kickoffThread, Args&& ... args) {
1193  std::list<uint16_t> slots;
1194  try {
1195  reserveSlots<cpa>(slots, args ...);
1196  auto sc = slots;
1197  startWithContextPropertyImpl<cpa>(kickoffThread, sc, std::forward<Args>(args) ...);
1198  } catch (std::out_of_range const&) {
1199  Base::markDead(this->buffer_, slots);
1200  throw;
1201  }
1202  }
1203 
1204  template <typename cpa>
1205  typename std::enable_if<cpa::broadcast_msg && cpa::ipc, void>::type
1206  startWithContextPropertyImpl(bool kickoffThread, std::list<uint16_t>& slots) {
1207  if (ifIpcCreator_ && !purger_ && secondsBetweenPurge_) {
1208  purger_.reset(
1209  new StuckClientPurger<Buffer>(secondsBetweenPurge_, this->buffer_));
1210  startWithContextPropertyImpl<cpa>(kickoffThread, slots, *purger_, purgerCpuAffinityMask_);
1211  }
1212  }
1213 
1214  template <typename cpa>
1215  typename std::enable_if<!cpa::broadcast_msg || !cpa::ipc, void>::type
1216  startWithContextPropertyImpl(bool kickoffThread, std::list<uint16_t>& slots) {
1217  }
1218 
1219  template <typename cpa, typename ...Args>
1220  typename std::enable_if<cpa::has_pool, void>::type
1221  startWithContextPropertyImpl(bool kickoffThread, std::list<uint16_t>& slots
1222  , uint16_t poolThreadCount, uint64_t poolThreadsCpuAffinityMask
1223  , Args&& ... args) {
1224  if (poolThreadCount_) {
1225  HMBDC_THROW(std::out_of_range, "Context pool already started");
1226  }
1227  std::vector<uint16_t> sc(slots.begin(), slots.end());
1228  if (!poolThreadsCpuAffinityMask) {
1229  auto cpuCount = std::thread::hardware_concurrency();
1230  poolThreadsCpuAffinityMask =
1231  ((1ul << poolThreadCount) - 1u) << (hmbdcNumbers_.size() % cpuCount);
1232  }
1233 
1234  pool_->startAt(poolThreadCount, poolThreadsCpuAffinityMask, sc);
1235  while(poolThreadCount--) {
1236  if (!cpa::pool_msgless) {
1237  hmbdcNumbers_.push_back(*slots.begin());
1238  slots.pop_front();
1239  }
1240  }
1241  poolThreadCount_ = poolThreadCount;
1242  startWithContextPropertyImpl<cpa>(kickoffThread, slots, std::forward<Args>(args) ...);
1243  }
1244 
1245  template <typename cpa, typename Client, typename ...Args>
1246  typename std::enable_if<!std::is_integral<Client>::value, void>::type
1247  startWithContextPropertyImpl(bool kickoffThread, std::list<uint16_t>& slots
1248  , Client& c, uint64_t cpuAffinity
1249  , Args&& ... args) {
1250  auto clientParticipateInMessaging =
1251  std::decay<Client>::type::INTERESTS_SIZE;
1252  uint16_t hmbdcNumber = 0xffffu;
1253  if (clientParticipateInMessaging && cpa::broadcast_msg) {
1254  hmbdcNumber = *slots.begin();
1255  c.hmbdcNumber = hmbdcNumber;
1256  slots.pop_front();
1257  }
1258  if (kickoffThread) {
1259  auto thrd = kickOffClientThread(
1260  c, cpuAffinity, hmbdcNumber, hmbdcNumbers_.size());
1261  threads_.push_back(move(thrd));
1262  }
1263  hmbdcNumbers_.push_back(hmbdcNumber);
1264  startWithContextPropertyImpl<cpa>(kickoffThread, slots, std::forward<Args>(args) ...);
1265  }
1266 
1267  template <typename Client>
 1268  auto kickOffClientThread(
 1269  Client& c, uint64_t mask, uint16_t hmbdcNumber, uint16_t threadSerialNumber) {
1270  primeForShmAtt(c);
1271  std::thread thrd([
1272  this
1273  , &c
1274  , mask
1275  , h=hmbdcNumber
1276  , threadSerialNumber
1277  ]() {
1278  auto hmbdcNumber = h;
1279  std::string name;
1280  char const* schedule;
1281  int priority;
1282  auto clientParticipateInMessaging =
1283  std::decay<Client>::type::INTERESTS_SIZE;
1284 
1285 
1286  if (c.hmbdcName()) {
1287  name = c.hmbdcName();
1288  } else {
1289  if (clientParticipateInMessaging) {
1290  name = "hmbdc" + std::to_string(hmbdcNumber);
1291  } else {
1292  name = "hmbdc-x";
1293  }
1294  }
1295  try {
1296  auto cpuAffinityMask = mask;
1297  std::tie(schedule, priority) = c.schedSpec();
1298 
1299  if (!schedule) schedule = "SCHED_OTHER";
1300 
1301  if (!mask) {
1302  auto cpuCount = std::thread::hardware_concurrency();
1303  cpuAffinityMask = 1ul << (threadSerialNumber % cpuCount);
1304  }
1305 
1306  hmbdc::os::configureCurrentThread(name.c_str(), cpuAffinityMask
1307  , schedule, priority);
1308 
1309  hmbdcNumber = clientParticipateInMessaging?hmbdcNumber:0xffffu;
1310  __atomic_add_fetch(this->pDispStartCount_, 1, __ATOMIC_RELEASE);
1311  c.messageDispatchingStartedCb(this->pDispStartCount_);
1312  } catch (std::exception const& e) {
1313  c.stopped(e);
1314  return;
1315  } catch (int code) {
1316  c.stopped(ExitCode(code));
1317  return;
1318  } catch (...) {
 1319  c.stopped(UnknownException());
 1320  return;
1321  }
1322 
1323  while(!stopped_ &&
1324  context_detail::runOnceImpl(hmbdcNumber, this->stopped_, this->buffer_, c)) {
1325  }
1326  if (this->stopped_) {
1327  if (clientParticipateInMessaging) { /// drain all to release sending party
1328  typename Buffer::iterator begin, end;
1329  size_t count;
1330  do {
1331  usleep(10000);
1332  if constexpr (cpa::broadcast_msg) {
1333  count = this->buffer_.peek(hmbdcNumber, begin, end);
1334  this->buffer_.wasteAfterPeek(hmbdcNumber, count);
1335  } else {
1336  count = this->buffer_.peek(begin, end);
1337  this->buffer_.wasteAfterPeek(begin, count);
1338  }
1339  } while (count);
1340  }
1341  c.dropped();
1342  }
1343  if (clientParticipateInMessaging) context_detail::unblock(this->buffer_, hmbdcNumber);
1344  }
1345  );
1346 
1347  return thrd;
1348  }
1349 
1350  template <typename Client>
1351  void primeForShmAtt(Client& c) {
1352  if constexpr (cpa::ipc && context_detail::has_ibmaProc<Client>::value) {
1353  if constexpr(!std::is_same<std::nullptr_t, decltype(c.ibmaProc)>::value) {
1354  if (!c.ibmaProc.hmbdcShmHandleToAddr) {
1355  c.ibmaProc.hmbdcShmHandleToAddr = [&alloc = this->shmAttAllocator_]
1356  (boost::interprocess::managed_shared_memory::handle_t h) {
1357  return alloc->getAddr(h);
1358  };
1359  c.ibmaProc.hmbdcShmDeallocator = [&alloc = this->shmAttAllocator_]
1360  (uint8_t* addr) {
1361  return alloc->deallocate(addr);
1362  };
1363  }
1364  }
1365  }
1366  }
1367 
1368  Context(Context const&) = delete;
1369  Context& operator = (Context const&) = delete;
1370  uint16_t usedHmbdcCapacity_;
1371  std::vector<uint16_t> hmbdcNumbers_;
1372 
1373  bool stopped_;
1374  typename Pool::ptr pool_;
1375  using Threads = std::vector<std::thread>;
1376  Threads threads_;
1377  size_t poolThreadCount_;
1378  uint32_t secondsBetweenPurge_;
1379  bool const ifIpcCreator_ = false;
1380  uint64_t purgerCpuAffinityMask_;
1381  typename std::conditional<cpa::broadcast_msg && cpa::ipc
1382  , std::unique_ptr<StuckClientPurger<Buffer>>, uint32_t
1383  >::type purger_;
1384 };
1385 
1386 }}
1387 