35 #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ 36 #define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ 94 template<
typename MT1
98 void smpAssign_backend( DenseMatrix<MT1,SO1>& lhs,
const DenseMatrix<MT2,SO2>& rhs )
104 using ET1 = ElementType_<MT1>;
105 using ET2 = ElementType_<MT2>;
107 constexpr
bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
108 constexpr
size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::
size );
110 const bool lhsAligned( (~lhs).isAligned() );
111 const bool rhsAligned( (~rhs).isAligned() );
113 const int threads( omp_get_num_threads() );
114 const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
116 const size_t addon1 ( ( ( (~rhs).
rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
117 const size_t equalShare1( (~rhs).
rows() / threadmap.first + addon1 );
118 const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
119 const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
121 const size_t addon2 ( ( ( (~rhs).
columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
122 const size_t equalShare2( (~rhs).
columns() / threadmap.second + addon2 );
123 const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
124 const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
126 #pragma omp for schedule(dynamic,1) nowait 127 for(
int i=0; i<threads; ++i )
129 const size_t row ( ( i / threadmap.second ) * rowsPerThread );
130 const size_t column( ( i % threadmap.second ) * colsPerThread );
135 const size_t m(
min( rowsPerThread, (~rhs).
rows() -
row ) );
138 if( simdEnabled && lhsAligned && rhsAligned ) {
139 auto target( submatrix<aligned>( ~lhs,
row,
column, m, n ) );
140 assign( target, submatrix<aligned>( ~rhs,
row,
column, m, n ) );
142 else if( simdEnabled && lhsAligned ) {
143 auto target( submatrix<aligned>( ~lhs,
row,
column, m, n ) );
144 assign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
146 else if( simdEnabled && rhsAligned ) {
147 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
148 assign( target, submatrix<aligned>( ~rhs,
row,
column, m, n ) );
151 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
152 assign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
176 template<
typename MT1
180 void smpAssign_backend( DenseMatrix<MT1,SO1>& lhs,
const SparseMatrix<MT2,SO2>& rhs )
186 const size_t threads( omp_get_num_threads() );
187 const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
189 const size_t addon1 ( ( ( (~rhs).
rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
190 const size_t rowsPerThread( (~rhs).
rows() / threadmap.first + addon1 );
192 const size_t addon2 ( ( ( (~rhs).
columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
193 const size_t colsPerThread( (~rhs).
columns() / threadmap.second + addon2 );
195 #pragma omp for schedule(dynamic,1) nowait 196 for(
size_t i=0; i<threads; ++i )
198 const size_t row ( ( i / threadmap.second ) * rowsPerThread );
199 const size_t column( ( i % threadmap.second ) * colsPerThread );
204 const size_t m(
min( rowsPerThread, (~lhs).
rows() -
row ) );
207 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
208 assign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
233 template<
typename MT1
237 inline EnableIf_< And< IsDenseMatrix<MT1>
238 , Or< Not< IsSMPAssignable<MT1> >
239 , Not< IsSMPAssignable<MT2> > > > >
240 smpAssign( Matrix<MT1,SO1>& lhs,
const Matrix<MT2,SO2>& rhs )
247 assign( ~lhs, ~rhs );
271 template<
typename MT1
275 inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
276 smpAssign( Matrix<MT1,SO1>& lhs,
const Matrix<MT2,SO2>& rhs )
289 assign( ~lhs, ~rhs );
292 #pragma omp parallel shared( lhs, rhs ) 293 smpAssign_backend( ~lhs, ~rhs );
325 template<
typename MT1
329 void smpAddAssign_backend( DenseMatrix<MT1,SO1>& lhs,
const DenseMatrix<MT2,SO2>& rhs )
335 using ET1 = ElementType_<MT1>;
336 using ET2 = ElementType_<MT2>;
338 constexpr
bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
339 constexpr
size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::
size );
341 const bool lhsAligned( (~lhs).isAligned() );
342 const bool rhsAligned( (~rhs).isAligned() );
344 const int threads( omp_get_num_threads() );
345 const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
347 const size_t addon1 ( ( ( (~rhs).
rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
348 const size_t equalShare1( (~rhs).
rows() / threadmap.first + addon1 );
349 const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
350 const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
352 const size_t addon2 ( ( ( (~rhs).
columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
353 const size_t equalShare2( (~rhs).
columns() / threadmap.second + addon2 );
354 const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
355 const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
357 #pragma omp for schedule(dynamic,1) nowait 358 for(
int i=0; i<threads; ++i )
360 const size_t row ( ( i / threadmap.second ) * rowsPerThread );
361 const size_t column( ( i % threadmap.second ) * colsPerThread );
366 const size_t m(
min( rowsPerThread, (~rhs).
rows() -
row ) );
369 if( simdEnabled && lhsAligned && rhsAligned ) {
370 auto target( submatrix<aligned>( ~lhs,
row,
column, m, n ) );
371 addAssign( target, submatrix<aligned>( ~rhs,
row,
column, m, n ) );
373 else if( simdEnabled && lhsAligned ) {
374 auto target( submatrix<aligned>( ~lhs,
row,
column, m, n ) );
375 addAssign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
377 else if( simdEnabled && rhsAligned ) {
378 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
379 addAssign( target, submatrix<aligned>( ~rhs,
row,
column, m, n ) );
382 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
383 addAssign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
407 template<
typename MT1
411 void smpAddAssign_backend( DenseMatrix<MT1,SO1>& lhs,
const SparseMatrix<MT2,SO2>& rhs )
417 const size_t threads( omp_get_num_threads() );
418 const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
420 const size_t addon1 ( ( ( (~rhs).
rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
421 const size_t rowsPerThread( (~rhs).
rows() / threadmap.first + addon1 );
423 const size_t addon2 ( ( ( (~rhs).
columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
424 const size_t colsPerThread( (~rhs).
columns() / threadmap.second + addon2 );
426 #pragma omp for schedule(dynamic,1) nowait 427 for(
size_t i=0; i<threads; ++i )
429 const size_t row ( ( i / threadmap.second ) * rowsPerThread );
430 const size_t column( ( i % threadmap.second ) * colsPerThread );
435 const size_t m(
min( rowsPerThread, (~lhs).
rows() -
row ) );
438 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
439 addAssign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
464 template<
typename MT1
468 inline EnableIf_< And< IsDenseMatrix<MT1>
469 , Or< Not< IsSMPAssignable<MT1> >
470 , Not< IsSMPAssignable<MT2> > > > >
471 smpAddAssign( Matrix<MT1,SO1>& lhs,
const Matrix<MT2,SO2>& rhs )
478 addAssign( ~lhs, ~rhs );
502 template<
typename MT1
506 inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
507 smpAddAssign( Matrix<MT1,SO1>& lhs,
const Matrix<MT2,SO2>& rhs )
520 addAssign( ~lhs, ~rhs );
523 #pragma omp parallel shared( lhs, rhs ) 524 smpAddAssign_backend( ~lhs, ~rhs );
556 template<
typename MT1
560 void smpSubAssign_backend( DenseMatrix<MT1,SO1>& lhs,
const DenseMatrix<MT2,SO2>& rhs )
566 using ET1 = ElementType_<MT1>;
567 using ET2 = ElementType_<MT2>;
569 constexpr
bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
570 constexpr
size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::
size );
572 const bool lhsAligned( (~lhs).isAligned() );
573 const bool rhsAligned( (~rhs).isAligned() );
575 const int threads( omp_get_num_threads() );
576 const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
578 const size_t addon1 ( ( ( (~rhs).
rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
579 const size_t equalShare1( (~rhs).
rows() / threadmap.first + addon1 );
580 const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
581 const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
583 const size_t addon2 ( ( ( (~rhs).
columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
584 const size_t equalShare2( (~rhs).
columns() / threadmap.second + addon2 );
585 const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
586 const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
588 #pragma omp for schedule(dynamic,1) nowait 589 for(
int i=0; i<threads; ++i )
591 const size_t row ( ( i / threadmap.second ) * rowsPerThread );
592 const size_t column( ( i % threadmap.second ) * colsPerThread );
597 const size_t m(
min( rowsPerThread, (~rhs).
rows() -
row ) );
600 if( simdEnabled && lhsAligned && rhsAligned ) {
601 auto target( submatrix<aligned>( ~lhs,
row,
column, m, n ) );
602 subAssign( target, submatrix<aligned>( ~rhs,
row,
column, m, n ) );
604 else if( simdEnabled && lhsAligned ) {
605 auto target( submatrix<aligned>( ~lhs,
row,
column, m, n ) );
606 subAssign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
608 else if( simdEnabled && rhsAligned ) {
609 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
610 subAssign( target, submatrix<aligned>( ~rhs,
row,
column, m, n ) );
613 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
614 subAssign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
639 template<
typename MT1
643 void smpSubAssign_backend( DenseMatrix<MT1,SO1>& lhs,
const SparseMatrix<MT2,SO2>& rhs )
649 const size_t threads( omp_get_num_threads() );
650 const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
652 const size_t addon1 ( ( ( (~rhs).
rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
653 const size_t rowsPerThread( (~rhs).
rows() / threadmap.first + addon1 );
655 const size_t addon2 ( ( ( (~rhs).
columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
656 const size_t colsPerThread( (~rhs).
columns() / threadmap.second + addon2 );
658 #pragma omp for schedule(dynamic,1) nowait 659 for(
size_t i=0; i<threads; ++i )
661 const size_t row ( ( i / threadmap.second ) * rowsPerThread );
662 const size_t column( ( i % threadmap.second ) * colsPerThread );
667 const size_t m(
min( rowsPerThread, (~lhs).
rows() -
row ) );
670 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
671 subAssign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
696 template<
typename MT1
700 inline EnableIf_< And< IsDenseMatrix<MT1>
701 , Or< Not< IsSMPAssignable<MT1> >
702 , Not< IsSMPAssignable<MT2> > > > >
703 smpSubAssign( Matrix<MT1,SO1>& lhs,
const Matrix<MT2,SO2>& rhs )
710 subAssign( ~lhs, ~rhs );
734 template<
typename MT1
738 inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
739 smpSubAssign( Matrix<MT1,SO1>& lhs,
const Matrix<MT2,SO2>& rhs )
752 subAssign( ~lhs, ~rhs );
755 #pragma omp parallel shared( lhs, rhs ) 756 smpSubAssign_backend( ~lhs, ~rhs );
789 template<
typename MT1
793 void smpSchurAssign_backend( DenseMatrix<MT1,SO1>& lhs,
const DenseMatrix<MT2,SO2>& rhs )
799 using ET1 = ElementType_<MT1>;
800 using ET2 = ElementType_<MT2>;
802 constexpr
bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
803 constexpr
size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::
size );
805 const bool lhsAligned( (~lhs).isAligned() );
806 const bool rhsAligned( (~rhs).isAligned() );
808 const int threads( omp_get_num_threads() );
809 const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
811 const size_t addon1 ( ( ( (~rhs).
rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
812 const size_t equalShare1( (~rhs).
rows() / threadmap.first + addon1 );
813 const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
814 const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
816 const size_t addon2 ( ( ( (~rhs).
columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
817 const size_t equalShare2( (~rhs).
columns() / threadmap.second + addon2 );
818 const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
819 const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
821 #pragma omp for schedule(dynamic,1) nowait 822 for(
int i=0; i<threads; ++i )
824 const size_t row ( ( i / threadmap.second ) * rowsPerThread );
825 const size_t column( ( i % threadmap.second ) * colsPerThread );
830 const size_t m(
min( rowsPerThread, (~rhs).
rows() -
row ) );
833 if( simdEnabled && lhsAligned && rhsAligned ) {
834 auto target( submatrix<aligned>( ~lhs,
row,
column, m, n ) );
835 schurAssign( target, submatrix<aligned>( ~rhs,
row,
column, m, n ) );
837 else if( simdEnabled && lhsAligned ) {
838 auto target( submatrix<aligned>( ~lhs,
row,
column, m, n ) );
839 schurAssign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
841 else if( simdEnabled && rhsAligned ) {
842 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
843 schurAssign( target, submatrix<aligned>( ~rhs,
row,
column, m, n ) );
846 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
847 schurAssign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
872 template<
typename MT1
876 void smpSchurAssign_backend( DenseMatrix<MT1,SO1>& lhs,
const SparseMatrix<MT2,SO2>& rhs )
882 const size_t threads( omp_get_num_threads() );
883 const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
885 const size_t addon1 ( ( ( (~rhs).
rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
886 const size_t rowsPerThread( (~rhs).
rows() / threadmap.first + addon1 );
888 const size_t addon2 ( ( ( (~rhs).
columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
889 const size_t colsPerThread( (~rhs).
columns() / threadmap.second + addon2 );
891 #pragma omp for schedule(dynamic,1) nowait 892 for(
size_t i=0; i<threads; ++i )
894 const size_t row ( ( i / threadmap.second ) * rowsPerThread );
895 const size_t column( ( i % threadmap.second ) * colsPerThread );
900 const size_t m(
min( rowsPerThread, (~lhs).
rows() -
row ) );
903 auto target( submatrix<unaligned>( ~lhs,
row,
column, m, n ) );
904 schurAssign( target, submatrix<unaligned>( ~rhs,
row,
column, m, n ) );
929 template<
typename MT1
933 inline EnableIf_< And< IsDenseMatrix<MT1>
934 , Or< Not< IsSMPAssignable<MT1> >
935 , Not< IsSMPAssignable<MT2> > > > >
936 smpSchurAssign( Matrix<MT1,SO1>& lhs,
const Matrix<MT2,SO2>& rhs )
943 schurAssign( ~lhs, ~rhs );
967 template<
typename MT1
971 inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
972 smpSchurAssign( Matrix<MT1,SO1>& lhs,
const Matrix<MT2,SO2>& rhs )
985 schurAssign( ~lhs, ~rhs );
988 #pragma omp parallel shared( lhs, rhs ) 989 smpSchurAssign_backend( ~lhs, ~rhs );
1021 template<
typename MT1
1025 inline EnableIf_< IsDenseMatrix<MT1> >
1026 smpMultAssign( Matrix<MT1,SO1>& lhs,
const Matrix<MT2,SO2>& rhs )
1033 multAssign( ~lhs, ~rhs );
Header file for the implementation of the Submatrix view.
Header file for auxiliary alias declarations.
Header file for the generic min algorithm.
Header file for the alignment flag values.
EnableIf_< IsDenseMatrix< MT1 > > smpSchurAssign(Matrix< MT1, SO1 > &lhs, const Matrix< MT2, SO2 > &rhs)
Default implementation of the SMP Schur product assignment of a matrix to dense matrix.
Definition: DenseMatrix.h:196
Header file for basic type definitions.
EnableIf_< IsDenseMatrix< MT1 > > smpSubAssign(Matrix< MT1, SO1 > &lhs, const Matrix< MT2, SO2 > &rhs)
Default implementation of the SMP subtraction assignment of a matrix to dense matrix.
Definition: DenseMatrix.h:164
BLAZE_ALWAYS_INLINE size_t size(const Vector< VT, TF > &vector) noexcept
Returns the current size/dimension of the vector.
Definition: Vector.h:265
EnableIf_< IsDenseVector< VT1 > > smpMultAssign(Vector< VT1, TF1 > &lhs, const Vector< VT2, TF2 > &rhs)
Default implementation of the SMP multiplication assignment of a vector to a dense vector...
Definition: DenseVector.h:193
Header file for the And class template.
const ElementType_< MT > min(const DenseMatrix< MT, SO > &dm)
Returns the smallest element of the dense matrix.
Definition: DenseMatrix.h:1762
Column< MT > column(Matrix< MT, SO > &matrix, size_t index)
Creating a view on a specific column of the given matrix.
Definition: Column.h:124
Header file for the SIMD trait.
EnableIf_< IsDenseMatrix< MT1 > > smpAddAssign(Matrix< MT1, SO1 > &lhs, const Matrix< MT2, SO2 > &rhs)
Default implementation of the SMP addition assignment of a matrix to a dense matrix.
Definition: DenseMatrix.h:133
Header file for the SparseMatrix base class.
Header file for the SMP thread mapping functionality.
Row< MT > row(Matrix< MT, SO > &matrix, size_t index)
Creating a view on a specific row of the given matrix.
Definition: Row.h:124
Header file for the matrix storage order types.
#define BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE(T)
Constraint on the data type.In case the given data type T is SMP-assignable (can be assigned by multi...
Definition: SMPAssignable.h:81
Namespace of the Blaze C++ math library.
Definition: Blaze.h:57
System settings for the shared-memory parallelization.
Header file for the IsSMPAssignable type trait.
EnableIf_< IsDenseMatrix< MT1 > > smpAssign(Matrix< MT1, SO1 > &lhs, const Matrix< MT2, SO2 > &rhs)
Default implementation of the SMP assignment of a matrix to a dense matrix.
Definition: DenseMatrix.h:102
Header file for the Or class template.
Header file for the DenseMatrix base class.
Header file for the Not class template.
BLAZE_ALWAYS_INLINE size_t columns(const Matrix< MT, SO > &matrix) noexcept
Returns the current number of columns of the matrix.
Definition: Matrix.h:340
Header file for the serial section implementation.
Header file for the parallel section implementation.
Header file for the IsDenseMatrix type trait.
Header file for the EnableIf class template.
#define BLAZE_PARALLEL_SECTION
Section for the debugging of the shared-memory parallelization.During the shared-memory parallel (SMP...
Definition: ParallelSection.h:246
bool isSerialSectionActive()
Returns whether a serial section is active or not.
Definition: SerialSection.h:213
Header file for the IsSIMDCombinable type trait.
Header file for run time assertion macros.
#define BLAZE_FUNCTION_TRACE
Function trace macro.This macro can be used to reliably trace function calls. In case function tracin...
Definition: FunctionTrace.h:94
BLAZE_ALWAYS_INLINE size_t rows(const Matrix< MT, SO > &matrix) noexcept
Returns the current number of rows of the matrix.
Definition: Matrix.h:324
bool isParallelSectionActive()
Returns whether a parallel section is active or not.
Definition: ParallelSection.h:213
#define BLAZE_OPENMP_PARALLEL_MODE
Compilation switch for the OpenMP parallelization.This compilation switch enables/disables the OpenMP...
Definition: SMP.h:67
#define BLAZE_STATIC_ASSERT(expr)
Compile time assertion macro.In case of an invalid compile time expression, a compilation error is cr...
Definition: StaticAssert.h:112
#define BLAZE_INTERNAL_ASSERT(expr, msg)
Run time assertion macro for internal checks.In case of an invalid run time expression, the program execution is terminated. The BLAZE_INTERNAL_ASSERT macro can be disabled by setting the BLAZE_USER_ASSERTION flag to zero or by defining NDEBUG during the compilation.
Definition: Assert.h:101
Constraint on the data type.
Header file for the function trace functionality.