Commits

Garth Wells committed 2d1a700 Merge

Merge branch 'blechta/fix-issue-336'

  • Participants
  • Parent commits f8bd39c, f1832b8

Comments (0)

Files changed (2)

File dolfin/swig/la/post.i

   PyObject* _data()
   {
     PyObject* rows = %make_numpy_array(1, size_t)(self->size(0)+1,
-						  boost::tuples::get<0>(self->data()),
-						  false);
+                                                 boost::tuples::get<0>(self->data()),
+                                                 false);
     PyObject* cols = %make_numpy_array(1, size_t)(boost::tuples::get<3>(self->data()),
-						 boost::tuples::get<1>(self->data()),
-						 false);
+                                                 boost::tuples::get<1>(self->data()),
+                                                 false);
     PyObject* values = %make_numpy_array(1, double)(boost::tuples::get<3>(self->data()),
-						    boost::tuples::get<2>(self->data()),
-						    false);
+                                                   boost::tuples::get<2>(self->data()),
+                                                   false);
 
     if ( rows == NULL || cols == NULL || values == NULL)
       return NULL;
 // ---------------------------------------------------------------------------
 %define AS_BACKEND_TYPE_MACRO(TENSOR_TYPE)
 %inline %{
-bool _has_type_ ## TENSOR_TYPE(const std::shared_ptr<dolfin::GenericTensor> tensor)
+bool _has_type_ ## TENSOR_TYPE(const std::shared_ptr<dolfin::LinearAlgebraObject> tensor)
 { return dolfin::has_type<dolfin::TENSOR_TYPE>(*tensor); }
 
-std::shared_ptr<dolfin::TENSOR_TYPE> _as_backend_type_ ## TENSOR_TYPE(const std::shared_ptr<dolfin::GenericTensor> tensor)
+std::shared_ptr<dolfin::TENSOR_TYPE> _as_backend_type_ ## TENSOR_TYPE(const std::shared_ptr<dolfin::LinearAlgebraObject> tensor)
 { return dolfin::as_type<dolfin::TENSOR_TYPE>(tensor); }
 %}
 
 // Run the downcast macro
 // ---------------------------------------------------------------------------
 AS_BACKEND_TYPE_MACRO(uBLASVector)
+AS_BACKEND_TYPE_MACRO(uBLASLinearOperator)
 
 // NOTE: Silly SWIG forces us to describe the type explicitly for uBLASMatrices
 %inline %{
-bool _has_type_uBLASDenseMatrix(const std::shared_ptr<dolfin::GenericTensor> tensor)
+bool _has_type_uBLASDenseMatrix(const std::shared_ptr<dolfin::LinearAlgebraObject> tensor)
 { return dolfin::has_type<dolfin::uBLASMatrix<boost::numeric::ublas::matrix<double> > >(*tensor); }
 
-std::shared_ptr<dolfin::uBLASMatrix<boost::numeric::ublas::matrix<double> > > _as_backend_type_uBLASDenseMatrix(const std::shared_ptr<dolfin::GenericTensor> tensor)
+std::shared_ptr<dolfin::uBLASMatrix<boost::numeric::ublas::matrix<double> > > _as_backend_type_uBLASDenseMatrix(const std::shared_ptr<dolfin::LinearAlgebraObject> tensor)
 { return dolfin::as_type<dolfin::uBLASMatrix<boost::numeric::ublas::matrix<double> > >(tensor); }
 
-bool _has_type_uBLASSparseMatrix(const std::shared_ptr<dolfin::GenericTensor > tensor)
+bool _has_type_uBLASSparseMatrix(const std::shared_ptr<dolfin::LinearAlgebraObject> tensor)
 { return dolfin::has_type<dolfin::uBLASMatrix<boost::numeric::ublas::compressed_matrix<double, boost::numeric::ublas::row_major> > >(*tensor); }
 
-const std::shared_ptr<dolfin::uBLASMatrix<boost::numeric::ublas::compressed_matrix<double, boost::numeric::ublas::row_major> > > _as_backend_type_uBLASSparseMatrix(const std::shared_ptr<dolfin::GenericTensor> tensor)
+const std::shared_ptr<dolfin::uBLASMatrix<boost::numeric::ublas::compressed_matrix<double, boost::numeric::ublas::row_major> > > _as_backend_type_uBLASSparseMatrix(const std::shared_ptr<dolfin::LinearAlgebraObject> tensor)
 { return dolfin::as_type<dolfin::uBLASMatrix<boost::numeric::ublas::compressed_matrix<double, boost::numeric::ublas::row_major> > >(tensor); }
 %}
 
 // ---------------------------------------------------------------------------
 %pythoncode %{
 _matrix_vector_mul_map[uBLASSparseMatrix] = [uBLASVector]
-_matrix_vector_mul_map[uBLASDenseMatrix]  = [uBLASVector]
+_matrix_vector_mul_map[uBLASDenseMatrix] = [uBLASVector]
+_matrix_vector_mul_map[uBLASLinearOperator] = [uBLASVector]
 %}
 
 // ---------------------------------------------------------------------------
 #ifdef HAS_PETSC
 AS_BACKEND_TYPE_MACRO(PETScVector)
 AS_BACKEND_TYPE_MACRO(PETScMatrix)
+AS_BACKEND_TYPE_MACRO(PETScLinearOperator)
 
 %pythoncode %{
 _matrix_vector_mul_map[PETScMatrix] = [PETScVector]
+_matrix_vector_mul_map[PETScLinearOperator] = [PETScVector]
 %}
 
 #ifdef HAS_PETSC4PY

File test/unit/la/python/LinearOperator.py

         # Iterate over backends supporting linear operators
         for backend in backends:
 
-            # Check whether backend is available
-            if not has_linear_algebra_backend(backend):
-                continue
-
-            # Set linear algebra backend
-            parameters["linear_algebra_backend"] = backend
-
-            # Compute reference value by solving ordinary linear system
-            mesh = UnitSquareMesh(8, 8)
-
-            # Skip testing uBLAS in parallel
-            if MPI.size(mesh.mpi_comm()) > 1 and backend == "uBLAS":
-                print "Not running uBLAS test in parallel"
-                continue
-
-            V = FunctionSpace(mesh, "Lagrange", 1)
-            u = TrialFunction(V)
-            v = TestFunction(V)
-            f = Constant(1.0)
-            a = dot(grad(u), grad(v))*dx + u*v*dx
-            L = f*v*dx
-            A = assemble(a)
-            b = assemble(L)
-            x = Vector()
-            solve(A, x, b, "gmres", "none")
-            norm_ref = norm(x, "l2")
-
-            # Solve using linear operator defined by form action
-            u = Function(V)
-            a_action = action(a, coefficient=u)
-            O = MyLinearOperator(a_action, u)
-            solve(O, x, b, "gmres", "none")
-            norm_action = norm(x, "l2")
+            # Try wrapped and backend implementation of operator
+            for _as_backend_type in [lambda x:x, as_backend_type]:
+
+                # Check whether backend is available
+                if not has_linear_algebra_backend(backend):
+                    continue
+
+                # Set linear algebra backend
+                parameters["linear_algebra_backend"] = backend
+
+                # Compute reference value by solving ordinary linear system
+                mesh = UnitSquareMesh(8, 8)
+
+                # Skip testing uBLAS in parallel
+                if MPI.size(mesh.mpi_comm()) > 1 and backend == "uBLAS":
+                    print "Not running uBLAS test in parallel"
+                    continue
+
+                V = FunctionSpace(mesh, "Lagrange", 1)
+                u = TrialFunction(V)
+                v = TestFunction(V)
+                f = Constant(1.0)
+                a = dot(grad(u), grad(v))*dx + u*v*dx
+                L = f*v*dx
+                A = assemble(a)
+                b = assemble(L)
+                x = Vector()
+                solve(A, x, b, "gmres", "none")
+                norm_ref = norm(x, "l2")
+
+                # Solve using linear operator defined by form action
+                u = Function(V)
+                a_action = action(a, coefficient=u)
+                O = MyLinearOperator(a_action, u)
+                O = _as_backend_type(O)
+                solve(O, x, b, "gmres", "none")
+                norm_action = norm(x, "l2")
+
+                # Check at least that petsc4py interface is available
+                if backend == 'PETSc' and has_petsc4py() and \
+                  _as_backend_type == as_backend_type:
+                    from petsc4py import PETSc
+                    self.assertTrue(isinstance(O.mat(), PETSc.Mat))
+
 
 if __name__ == "__main__":