DataTransferKit - Multiphysics Solution Transfer Services 2.0
DTK_PolynomialMatrix.cpp
//---------------------------------------------------------------------------//
/*
  Copyright (c) 2012, Stuart R. Slattery
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are
  met:

  *: Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.

  *: Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.

  *: Neither the name of the University of Wisconsin - Madison nor the
  names of its contributors may be used to endorse or promote products
  derived from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//---------------------------------------------------------------------------//
/*!
 * \file DTK_PolynomialMatrix.cpp
 * \brief Polynomial matrix.
 */
//---------------------------------------------------------------------------//

#include "DTK_PolynomialMatrix.hpp"
#include "DTK_DBC.hpp"

#include <algorithm>

#include <Teuchos_ArrayRCP.hpp>
#include <Teuchos_CommHelpers.hpp>
#include <Teuchos_OpaqueWrapper.hpp>

#ifdef HAVE_MPI
#include <Teuchos_DefaultMpiComm.hpp>
#include <mpi.h>
#endif

#include <Tpetra_Export.hpp>

namespace DataTransferKit
{
//---------------------------------------------------------------------------//
/*!
 * \brief Constructor.
 */
PolynomialMatrix::PolynomialMatrix(
    const Teuchos::RCP<const Tpetra::MultiVector<double, int, SupportId>>
        &polynomial,
    const Teuchos::RCP<const Tpetra::Map<int, SupportId>> &domain_map,
    const Teuchos::RCP<const Tpetra::Map<int, SupportId>> &range_map )
    : d_comm( polynomial->getMap()->getComm() )
    , d_polynomial( polynomial )
    , d_domain_map( domain_map )
    , d_range_map( range_map )
{ /* ... */
}

//---------------------------------------------------------------------------//
// Apply operation.
void PolynomialMatrix::apply(
    const Tpetra::MultiVector<double, int, SupportId> &X,
    Tpetra::MultiVector<double, int, SupportId> &Y, Teuchos::ETransp mode,
    double alpha, double beta ) const
{
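    // This computes Y <- beta * Y + alpha * op(P) * X, where P is the dense
    // polynomial multivector held by this operator. In NO_TRANS mode the
    // leading poly_size entries of each column of X are read on the root rank
    // and broadcast to all ranks before the local mat-vec; in TRANS mode the
    // local products P^T * X are reduced onto the root rank.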
    DTK_REQUIRE( d_domain_map->isSameAs( *( X.getMap() ) ) );
    DTK_REQUIRE( d_range_map->isSameAs( *( Y.getMap() ) ) );
    DTK_REQUIRE( X.getNumVectors() == Y.getNumVectors() );

    // Get the size of the problem and view of the local vectors.
    int poly_size = d_polynomial->getNumVectors();
    int num_vec = X.getNumVectors();
    int local_length = d_polynomial->getLocalLength();
    Teuchos::ArrayRCP<Teuchos::ArrayRCP<const double>> poly_view =
        d_polynomial->get2dView();
    Teuchos::ArrayRCP<Teuchos::ArrayRCP<double>> y_view = Y.get2dViewNonConst();

    // Scale Y by beta.
    Y.scale( beta );

    // No transpose.
    if ( Teuchos::NO_TRANS == mode )
    {
        // Broadcast the polynomial components of X from the root rank.
        Teuchos::Array<double> x_poly( poly_size * num_vec, 0.0 );
        if ( 0 == d_comm()->getRank() )
        {
            Teuchos::ArrayRCP<Teuchos::ArrayRCP<const double>> x_view =
                X.get2dView();
            for ( int n = 0; n < num_vec; ++n )
            {
                std::copy( &x_view[n][0], &x_view[n][0] + poly_size,
                           &x_poly[n * poly_size] );
            }
        }
        Teuchos::broadcast( *d_comm, 0, x_poly() );

        // Do the local mat-vec.
        int stride = 0;
        for ( int n = 0; n < num_vec; ++n )
        {
            stride = n * poly_size;

            for ( int i = 0; i < local_length; ++i )
            {
                for ( int p = 0; p < poly_size; ++p )
                {
                    y_view[n][i] +=
                        alpha * poly_view[p][i] * x_poly[stride + p];
                }
            }
        }
    }

    // Transpose.
    else if ( Teuchos::TRANS == mode )
    {
        // Make a work vector.
        Tpetra::MultiVector<double, int, SupportId> work( Y.getMap(),
                                                          Y.getNumVectors() );

        // Export X to the polynomial decomposition.
        Tpetra::Export<int, SupportId> exporter( X.getMap(), work.getMap() );
        work.doExport( X, exporter, Tpetra::INSERT );

        // Do the local mat-vec.
        Teuchos::ArrayRCP<Teuchos::ArrayRCP<double>> work_view =
            work.get2dViewNonConst();
        Teuchos::Array<double> products( poly_size * num_vec, 0.0 );
        int stride = 0;
        for ( int n = 0; n < num_vec; ++n )
        {
            stride = n * poly_size;
            for ( int p = 0; p < poly_size; ++p )
            {
                for ( int i = 0; i < local_length; ++i )
                {
                    products[stride + p] += poly_view[p][i] * work_view[n][i];
                }
            }
        }

        // Reduce the results back to the root rank.
        Teuchos::Array<double> product_sums( poly_size * num_vec, 0.0 );
#ifdef HAVE_MPI
        Teuchos::RCP<const Teuchos::MpiComm<int>> mpi_comm =
            Teuchos::rcp_dynamic_cast<const Teuchos::MpiComm<int>>( d_comm );
        Teuchos::RCP<const Teuchos::OpaqueWrapper<MPI_Comm>> opaque_comm =
            mpi_comm->getRawMpiComm();
        MPI_Comm raw_comm = ( *opaque_comm )();
        MPI_Reduce( products.getRawPtr(), product_sums.getRawPtr(),
                    poly_size * num_vec, MPI_DOUBLE, MPI_SUM, 0, raw_comm );
#else
        product_sums = products;
#endif

        // Assign the values to Y on the root rank.
        if ( 0 == d_comm->getRank() )
        {
            for ( int n = 0; n < num_vec; ++n )
            {
                for ( int p = 0; p < poly_size; ++p )
                {
                    y_view[n][p] += alpha * product_sums[n * poly_size + p];
                }
            }
        }
    }

    else
    {
        DTK_INSIST( mode == Teuchos::NO_TRANS || mode == Teuchos::TRANS );
    }
}

//---------------------------------------------------------------------------//

} // end namespace DataTransferKit

//---------------------------------------------------------------------------//
// end DTK_PolynomialMatrix.cpp
//---------------------------------------------------------------------------//
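Below is a minimal usage sketch of the operator defined in this file. It assumes Tpetra/MPI are already initialized (e.g. in main), that SupportId is DTK's global-ordinal typedef pulled in through DTK_PolynomialMatrix.hpp, and that all polynomial coefficients are owned by the root rank while the supports are distributed uniformly. The map layout, the sizes, and the names support_map, coeff_map, and polynomial_matrix_sketch are illustrative assumptions, not part of DTK's API.

#include "DTK_PolynomialMatrix.hpp"

#include <Teuchos_DefaultComm.hpp>
#include <Teuchos_RCP.hpp>
#include <Tpetra_Map.hpp>
#include <Tpetra_MultiVector.hpp>

void polynomial_matrix_sketch()
{
    using Map = Tpetra::Map<int, DataTransferKit::SupportId>;
    using MV = Tpetra::MultiVector<double, int, DataTransferKit::SupportId>;

    auto comm = Teuchos::DefaultComm<int>::getComm();

    // Polynomial basis: 4 columns (e.g. 1, x, y, z) over 100 supports
    // distributed uniformly across the ranks.
    const int poly_size = 4;
    auto support_map = Teuchos::rcp( new Map( 100, 0, comm ) );
    auto polynomial = Teuchos::rcp( new MV( support_map, poly_size ) );
    polynomial->putScalar( 1.0 ); // basis values would come from the cloud.

    // All polynomial coefficients live on the root rank, matching the
    // broadcast/reduce pattern in apply().
    const size_t local_coeffs = ( comm->getRank() == 0 ) ? poly_size : 0;
    auto coeff_map =
        Teuchos::rcp( new Map( poly_size, local_coeffs, 0, comm ) );

    // Operator: domain = coefficients, range = supports.
    DataTransferKit::PolynomialMatrix P( polynomial, coeff_map, support_map );

    MV x( coeff_map, 1 );
    MV y( support_map, 1 );
    x.putScalar( 1.0 );

    // y <- 1.0 * P * x + 0.0 * y.
    P.apply( x, y, Teuchos::NO_TRANS, 1.0, 0.0 );
}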