dune-alugrid 2.8.0
structuredgridfactory.hh
#ifndef DUNE_ALUGRID_STRUCTUREDGRIDFACTORY_HH
#define DUNE_ALUGRID_STRUCTUREDGRIDFACTORY_HH

#include <algorithm>
#include <array>
#include <bitset>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include <dune/common/version.hh>

#include <dune/common/classname.hh>
#include <dune/common/exceptions.hh>
#include <dune/common/fvector.hh>
#include <dune/common/shared_ptr.hh>
#include <dune/common/parallel/mpihelper.hh>

#include <dune/grid/common/gridfactory.hh>
#include <dune/grid/common/exceptions.hh>

#include <dune/alugrid/common/alugrid_assert.hh>
#include <dune/alugrid/common/declaration.hh>

#include <dune/alugrid/common/hsfc.hh>

// include DGF parser implementation for YaspGrid
#include <dune/grid/io/file/dgfparser/dgfyasp.hh>

namespace Dune
{

  // External Forward Declarations
  // -----------------------------

  template< class Grid >
  class StructuredGridFactory;


  // StructuredGridFactory for ALUGrid
  // ---------------------------------

  template< int dim, int dimworld, ALUGridElementType eltype, ALUGridRefinementType refineType, class Comm >
  class StructuredGridFactory< ALUGrid< dim, dimworld, eltype, refineType, Comm > >
  {
  public:
    typedef ALUGrid< dim, dimworld, eltype, refineType, Comm > Grid;
#if DUNE_VERSION_NEWER(DUNE_GRID, 2, 6)
    typedef std::unique_ptr< Grid > SharedPtrType;
#else // #if DUNE_VERSION_NEWER(DUNE_GRID, 2, 6)
    // mygrid_ptr in GridPtr derives from std::shared_ptr and implements a release method
    typedef typename Dune::GridPtr< Grid > :: mygrid_ptr SharedPtrType;
#endif // #else // #if DUNE_VERSION_NEWER(DUNE_GRID, 2, 6)

  protected:
    typedef StructuredGridFactory< Grid > This;

  private:
    // SimplePartitioner
    // -----------------
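    // Assigns every element of the structured macro grid to an MPI rank:
    // the elements are ordered (by index or, if USE_ALUGRID_SFC_ORDERING
    // is defined, along a space filling curve) and split into contiguous
    // blocks of nearly equal size, one block per rank.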
    template< class GV, PartitionIteratorType pitype, class IS = typename GV::IndexSet >
    class SimplePartitioner
    {
      typedef SimplePartitioner< GV, pitype, IS > This;

    public:
      typedef GV GridView;
      typedef typename GridView::Grid Grid;

      typedef IS IndexSet;

    protected:
      typedef typename IndexSet::IndexType IndexType;

      static const int dimension = Grid::dimension;

      typedef typename Grid::template Codim< 0 >::Entity Element;

      typedef typename Element::Geometry::GlobalCoordinate VertexType;

      // type of communicator
      typedef Dune :: CollectiveCommunication< typename MPIHelper :: MPICommunicator >
        CollectiveCommunication;

#ifdef USE_ALUGRID_SFC_ORDERING
      typedef SpaceFillingCurveOrdering< VertexType > SpaceFillingCurveOrderingType;
#endif

    public:
      SimplePartitioner ( const GridView &gridView, const CollectiveCommunication& comm,
                          const VertexType& lowerLeft, const VertexType& upperRight )
      : comm_( comm ),
        gridView_( gridView ),
        indexSet_( gridView_.indexSet() ),
        pSize_( comm_.size() ),
        elementCuts_( pSize_, -1 ),
#ifdef USE_ALUGRID_SFC_ORDERING
        sfc_( SpaceFillingCurveOrderingType::ZCurve, lowerLeft, upperRight, comm_ ),
#endif
        maxIndex_( -1.0 )
      {
#ifdef USE_ALUGRID_SFC_ORDERING
        const auto end = gridView_.template end< 0 > ();
        for( auto it = gridView_.template begin< 0 > (); it != end; ++it )
        {
          VertexType center = (*it).geometry().center();
          // get space filling curve index in [0,1]
          const double hidx = sfc_.index( center );
          maxIndex_ = std::max( maxIndex_, hidx );
        }

        // scale by the number of elements so that hidx / maxIndex_ maps
        // to an element position in [0, #elements]
        maxIndex_ /= indexSet_.size( 0 );
#endif

        // compute the decomposition of the space filling curve
        calculateElementCuts();
      }

    public:
      template< class Entity >
      double index( const Entity &entity ) const
      {
        alugrid_assert ( Entity::codimension == 0 );
#ifdef USE_ALUGRID_SFC_ORDERING
        // get center of entity's geometry
        VertexType center = entity.geometry().center();
        // get space filling curve index in [0,1]
        return sfc_.index( center );
#else
        return double(indexSet_.index( entity ));
#endif
      }

      template< class Entity >
      int rank( const Entity &entity ) const
      {
        alugrid_assert ( Entity::codimension == 0 );
#ifdef USE_ALUGRID_SFC_ORDERING
        // get center of entity's geometry
        VertexType center = entity.geometry().center();
        // get space filling curve index in [0,1]
        const double hidx = sfc_.index( center );
        // transform to element index
        const long int index = (hidx / maxIndex_);
#else
        const long int index = indexSet_.index( entity );
#endif
        return rank( index );
      }

    protected:
      int rank( long int index ) const
      {
        if( index < elementCuts_[ 0 ] ) return 0;
        for( int p=1; p<pSize_; ++p )
        {
          if( index >= elementCuts_[ p-1 ] && index < elementCuts_[ p ] )
            return p;
        }
        return pSize_-1;
      }
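
      // Note: elementCuts_[ p ] stores the first element index that no
      // longer belongs to rank p, i.e. rank p owns the contiguous index
      // range [ elementCuts_[ p-1 ], elementCuts_[ p ] ), starting at
      // index 0 for p = 0. calculateElementCuts() computes these cuts so
      // that the element counts of the ranks differ by at most one.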

      void calculateElementCuts()
      {
        const size_t nElements = indexSet_.size( 0 );

        // get number of MPI processes
        const int nRanks = pSize_;

        // get minimal number of entities per process
        const size_t minPerProc = (double(nElements) / double( nRanks ));
        size_t maxPerProc = minPerProc ;
        if( nElements % nRanks != 0 )
          ++ maxPerProc ;

        // calculate the number of processes that receive the larger
        // number of elements per process
        double percentage = (double(nElements) / double( nRanks ));
        percentage -= minPerProc ;
        percentage *= nRanks ;

        int rank = 0;
        size_t elementCount = maxPerProc ;
        size_t elementNumber = 0;
        size_t localElementNumber = 0;
        const int lastRank = nRanks - 1;

        const size_t size = indexSet_.size( 0 );
        for( size_t i=0; i<size; ++i )
        {
          if( localElementNumber >= elementCount )
          {
            elementCuts_[ rank ] = i ;

            // increase rank
            if( rank < lastRank ) ++ rank;

            // reset local number
            localElementNumber = 0;

            // switch to the smaller count once enough ranks have
            // received the larger number of elements
            if( elementCount == maxPerProc && rank >= percentage )
              elementCount = minPerProc ;
          }

          // increase counters
          ++elementNumber;
          ++localElementNumber;
        }

        // set cut for last process
        elementCuts_[ lastRank ] = size ;
      }
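
      // Worked example (illustrative): for nElements = 10 and nRanks = 4
      // we get minPerProc = 2, maxPerProc = 3 and percentage = 2, so
      // ranks 0 and 1 receive 3 elements, ranks 2 and 3 receive 2, and
      // elementCuts_ becomes { 3, 6, 8, 10 }.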

      const CollectiveCommunication& comm_;

      const GridView& gridView_;
      const IndexSet &indexSet_;

      const int pSize_;
      std::vector< long int > elementCuts_ ;

#ifdef USE_ALUGRID_SFC_ORDERING
      // element center to Hilbert (or Z) curve index mapping
      SpaceFillingCurveOrderingType sfc_;
#endif
      double maxIndex_ ;
    };

  public:
    typedef typename Grid::ctype ctype;
    typedef typename MPIHelper :: MPICommunicator MPICommunicatorType ;

    // type of communicator
    typedef Dune :: CollectiveCommunication< MPICommunicatorType >
      CollectiveCommunication;

    static SharedPtrType
    createCubeGrid( const std::string& filename,
                    MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      std::ifstream file( filename.c_str() );
      if( ! file )
      {
        DUNE_THROW(InvalidStateException,"file not found " << filename );
      }
      return createCubeGrid( file, filename, mpiComm );
    }

    static SharedPtrType
    createCubeGrid( std::istream& input,
                    const std::string& name,
                    MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      CollectiveCommunication comm( MPIHelper :: getCommunicator() );
      static_assert( dim == dimworld, "YaspGrid, which is used to create the structured grid, only supports dim == dimworld" );

      // if periodic transformations are active we cannot use the YaspGrid
      // approach to insert the grid cells, because then the periodic
      // elements would not be inserted
      dgf::PeriodicFaceTransformationBlock trafoBlock( input, dimworld );
      if( trafoBlock.isactive() )
      {
        Dune::GridPtr< Grid > grid( input, mpiComm );
        return SharedPtrType( grid.release() );
      }

      Dune::dgf::IntervalBlock intervalBlock( input );
      if( !intervalBlock.isactive() )
      {
        std::cerr << "No interval block found, using default DGF method to create ALUGrid!" << std::endl;
        return SharedPtrType( GridPtr< Grid > (input, mpiComm ).release());
      }

      if( intervalBlock.numIntervals() != 1 )
      {
        std::cerr << "ALUGrid creation from YaspGrid can only handle one interval block, using default DGF method to create ALUGrid!" << std::endl;
        return SharedPtrType( GridPtr< Grid > (input, mpiComm ).release());
      }

      if( intervalBlock.dimw() != dim )
      {
        std::cerr << "ALUGrid creation from YaspGrid only works for dim == dimworld, using default DGF method to create ALUGrid!" << std::endl;
        return SharedPtrType( GridPtr< Grid > (input, mpiComm ).release());
      }

      const dgf::IntervalBlock::Interval &interval = intervalBlock.get( 0 );

      // at this point the DGF file contains a proper IntervalBlock, so
      // the grid can be created in parallel; otherwise the standard DGF
      // technique above is used
      std::array<int, dim> dims;
      FieldVector<ctype, dimworld> lowerLeft;
      FieldVector<ctype, dimworld> upperRight;
      for( int i=0; i<dim; ++i )
      {
        dims[ i ] = interval.n[ i ] ;
        lowerLeft[ i ] = interval.p[ 0 ][ i ];
        upperRight[ i ] = interval.p[ 1 ][ i ];
      }

      // broadcast array values
      comm.broadcast( &dims[ 0 ], dim, 0 );
      comm.broadcast( &lowerLeft [ 0 ], dim, 0 );
      comm.broadcast( &upperRight[ 0 ], dim, 0 );

      std::string nameYasp( name );
      nameYasp += " via YaspGrid";
      typedef StructuredGridFactory< Grid > SGF;
      return SGF :: createCubeGridImpl( lowerLeft, upperRight, dims, comm, nameYasp );
    }
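
    // A minimal DGF input handled by the method above (illustrative, for
    // dim == dimworld == 3: lower left corner, upper right corner,
    // number of cells per direction):
    //
    //   DGF
    //   Interval
    //   0 0 0
    //   1 1 1
    //   4 4 4
    //   #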

    template < class int_t >
    static SharedPtrType
    createSimplexGrid ( const FieldVector<ctype,dimworld>& lowerLeft,
                        const FieldVector<ctype,dimworld>& upperRight,
                        const std::array< int_t, dim>& elements,
                        MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      // create DGF interval block and use DGF parser to create simplex grid
      std::stringstream dgfstream;
      dgfstream << "DGF" << std::endl;
      dgfstream << "Interval" << std::endl;
      dgfstream << lowerLeft << std::endl;
      dgfstream << upperRight << std::endl;
      for( int i=0; i<dim; ++ i)
        dgfstream << elements[ i ] << " ";
      dgfstream << std::endl;
      dgfstream << "#" << std::endl;
      dgfstream << "Cube" << std::endl;
      dgfstream << "#" << std::endl;
      dgfstream << "Simplex" << std::endl;
      dgfstream << "#" << std::endl;

      std::cout << dgfstream.str() << std::endl;

      Dune::GridPtr< Grid > grid( dgfstream, mpiComm );
      return SharedPtrType( grid.release() );
    }

    template < class int_t >
    static SharedPtrType
    createCubeGrid ( const FieldVector<ctype,dimworld>& lowerLeft,
                     const FieldVector<ctype,dimworld>& upperRight,
                     const std::array< int_t, dim>& elements,
                     MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      CollectiveCommunication comm( mpiComm );
      std::string name( "Cartesian ALUGrid via YaspGrid" );
      return createCubeGridImpl( lowerLeft, upperRight, elements, comm, name );
    }

  protected:
    template <int codim, class Entity>
    int subEntities ( const Entity& entity ) const
    {
      return entity.subEntities( codim );
    }
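
    // createCubeGridImpl: each rank builds the full structured YaspGrid
    // on its process-local communicator, evaluates the same deterministic
    // partitioning, and then inserts only its own elements (plus the
    // process borders) into the ALUGrid factory, so the macro grid never
    // has to be distributed explicitly.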

    template < class int_t >
    static SharedPtrType
    createCubeGridImpl ( const FieldVector<ctype,dimworld>& lowerLeft,
                         const FieldVector<ctype,dimworld>& upperRight,
                         const std::array< int_t, dim>& elements,
                         const CollectiveCommunication& comm,
                         const std::string& name )
    {
      const int myrank = comm.rank();

      typedef YaspGrid< dimworld, EquidistantOffsetCoordinates<double,dimworld> > CartesianGridType ;
      std::array< int, dim > dims;
      for( int i=0; i<dim; ++i ) dims[ i ] = elements[ i ];

      CollectiveCommunication commSelf( MPIHelper :: getLocalCommunicator() );
      // create YaspGrid to partition and insert elements that belong to process directly
      CartesianGridType sgrid( lowerLeft, upperRight, dims, std::bitset<dim>(0ULL), 1, commSelf );

      typedef typename CartesianGridType :: LeafGridView GridView ;
      typedef typename GridView :: IndexSet IndexSet ;
      typedef typename IndexSet :: IndexType IndexType ;
      typedef typename GridView :: template Codim< 0 > :: Iterator ElementIterator ;
      typedef typename ElementIterator::Entity Entity ;
      typedef typename GridView :: IntersectionIterator IntersectionIterator ;
      typedef typename IntersectionIterator :: Intersection Intersection ;

      GridView gridView = sgrid.leafGridView();
      const IndexSet &indexSet = gridView.indexSet();

      // get decomposition of the macro grid
      SimplePartitioner< GridView, InteriorBorder_Partition > partitioner( gridView, comm, lowerLeft, upperRight );

      // create ALUGrid GridFactory
      GridFactory< Grid > factory;

      // map global vertex ids to local ones
      std::map< IndexType, unsigned int > vtxMap;
      // elements belonging to this rank, sorted by partitioner index
      std::map< double, const Entity > sortedElementList;

      const int numVertices = (1 << dim);
      std::vector< unsigned int > vertices( numVertices );

      const ElementIterator end = gridView.template end< 0 >();
      for( ElementIterator it = gridView.template begin< 0 >(); it != end; ++it )
      {
        const Entity &entity = *it;

        // if the element does not belong to our partition, continue
        if( partitioner.rank( entity ) != myrank )
          continue;

        const double elIndex = partitioner.index( entity );
        assert( sortedElementList.find( elIndex ) == sortedElementList.end() );
        sortedElementList.insert( std::make_pair( elIndex, entity ) );
      }

      int nextElementIndex = 0;
      const auto endSorted = sortedElementList.end();
      for( auto it = sortedElementList.begin(); it != endSorted; ++it )
      {
        const Entity &entity = (*it).second;

        // insert vertices and element
        const typename Entity::Geometry geo = entity.geometry();
        alugrid_assert( numVertices == geo.corners() );
        for( int i = 0; i < numVertices; ++i )
        {
          const IndexType vtxId = indexSet.subIndex( entity, i, dim );
          std::pair< typename std::map< IndexType, unsigned int >::iterator, bool > result
            = vtxMap.insert( std::make_pair( vtxId, vtxMap.size() ) );
          // insert the vertex into the factory only on first encounter
          if( result.second )
            factory.insertVertex( geo.corner( i ), vtxId );
          vertices[ i ] = result.first->second;
        }

        factory.insertElement( entity.type(), vertices );
        const int elementIndex = nextElementIndex++;

        const IntersectionIterator iend = gridView.iend( entity );
        for( IntersectionIterator iit = gridView.ibegin( entity ); iit != iend; ++iit )
        {
          const Intersection &isec = *iit;
          const int faceNumber = isec.indexInInside();
          // insert boundary face in case of domain boundary
          if( isec.boundary() )
            factory.insertBoundary( elementIndex, faceNumber );
          // insert process border if the neighboring element has a different rank
          if( isec.neighbor() && (partitioner.rank( isec.outside() ) != myrank) )
            factory.insertProcessBorder( elementIndex, faceNumber );
        }
      }

      // for structured grids marking the longest edge is not necessary
      factory.setLongestEdgeFlag(false);

      // create shared grid pointer
      return SharedPtrType( factory.createGrid( true, true, name ) );
    }
  };

} // namespace Dune

#endif // #ifndef DUNE_ALUGRID_STRUCTUREDGRIDFACTORY_HH
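
A minimal usage sketch (the 3d cube/nonconforming grid type is an
illustrative choice, not mandated by this header; MPIHelper::instance
must have been called beforehand):

  #include <dune/alugrid/grid.hh>
  #include <dune/alugrid/common/structuredgridfactory.hh>

  typedef Dune::ALUGrid< 3, 3, Dune::cube, Dune::nonconforming > Grid;
  typedef Dune::StructuredGridFactory< Grid > SGF;

  Dune::FieldVector< Grid::ctype, 3 > lowerLeft( 0.0 ), upperRight( 1.0 );
  std::array< int, 3 > cells = {{ 4, 4, 4 }};
  auto gridPtr = SGF::createCubeGrid( lowerLeft, upperRight, cells );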