Actual source code: ex13f90.F

!
!    "$Id: ex13f90.F,v 1.24 2001/08/07 03:04:00 balay Exp $";
!
!
!/*T
!   Concepts: KSP^basic sequential example
!   Concepts: KSP^Laplacian, 2d
!   Concepts: Laplacian, 2d
!   Processors: 1
!T*/
! -----------------------------------------------------------------------

      program main
      implicit none

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                    Include files
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!  The following include statements are required for KSP Fortran programs:
!     petsc.h       - base PETSc routines
!     petscvec.h    - vectors
!     petscmat.h    - matrices
!     petscksp.h    - Krylov subspace methods
!     petscpc.h     - preconditioners
!
#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"

!    User-defined context that contains all the data structures used
!    in the linear solution process.

!   Vec    x,b      /* solution vector and right hand side vector */
!   Mat    A        /* sparse matrix */
!   KSP    ksp      /* linear solver context */
!   int    m,n      /* grid dimensions */
!
!   Since we cannot store Scalars and integers in the same context,
!   we store the integers/pointers in the user-defined context and
!   carry the scalar values in the common block.
!   The scalar values in this simplistic example could easily
!   be recalculated in each routine where they are needed.
!
!   Scalar hx2,hy2  /* 1/(hx*hx) and 1/(hy*hy), i.e. (m+1)*(m+1) and (n+1)*(n+1) */

!  Note: Any user-defined Fortran routines MUST be declared as external.

      external UserInitializeLinearSolver
      external UserFinalizeLinearSolver
      external UserDoLinearSolver

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                   Variable declarations
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      PetscScalar  hx,hy,x,y
      PetscFortranAddr userctx(6)
      integer ierr,m,n,t,tmax,flg,i,j,Ntot
      integer size,rank
      double precision  enorm
      PetscScalar,ALLOCATABLE :: userx(:,:),userb(:,:)
      PetscScalar,ALLOCATABLE :: solution(:,:),rho(:,:)

      double precision hx2,hy2
      common /param/ hx2,hy2

      tmax = 2
      m = 6
      n = 7

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                 Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      if (size .ne. 1) then
         call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
         if (rank .eq. 0) then
            write(6,*) 'This is a uniprocessor example only!'
         endif
         SETERRQ(1,' ',ierr)
      endif

!  The next two lines are for testing only; these allow the user to
!  decide the grid size at runtime.

      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-m',m,flg,ierr)
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-n',n,flg,ierr)

!  Create the empty sparse matrix and linear solver data structures

      call UserInitializeLinearSolver(m,n,userctx,ierr)
      Ntot = m*n

!  Allocate arrays to hold the solution to the linear system.  This is
!  not normally done in PETSc programs, but here we pretend these routines
!  are being called from a non-PETSc program that already owns its own
!  data structures.  In the context of a larger application, these arrays
!  would be provided by the other (non-PETSc) parts of the application code.

      ALLOCATE (userx(m,n),userb(m,n),solution(m,n))

!  Allocate an array to hold the coefficients in the elliptic operator

      ALLOCATE (rho(m,n))

!  Fill up the array rho() with the function rho(x,y) = x; fill the
!  right-hand side b() and the solution with a known problem for testing.
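!
!  The test problem uses the exact solution psi(x,y) = sin(2*pi*x)*sin(2*pi*y)
!  with rho(x,y) = x, so the right-hand side below is
!     f = -div(rho grad psi)
!       = -2*pi*cos(2*pi*x)*sin(2*pi*y) + 8*pi*pi*x*sin(2*pi*x)*sin(2*pi*y)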

      hx = 1.0/(m+1)
      hy = 1.0/(n+1)
      y  = hy
      do 20 j=1,n
         x = hx
         do 10 i=1,m
            rho(i,j)      = x
            solution(i,j) = sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            userb(i,j)    = -2.*PETSC_PI*cos(2.*PETSC_PI*x)*              &
     &                sin(2.*PETSC_PI*y) +                                &
     &                8*PETSC_PI*PETSC_PI*x*                              &
     &                sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            x = x + hx
 10      continue
         y = y + hy
 20   continue

!  Loop over a bunch of timesteps, setting up and solving the linear
!  system for each timestep.
!  Note that this loop is somewhat artificial. It is intended to
!  demonstrate how one may reuse the linear solvers in each timestep.

      do 100 t=1,tmax
         call UserDoLinearSolver(rho,userctx,userb,userx,ierr)

!        Compute error: Note that this could (and usually should) all be done
!        using the PETSc vector operations. Here we demonstrate using more
!        standard programming practices to show how they may be mixed with
!        PETSc.
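!        The sum of squared nodal errors is scaled by the cell area hx*hy,
!        so enorm approximates the square of the L2 norm of the error.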
         enorm = 0.0
         do 90 j=1,n
            do 80 i=1,m
               enorm = enorm +                                          &
     &             (solution(i,j)-userx(i,j))*(solution(i,j)-userx(i,j))
 80         continue
 90      continue
         enorm = enorm * PetscRealPart(hx*hy)
         write(6,115) m,n,enorm
 115     format ('m = ',I2,' n = ',I2,' error norm = ',1PE10.4)
 100  continue

!  We are finished solving linear systems, so we clean up the
!  data structures.

      DEALLOCATE (userx,userb,solution,rho)

      call UserFinalizeLinearSolver(userctx,ierr)
      call PetscFinalize(ierr)
      end

! ----------------------------------------------------------------
      subroutine UserInitializeLinearSolver(m,n,userctx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"

      integer          m,n,ierr
      PetscFortranAddr userctx(*)

      common /param/ hx2,hy2
      double precision hx2,hy2

!  Local variable declarations
      Mat     A
      Vec     b,x
      KSP     ksp
      integer Ntot


!  Here we assume use of a grid of size m x n, with all points on the
!  interior of the domain, i.e., we do not include the points corresponding
!  to homogeneous Dirichlet boundary conditions.  We assume that the domain
!  is [0,1]x[0,1].
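
!  Since hx = 1/(m+1) and hy = 1/(n+1) are the mesh spacings, hx2 = 1/(hx*hx)
!  and hy2 = 1/(hy*hy) are the scale factors used in the difference stencil
!  in UserDoLinearSolver().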

      hx2 = (m+1)*(m+1)
      hy2 = (n+1)*(n+1)
      Ntot = m*n

!  Create the sparse matrix. Preallocate 5 nonzeros per row.
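!  (The 5-point difference stencil couples each grid point to itself and its
!  four nearest neighbors, so each matrix row has at most 5 nonzeros.)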

      call MatCreateSeqAIJ(PETSC_COMM_SELF,Ntot,Ntot,5,                  &
     &     PETSC_NULL_INTEGER,A,ierr)
!
!  Create vectors. Here we create vectors with no memory allocated.
!  This way, we can use the data structures already in the program
!  by using the VecPlaceArray() subroutine at a later stage.
!
      call VecCreateSeqWithArray(PETSC_COMM_SELF,Ntot,                  &
     &     PETSC_NULL_SCALAR,b,ierr)
      call VecDuplicate(b,x,ierr)

!  Create linear solver context. This will be used repeatedly for all
!  the linear solves needed.

      call KSPCreate(PETSC_COMM_SELF,ksp,ierr)
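
!  Save the PETSc object handles and the grid dimensions in the user-defined
!  context array so that UserDoLinearSolver() and UserFinalizeLinearSolver()
!  can retrieve them.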

      userctx(1) = x
      userctx(2) = b
      userctx(3) = A
      userctx(4) = ksp
      userctx(5) = m
      userctx(6) = n

      return
      end
! -----------------------------------------------------------------------

!   Solves -div (rho grad psi) = F using finite differences.
!   rho is a 2-dimensional array of size m by n, stored in Fortran
!   style by columns. userb is a standard one-dimensional array.

      subroutine UserDoLinearSolver(rho,userctx,userb,userx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"

      integer ierr
      PetscFortranAddr userctx(*)
      PetscScalar rho(*),userb(*),userx(*)


      common /param/ hx2,hy2
      double precision hx2,hy2

      PC   pc
      KSP  ksp
      Vec  b,x
      Mat  A
      integer m,n
      integer i,j,II,JJ
      PetscScalar  v

!      PetscScalar tmpx(*),tmpb(*)

      x   = userctx(1)
      b   = userctx(2)
      A   = userctx(3)
      ksp = userctx(4)
      m   = userctx(5)
      n   = userctx(6)

!  This is not the most efficient way of generating the matrix,
!  but let's not worry about it.  We should have separate code for
!  the four corners, each edge, and the interior; then we would not
!  need the slow if-tests inside the loop.
!
!  Compute the operator
!          -div rho grad
!  on an m by n grid with zero Dirichlet boundary conditions. rho is
!  assumed to be given on the same grid to which the finite difference
!  stencil is applied.  For a staggered grid, one would have to change
!  things slightly.
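!
!  The grid points are numbered in the natural ordering: the 0-based global
!  index of point (i,j) is II = (i-1) + (j-1)*m, so rho(II+1) is the Fortran
!  (1-based) entry for that point.  Each off-diagonal stencil coefficient
!  uses the arithmetic average of rho at the two coupled points, e.g.
!  -0.5*(rho(i,j)+rho(i,j-1))*hy2 for the neighbor below, and the diagonal
!  entry is 2*rho(i,j)*(hx2+hy2).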

      II = 0
      do 110 j=1,n
         do 100 i=1,m
            if (j .gt. 1) then
               JJ = II - m
               v = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            if (j .lt. n) then
               JJ = II + m
               v = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            if (i .gt. 1) then
               JJ = II - 1
               v = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            if (i .lt. m) then
               JJ = II + 1
               v = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            v = 2*rho(II+1)*(hx2+hy2)
            call MatSetValues(A,1,II,1,II,v,INSERT_VALUES,ierr)
            II = II+1
 100     continue
 110  continue
!
!     Assemble matrix
!
      call MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr)
      call MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr)

!
!     Set operators. Here the matrix that defines the linear system
!     also serves as the preconditioning matrix. Since all the matrices
!     here will have the same nonzero pattern, we indicate this so that
!     the linear solvers can take advantage of it.
!
      call KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN,ierr)

!
!     Set linear solver defaults for this problem (optional).
!     - Here we set it to use direct LU factorization for the solution
!
      call KSPGetPC(ksp,pc,ierr)
      call PCSetType(pc,PCLU,ierr)

!
!     Set runtime options, e.g.,
!        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
!     These options will override those specified above as long as
!     KSPSetFromOptions() is called _after_ any other customization
!     routines.
!
!     Run the program with the option -help to see all the possible
!     linear solver options.
!
      call KSPSetFromOptions(ksp,ierr)
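!
!     For example, assuming the executable built from this file is named
!     ex13f90, one could run:
!        ./ex13f90 -m 20 -n 20 -ksp_type cg -pc_type icc -ksp_monitor
!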

!
!     This allows the PETSc linear solvers to compute the solution
!     directly in the user's array rather than in the PETSc vector.
!
!     This is essentially a hack and is not highly recommended unless you
!     are quite comfortable with using PETSc. In general, users should
!     write their entire application using PETSc vectors rather than
!     arrays.
!
      call VecPlaceArray(x,userx,ierr)
      call VecPlaceArray(b,userb,ierr)

! - - - - - - - - - - - - - - - - - - - - - - - - - -
!                      Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - -

      call KSPSetRhs(ksp,b,ierr)
      call KSPSetSolution(ksp,x,ierr)
      call KSPSolve(ksp,ierr)

      return
      end

! ------------------------------------------------------------------------

      subroutine UserFinalizeLinearSolver(userctx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"

      integer ierr
      PetscFortranAddr userctx(*)

!  Local variable declarations

      Vec  x,b
      Mat  A
      KSP  ksp
!
!     We are all done and don't need to solve any more linear systems, so
!     we free the work space.  All PETSc objects should be destroyed when
!     they are no longer needed.
!
      x   = userctx(1)
      b   = userctx(2)
      A   = userctx(3)
      ksp = userctx(4)

      call VecDestroy(x,ierr)
      call VecDestroy(b,ierr)
      call MatDestroy(A,ierr)
      call KSPDestroy(ksp,ierr)

      return
      end