Actual source code: ex13f90.F
!
!
!/*T
!   Concepts: KSP^basic sequential example
!   Concepts: KSP^Laplacian, 2d
!   Concepts: Laplacian, 2d
!   Processors: 1
!T*/
! -----------------------------------------------------------------------

      program main
      implicit none
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                          Include files
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!  The following include statements are required for KSP Fortran programs:
!     petsc.h    - base PETSc routines
!     petscvec.h - vectors
!     petscmat.h - matrices
!     petscksp.h - Krylov subspace methods
!     petscpc.h  - preconditioners
!
#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
!  User-defined context that contains all the data structures used
!  in the linear solution process.

!     Vec    x,b      /* solution vector and right-hand-side vector */
!     Mat    A        /* sparse matrix */
!     KSP    ksp      /* linear solver context */
!     int    m,n      /* grid dimensions */
!
!  Since we cannot store Scalars and integers in the same context,
!  we store the integers/pointers in the user-defined context, and
!  the scalar values are carried in the common block.
!  The scalar values in this simplistic example could easily
!  be recalculated in each routine where they are needed.
!
!     Scalar hx2,hy2  /* (m+1)*(m+1) and (n+1)*(n+1), i.e. 1/hx^2 and 1/hy^2 */

!  Note: Any user-defined Fortran routines MUST be declared as external.
      external UserInitializeLinearSolver
      external UserFinalizeLinearSolver
      external UserDoLinearSolver
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                        Variable declarations
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      PetscScalar      hx,hy,x,y
      PetscFortranAddr userctx(6)
      integer          ierr,m,n,t,tmax,flg,i,j
      integer          size,rank
      double precision enorm
      PetscScalar, ALLOCATABLE :: userx(:,:)
      PetscScalar, ALLOCATABLE :: userb(:,:)
      PetscScalar, ALLOCATABLE :: solution(:,:)
      PetscScalar, ALLOCATABLE :: rho(:,:)

      double precision hx2,hy2
      common /param/ hx2,hy2
      tmax = 2
      m = 6
      n = 7
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                       Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      if (size .ne. 1) then
         call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
         if (rank .eq. 0) then
            write(6,*) 'This is a uniprocessor example only!'
         endif
         SETERRQ(1,' ',ierr)
      endif
!  The next two lines are for testing only; they allow the user to
!  set the grid size at runtime.

      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-m',m,flg,ierr)
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-n',n,flg,ierr)
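!
!  For example, running with the options "-m 20 -n 20" solves the
!  problem on a 20 x 20 interior grid instead of the default 6 x 7 grid.
!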
!  Create the empty sparse matrix and linear solver data structures

      call UserInitializeLinearSolver(m,n,userctx,ierr)
!  Allocate arrays to hold the solution to the linear system. This
!  approach is not normally used in PETSc programs, but in this case,
!  since we are calling these routines from a non-PETSc program, we
!  would like to reuse the data structures from another code. So in
!  the context of a larger application these would be provided by
!  other (non-PETSc) parts of the application code.

      ALLOCATE (userx(m,n),userb(m,n),solution(m,n))
!  Allocate an array to hold the coefficients in the elliptic operator

      ALLOCATE (rho(m,n))
!  Fill up the array rho[] with the function rho(x,y) = x; fill the
!  right-hand-side b[] and the solution with a known problem for testing.

      hx = 1.0/(m+1)
      hy = 1.0/(n+1)
      y  = hy
      do 20 j=1,n
         x = hx
         do 10 i=1,m
            rho(i,j)      = x
            solution(i,j) = sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            userb(i,j)    = -2.*PETSC_PI*cos(2.*PETSC_PI*x)*             &
     &                      sin(2.*PETSC_PI*y) +                         &
     &                      8*PETSC_PI*PETSC_PI*x*                       &
     &                      sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            x = x + hx
 10      continue
         y = y + hy
 20   continue
!  Loop over a bunch of timesteps, setting up and solving the linear
!  system for each time-step.

!  Note that this loop is somewhat artificial. It is intended to
!  demonstrate how one may reuse the linear solvers in each time-step.

      do 100 t=1,tmax

         call UserDoLinearSolver(rho,userctx,userb,userx,ierr)
!  Compute error: Note that this could (and usually should) all be done
!  using the PETSc vector operations. Here we demonstrate using more
!  standard programming practices to show how they may be mixed with
!  PETSc.

         enorm = 0.0
         do 90 j=1,n
            do 80 i=1,m
               enorm = enorm +                                           &
     &            (solution(i,j)-userx(i,j))*(solution(i,j)-userx(i,j))
 80         continue
 90      continue
         enorm = enorm * PetscRealPart(hx*hy)
         write(6,115) m,n,enorm
 115     format ('m = ',I2,' n = ',I2,' error norm = ',1PE10.4)
 100  continue
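!
!  As noted above, the error could instead be computed with the PETSc
!  vector operations. A minimal, untested sketch of that alternative is
!  given below as comments; uvec and svec are hypothetical work vectors
!  wrapping the existing arrays, and the calling sequences follow the
!  Fortran style used elsewhere in this example:
!
!        Vec       uvec,svec
!        PetscReal norm2
!        call VecCreateSeqWithArray(PETSC_COMM_SELF,m*n,userx,uvec,ierr)
!        call VecCreateSeqWithArray(PETSC_COMM_SELF,m*n,solution,svec,ierr)
!        call VecAXPY(svec,-1.0d0,uvec,ierr)      ! svec <- solution - userx
!        call VecNorm(svec,NORM_2,norm2,ierr)     ! 2-norm of the difference
!        enorm = norm2*norm2*hx*hy                ! same scaling as above
!        call VecDestroy(uvec,ierr)
!        call VecDestroy(svec,ierr)
!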
!  We are finished solving linear systems, so we clean up the
!  data structures.

      DEALLOCATE (userx,userb,solution,rho)

      call UserFinalizeLinearSolver(userctx,ierr)
      call PetscFinalize(ierr)
      end
! ----------------------------------------------------------------
      subroutine UserInitializeLinearSolver(m,n,userctx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
      integer          m,n,ierr
      PetscFortranAddr userctx(*)

      common /param/ hx2,hy2
      double precision hx2,hy2

!  Local variable declarations
      Mat     A
      Vec     b,x
      KSP     ksp
      integer Ntot
!  Here we assume use of a grid of size m x n, with all points on the
!  interior of the domain, i.e., we do not include the points corresponding
!  to homogeneous Dirichlet boundary conditions. We assume that the domain
!  is [0,1] x [0,1].

      hx2  = (m+1)*(m+1)
      hy2  = (n+1)*(n+1)
      Ntot = m*n
!  Create the sparse matrix. Preallocate 5 nonzeros per row.

      call MatCreateSeqAIJ(PETSC_COMM_SELF,Ntot,Ntot,5,                  &
     &                     PETSC_NULL_INTEGER,A,ierr)
!
!  Create vectors. Here we create vectors with no memory allocated.
!  This way, we can use the data structures already in the program
!  by calling the VecPlaceArray() subroutine at a later stage.
!
      call VecCreateSeqWithArray(PETSC_COMM_SELF,Ntot,                   &
     &                           PETSC_NULL_SCALAR,b,ierr)
      call VecDuplicate(b,x,ierr)
!  Create linear solver context. This will be used repeatedly for all
!  the linear solves needed.

      call KSPCreate(PETSC_COMM_SELF,ksp,ierr)

      userctx(1) = x
      userctx(2) = b
      userctx(3) = A
      userctx(4) = ksp
      userctx(5) = m
      userctx(6) = n

      return
      end
! -----------------------------------------------------------------------

!  Solves -div (rho grad psi) = F using finite differences.
!  rho is a 2-dimensional array of size m by n, stored in Fortran
!  style by columns. userb is a standard one-dimensional array.
      subroutine UserDoLinearSolver(rho,userctx,userb,userx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
      integer          ierr
      PetscFortranAddr userctx(*)
      PetscScalar      rho(*),userb(*),userx(*)

      common /param/ hx2,hy2
      double precision hx2,hy2

      PC          pc
      KSP         ksp
      Vec         b,x
      Mat         A
      integer     m,n
      integer     i,j,II,JJ
      PetscScalar v

!      PetscScalar tmpx(*),tmpb(*)

      x   = userctx(1)
      b   = userctx(2)
      A   = userctx(3)
      ksp = userctx(4)
      m   = int(userctx(5))
      n   = int(userctx(6))
!  This is not the most efficient way of generating the matrix,
!  but let's not worry about it. We should have separate code for
!  the four corners, each edge, and then the interior. Then we won't
!  have the slow if-tests inside the loop.
!
!  Compute the operator
!          -div rho grad
!  on an m by n grid with zero Dirichlet boundary conditions. The rho
!  is assumed to be given on the same grid as the finite difference
!  stencil is applied. For a staggered grid, one would have to change
!  things slightly.
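!
!  Written out, the discrete equation that the loop below assembles for
!  an interior grid point (i,j) is (recall hx2 = 1/hx^2 and hy2 = 1/hy^2)
!
!     - 0.5*(rho(i,j)+rho(i,j-1))*hy2 * psi(i,j-1)
!     - 0.5*(rho(i,j)+rho(i-1,j))*hx2 * psi(i-1,j)
!     + 2.0*rho(i,j)*(hx2+hy2)        * psi(i,j)
!     - 0.5*(rho(i,j)+rho(i+1,j))*hx2 * psi(i+1,j)
!     - 0.5*(rho(i,j)+rho(i,j+1))*hy2 * psi(i,j+1)  =  F(i,j)
!
!  Near the boundary the corresponding off-diagonal terms are simply
!  dropped, which imposes the homogeneous Dirichlet conditions.
!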
      II = 0
      do 110 j=1,n
         do 100 i=1,m
            if (j .gt. 1) then
               JJ = II - m
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            if (j .lt. n) then
               JJ = II + m
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            if (i .gt. 1) then
               JJ = II - 1
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            if (i .lt. m) then
               JJ = II + 1
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            v = 2*rho(II+1)*(hx2+hy2)
            call MatSetValues(A,1,II,1,II,v,INSERT_VALUES,ierr)
            II = II+1
 100     continue
 110  continue
!
!  Assemble matrix
!
      call MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr)
      call MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr)
!
!  Set operators. Here the matrix that defines the linear system
!  also serves as the preconditioning matrix. Since all the matrices
!  will have the same nonzero pattern here, we indicate this so the
!  linear solvers can take advantage of this.
!
      call KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN,ierr)
!
!  Set linear solver defaults for this problem (optional).
!  - Here we set it to use direct LU factorization for the solution.
!
      call KSPGetPC(ksp,pc,ierr)
      call PCSetType(pc,PCLU,ierr)
!
!  Set runtime options, e.g.,
!     -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
!  These options will override those specified above as long as
!  KSPSetFromOptions() is called _after_ any other customization
!  routines.
!
!  Run the program with the option -help to see all the possible
!  linear solver options.
!
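!  For example, to replace the LU factorization chosen above with
!  ILU-preconditioned GMRES and monitor the convergence, one might run
!  (an illustrative command line; the executable name is assumed):
!
!     ./ex13f90 -ksp_type gmres -pc_type ilu -ksp_rtol 1.e-6 -ksp_monitor
!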
      call KSPSetFromOptions(ksp,ierr)
!
!  This allows the PETSc linear solvers to compute the solution
!  directly in the user's array rather than in the PETSc vector.
!
!  This is essentially a hack and not highly recommended unless you
!  are quite comfortable with using PETSc. In general, users should
!  write their entire application using PETSc vectors rather than
!  arrays.
!
      call VecPlaceArray(x,userx,ierr)
      call VecPlaceArray(b,userb,ierr)
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                      Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call KSPSolve(ksp,b,x,ierr)

      call VecResetArray(x,ierr)
      call VecResetArray(b,ierr)

      return
      end
! ------------------------------------------------------------------------

      subroutine UserFinalizeLinearSolver(userctx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
      integer          ierr
      PetscFortranAddr userctx(*)

!  Local variable declarations

      Vec x,b
      Mat A
      KSP ksp
!
!  We are all done and don't need to solve any more linear systems, so
!  we free the work space. All PETSc objects should be destroyed when
!  they are no longer needed.
!
      x   = userctx(1)
      b   = userctx(2)
      A   = userctx(3)
      ksp = userctx(4)
      call VecDestroy(x,ierr)
      call VecDestroy(b,ierr)
      call MatDestroy(A,ierr)
      call KSPDestroy(ksp,ierr)

      return
      end