Actual source code: ex14f.F

!
!    "$Id: ex14f.F,v 1.24 2001/08/07 03:04:00 balay Exp $";
!
!  Solves a nonlinear system in parallel with a user-defined
!  Newton method that uses KSP to solve the linearized Newton systems.  This solver
!  is a very simplistic inexact Newton method.  The intent of this code is to
!  demonstrate the repeated solution of linear systems with the same nonzero pattern.
!
!  This is NOT the recommended approach for solving nonlinear problems with PETSc!
!  We urge users to employ the SNES component for solving nonlinear problems whenever
!  possible, as it offers many advantages over coding nonlinear solvers independently.
!
!  We solve the Bratu (SFI - solid fuel ignition) problem in a 2D rectangular
!  domain, using distributed arrays (DAs) to partition the parallel grid.
!
!  The command line options include:
!  -par <parameter>, where <parameter> indicates the problem's nonlinearity
!     problem SFI:  <parameter> = Bratu parameter (0 <= par <= 6.81)
!  -mx <xg>, where <xg> = number of grid points in the x-direction
!  -my <yg>, where <yg> = number of grid points in the y-direction
!  -Nx <npx>, where <npx> = number of processors in the x-direction
!  -Ny <npy>, where <npy> = number of processors in the y-direction
!  -mf, use a matrix-free approach for the matrix-vector product
!
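!  For example, a typical run (the grid sizes and monitoring option
!  here are purely illustrative) might be:
!
!      mpirun -np 4 ex14f -mx 16 -my 16 -ksp_monitor
!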
!/*T
!   Concepts: KSP^writing a user-defined nonlinear solver
!   Concepts: DA^using distributed arrays
!   Processors: n
!T*/
!  ------------------------------------------------------------------------
!
!    Solid Fuel Ignition (SFI) problem.  This problem is modeled by
!    the partial differential equation
!
!            -Laplacian u - lambda*exp(u) = 0,  0 < x,y < 1,
!
!    with boundary conditions
!
!             u = 0  for  x = 0, x = 1, y = 0, y = 1.
!
!    A finite difference approximation with the usual 5-point stencil
!    is used to discretize the boundary value problem to obtain a nonlinear
!    system of equations.
!
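!    As a sketch (matching the ComputeFunction routine below, with
!    hx = 1/(mx-1) and hy = 1/(my-1)), the discrete equation at an
!    interior grid point (i,j) reads
!
!       F(i,j) = (hy/hx)*(2*u(i,j) - u(i-1,j) - u(i+1,j))
!              + (hx/hy)*(2*u(i,j) - u(i,j-1) - u(i,j+1))
!              - hx*hy*lambda*exp(u(i,j)) = 0,
!
!    while each boundary point contributes the equation u(i,j) = 0.
!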
!    The SNES version of this problem is:  snes/examples/tutorials/ex5f.F
!
!  -------------------------------------------------------------------------

      program main
      implicit none

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                    Include files
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!     petsc.h       - base PETSc routines   petscvec.h - vectors
!     petscsys.h    - system routines       petscmat.h - matrices
!     petscis.h     - index sets            petscksp.h - Krylov subspace methods
!     petscviewer.h - viewers               petscpc.h  - preconditioners

#include "include/finclude/petsc.h"
#include "include/finclude/petscis.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscda.h"

      MPI_Comm comm
      Vec      X,Y,F,localX,localF
      Mat      J,B
      DA       da
      KSP      ksp

      integer  Nx,Ny,flg,N,ierr,mx,my
      integer  usemf,nooutput
      common   /mycommon/ B,mx,my,localX,localF,da
!
!
!      This is the routine to use for the matrix-free approach
!
      external mymult

!     --------------- Data to define nonlinear solver --------------
      double precision   rtol,xtol,ttol
      double precision   fnorm,ynorm,xnorm
      integer            max_nonlin_its
      integer            lin_its
      integer            i,m
      PetscScalar        mone

      mone           = -1.d0
      rtol           = 1.d-8
      xtol           = 1.d-8
      max_nonlin_its = 10

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      comm = PETSC_COMM_WORLD

!  Initialize problem parameters

!
      mx = 4
      my = 4
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-mx',mx,flg,ierr)
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-my',my,flg,ierr)
      N = mx*my

      nooutput = 0
      call PetscOptionsHasName(PETSC_NULL_CHARACTER,'-no_output',       &
     &     nooutput,ierr)

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Create linear solver context
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call KSPCreate(comm,ksp,ierr)

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Create vector data structures
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

!
!  Create distributed array (DA) to manage parallel grid and vectors
!
      Nx = PETSC_DECIDE
      Ny = PETSC_DECIDE
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-Nx',Nx,flg,ierr)
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-Ny',Ny,flg,ierr)
      call DACreate2d(comm,DA_NONPERIODIC,DA_STENCIL_STAR,mx,           &
     &     my,Nx,Ny,1,1,PETSC_NULL_INTEGER,PETSC_NULL_INTEGER,          &
     &     da,ierr)

!
!  Extract global and local vectors from the DA; then duplicate for the
!  remaining vectors of the same type
!
       call DACreateGlobalVector(da,X,ierr)
       call DACreateLocalVector(da,localX,ierr)
       call VecDuplicate(X,F,ierr)
       call VecDuplicate(X,Y,ierr)
       call VecDuplicate(localX,localF,ierr)


!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Create matrix data structure for Jacobian
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!     Note:  For the parallel case, vectors and matrices MUST be partitioned
!     accordingly.  When using distributed arrays (DAs) to create vectors,
!     the DAs determine the problem partitioning.  We must explicitly
!     specify the local matrix dimensions upon its creation for compatibility
!     with the vector distribution.  Thus, the generic MatCreate() routine
!     is NOT sufficient when working with distributed arrays.
!
!     Note: Here we only approximately preallocate storage space for the
!     Jacobian.  See the users manual for a discussion of better techniques
!     for preallocating matrix memory.
!
      call VecGetLocalSize(X,m,ierr)
      call MatCreateMPIAIJ(comm,m,m,N,N,5,PETSC_NULL_INTEGER,3,         &
     &     PETSC_NULL_INTEGER,B,ierr)
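!
!     (The preallocation numbers above are rough estimates: a 5-point
!     stencil row has at most 5 nonzeros in the diagonal block, and we
!     guess at most 3 nonzeros per row in the off-diagonal block for
!     points that couple to neighboring processors.)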

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     If usemf is on, the matrix-vector product is done via a matrix-free
!     approach.  Note that this is just an example and is not realistic,
!     because we still use the explicitly formed matrix; in reality one
!     would provide a subroutine that applies the operator directly and
!     does not call MatMult() (see the sketch at the end of this file).
!     Note: we put B into a common block so it will be visible to the
!     mymult() routine
      usemf = 0
      call PetscOptionsHasName(PETSC_NULL_CHARACTER,'-mf',usemf,ierr)
      if (usemf .eq. 1) then
         call MatCreateShell(comm,m,m,N,N,PETSC_NULL_INTEGER,J,ierr)
         call MatShellSetOperation(J,MATOP_MULT,mymult,ierr)
      else
!        If not doing matrix-free, then the matrix operator, J, and the
!        matrix used to construct the preconditioner, B, are the same
        J = B
      endif

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Customize linear solver; set runtime options
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!     Set runtime options (e.g., -ksp_monitor -ksp_rtol <rtol> -ksp_type <type>)
!
       call KSPSetFromOptions(ksp,ierr)

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Evaluate initial guess
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

       call FormInitialGuess(X,ierr)
       call ComputeFunction(X,F,ierr)
       call VecNorm(F,NORM_2,fnorm,ierr)
       ttol = fnorm*rtol
       if (nooutput .eq. 0) then
         print*, 'Initial function norm ',fnorm
       endif

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Solve nonlinear system with a user-defined method
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

!  This solver is a very simplistic inexact Newton method, with no
!  damping strategies or bells and whistles.  The intent of this code
!  is merely to demonstrate the repeated solution with KSP of linear
!  systems with the same nonzero structure.
!
!  This is NOT the recommended approach for solving nonlinear problems
!  with PETSc!  We urge users to employ the SNES component for solving
!  nonlinear problems whenever possible with application codes, as it
!  offers many advantages over coding nonlinear solvers independently.
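!
!  Each pass through the loop below thus performs one undamped Newton
!  step:  (approximately) solve  J(x_k) y = F(x_k)  with KSP, then
!  update  x_{k+1} = x_k - y.  The update is carried out below with
!  VecAYPX() followed by VecCopy().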

       do 10 i=0,max_nonlin_its

!  Compute the Jacobian matrix.  See the notes in ComputeJacobian for
!  important information about the grid-point reordering and numbering.

         call ComputeJacobian(X,B,ierr)

!  Solve J Y = F, where J is the Jacobian matrix.
!    - First, set the KSP linear operators.  Here the matrix that
!      defines the linear system also serves as the preconditioning
!      matrix (except with -mf, where J is a shell matrix and the
!      explicitly formed matrix B builds the preconditioner).
!    - Then solve the Newton system.

         call KSPSetOperators(ksp,J,B,SAME_NONZERO_PATTERN,ierr)
         call KSPSetRhs(ksp,F,ierr)
         call KSPSetSolution(ksp,Y,ierr)
         call KSPSolve(ksp,ierr)

!  Compute updated iterate

         call VecNorm(Y,NORM_2,ynorm,ierr)
         call VecAYPX(mone,X,Y,ierr)
         call VecCopy(Y,X,ierr)
         call VecNorm(X,NORM_2,xnorm,ierr)
         call KSPGetIterationNumber(ksp,lin_its,ierr)
         if (nooutput .eq. 0) then
           print*,'linear solve iterations = ',lin_its,' xnorm = ',     &
     &         xnorm,' ynorm = ',ynorm
         endif

!  Evaluate nonlinear function at new location

         call ComputeFunction(X,F,ierr)
         call VecNorm(F,NORM_2,fnorm,ierr)
         if (nooutput .eq. 0) then
           print*, 'Iteration ',i+1,' function norm ',fnorm
         endif

!  Test for convergence

       if (fnorm .le. ttol) then
         if (nooutput .eq. 0) then
           print*,'Converged: function norm ',fnorm,' tolerance ',ttol
         endif
         goto 20
       endif
 10   continue
 20   continue

      write(6,100) i+1
 100  format('Number of Newton iterations =',I2)

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Free work space.  All PETSc objects should be destroyed when they
!     are no longer needed.
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

       call MatDestroy(B,ierr)
       if (usemf .ne. 0) then
         call MatDestroy(J,ierr)
       endif
       call VecDestroy(localX,ierr)
       call VecDestroy(X,ierr)
       call VecDestroy(Y,ierr)
       call VecDestroy(localF,ierr)
       call VecDestroy(F,ierr)
       call KSPDestroy(ksp,ierr)
       call DADestroy(da,ierr)
       call PetscFinalize(ierr)
       end

! -------------------------------------------------------------------
!
!   FormInitialGuess - Forms the initial approximation.
!
!   Input Parameter:
!   X - vector
!
!   Output Parameters:
!   X - vector containing the initial guess
!   ierr - error code
!
      subroutine FormInitialGuess(X,ierr)
      implicit none

!     petsc.h       - base PETSc routines   petscvec.h - vectors
!     petscsys.h    - system routines       petscmat.h - matrices
!     petscis.h     - index sets            petscksp.h - Krylov subspace methods
!     petscviewer.h - viewers               petscpc.h  - preconditioners

#include "include/finclude/petsc.h"
#include "include/finclude/petscis.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscda.h"
      integer          ierr
      PetscOffset      idx
      Vec              X,localX,localF
      integer          i,j,row,mx,my,xs,ys,xm
      integer          ym,gxm,gym,gxs,gys
      double precision one,lambda,temp1,temp,hx,hy
      double precision hxdhy,hydhx,sc
      PetscScalar      xx(1)
      DA               da
      Mat              B
      common   /mycommon/ B,mx,my,localX,localF,da

      one    = 1.d0
      lambda = 6.d0
      hx     = one/(mx-1)
      hy     = one/(my-1)
      sc     = hx*hy*lambda
      hxdhy  = hx/hy
      hydhx  = hy/hx
      temp1  = lambda/(lambda + one)

!  Get a pointer to vector data.
!    - VecGetArray() returns a pointer to the data array.
!    - You MUST call VecRestoreArray() when you no longer need access to
!      the array.
       call VecGetArray(localX,xx,idx,ierr)

!  Get local grid boundaries (for 2-dimensional DA):
!    xs, ys   - starting grid indices (no ghost points)
!    xm, ym   - widths of local grid (no ghost points)
!    gxs, gys - starting grid indices (including ghost points)
!    gxm, gym - widths of local grid (including ghost points)

       call DAGetCorners(da,xs,ys,PETSC_NULL_INTEGER,xm,ym,             &
     &      PETSC_NULL_INTEGER,ierr)
       call DAGetGhostCorners(da,gxs,gys,PETSC_NULL_INTEGER,gxm,gym,    &
     &      PETSC_NULL_INTEGER,ierr)

!  Compute initial guess over the locally owned part of the grid

      do 30 j=ys,ys+ym-1
        temp = (min(j,my-j-1))*hy
        do 40 i=xs,xs+xm-1
          row = i - gxs + (j - gys)*gxm + 1
          if (i .eq. 0 .or. j .eq. 0 .or. i .eq. mx-1 .or.              &
     &        j .eq. my-1) then
            xx(idx+row) = 0.d0
!           Boundary point: leave the zero value and skip to the next point
            goto 40
          endif
          xx(idx+row) = temp1*sqrt(min((min(i,mx-i-1))*hx,temp))
 40     continue
 30   continue

!     Restore vector

       call VecRestoreArray(localX,xx,idx,ierr)

!     Insert values into global vector

       call DALocalToGlobal(da,localX,INSERT_VALUES,X,ierr)
       return
       end

! -------------------------------------------------------------------
!
!   ComputeFunction - Evaluates the nonlinear function, F(x).
!
!   Input Parameter:
!.  X - input vector
!
!   Output Parameter:
!.  F - function vector
!
      subroutine  ComputeFunction(X,F,ierr)
      implicit none

!     petsc.h       - base PETSc routines   petscvec.h - vectors
!     petscsys.h    - system routines       petscmat.h - matrices
!     petscis.h     - index sets            petscksp.h - Krylov subspace methods
!     petscviewer.h - viewers               petscpc.h  - preconditioners

#include "include/finclude/petsc.h"
#include "include/finclude/petscis.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscda.h"

      Vec              X,F,localX,localF
      integer          gys,gxm,gym
      PetscOffset      idx,idf
      integer          ierr,i,j,row,mx,my,xs,ys,xm,ym,gxs
      double precision two,one,lambda,hx
      double precision hy,hxdhy,hydhx,sc
      PetscScalar      u,uxx,uyy,xx(1),ff(1)
      DA               da
      Mat              B
      common   /mycommon/ B,mx,my,localX,localF,da

      two    = 2.d0
      one    = 1.d0
      lambda = 6.d0

      hx     = one/(mx-1)
      hy     = one/(my-1)
      sc     = hx*hy*lambda
      hxdhy  = hx/hy
      hydhx  = hy/hx

!  Scatter ghost points to local vector, using the 2-step process
!     DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
!  By placing code between these two statements, computations can be
!  done while messages are in transition.
!
      call DAGlobalToLocalBegin(da,X,INSERT_VALUES,localX,ierr)
      call DAGlobalToLocalEnd(da,X,INSERT_VALUES,localX,ierr)

!  Get pointers to vector data

      call VecGetArray(localX,xx,idx,ierr)
      call VecGetArray(localF,ff,idf,ierr)

!  Get local grid boundaries

      call DAGetCorners(da,xs,ys,PETSC_NULL_INTEGER,xm,ym,              &
     &     PETSC_NULL_INTEGER,ierr)
      call DAGetGhostCorners(da,gxs,gys,PETSC_NULL_INTEGER,gxm,gym,     &
     &     PETSC_NULL_INTEGER,ierr)

!  Compute function over the locally owned part of the grid
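!  (The ghosted local patch is numbered row-by-row, so grid point (i,j)
!  has 1-based local index (j-gys)*gxm + (i-gxs) + 1; the running 'row'
!  counter below computes exactly this.)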

      do 50 j=ys,ys+ym-1

        row = (j - gys)*gxm + xs - gxs
        do 60 i=xs,xs+xm-1
          row = row + 1

          if (i .eq. 0 .or. j .eq. 0 .or. i .eq. mx-1 .or.              &
     &        j .eq. my-1) then
            ff(idf+row) = xx(idx+row)
            goto 60
          endif
          u   = xx(idx+row)
          uxx = (two*u - xx(idx+row-1) - xx(idx+row+1))*hydhx
          uyy = (two*u - xx(idx+row-gxm) - xx(idx+row+gxm))*hxdhy
          ff(idf+row) = uxx + uyy - sc*exp(u)
 60     continue
 50   continue

!  Restore vectors

       call VecRestoreArray(localX,xx,idx,ierr)
       call VecRestoreArray(localF,ff,idf,ierr)

!  Insert values into global vector

       call DALocalToGlobal(da,localF,INSERT_VALUES,F,ierr)
       return
       end

! -------------------------------------------------------------------
!
!   ComputeJacobian - Evaluates the Jacobian matrix.
!
!   Input Parameter:
!   X - input vector
!
!   Output Parameter:
!   jac - Jacobian matrix
!
!   Notes:
!   Due to grid point reordering with DAs, we must always work
!   with the local grid points, and then transform them to the new
!   global numbering with the 'ltog' mapping (via DAGetGlobalIndices()).
!   We cannot work directly with the global numbers for the original
!   uniprocessor grid!
!
      subroutine ComputeJacobian(X,jac,ierr)
      implicit none

!     petsc.h       - base PETSc routines   petscvec.h - vectors
!     petscsys.h    - system routines       petscmat.h - matrices
!     petscis.h     - index sets            petscksp.h - Krylov subspace methods
!     petscviewer.h - viewers               petscpc.h  - preconditioners

#include "include/finclude/petsc.h"
#include "include/finclude/petscis.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscda.h"

      Vec         X
      Mat         jac
      Vec         localX,localF
      DA          da
      integer     ltog(1)
      PetscOffset idltog,idx
      integer     ierr,i,j,row,mx,my,col(5)
      integer     nloc,xs,ys,xm,ym,gxs,gys,gxm,gym,grow
      PetscScalar two,one,lambda,v(5),hx,hy,hxdhy
      PetscScalar hydhx,sc,xx(1)
      Mat         B
      common   /mycommon/ B,mx,my,localX,localF,da

      one    = 1.d0
      two    = 2.d0
      hx     = one/(mx-1)
      hy     = one/(my-1)
      sc     = hx*hy
      hxdhy  = hx/hy
      hydhx  = hy/hx
      lambda = 6.d0

!  Scatter ghost points to local vector, using the 2-step process
!     DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
!  By placing code between these two statements, computations can be
!  done while messages are in transition.

      call DAGlobalToLocalBegin(da,X,INSERT_VALUES,localX,ierr)
      call DAGlobalToLocalEnd(da,X,INSERT_VALUES,localX,ierr)

!  Get pointer to vector data

      call VecGetArray(localX,xx,idx,ierr)

!  Get local grid boundaries

      call DAGetCorners(da,xs,ys,PETSC_NULL_INTEGER,xm,ym,              &
     &     PETSC_NULL_INTEGER,ierr)
      call DAGetGhostCorners(da,gxs,gys,PETSC_NULL_INTEGER,gxm,gym,     &
     &     PETSC_NULL_INTEGER,ierr)

!  Get the global node numbers for all local nodes, including ghost points

      call DAGetGlobalIndices(da,nloc,ltog,idltog,ierr)

!  Compute entries for the locally owned part of the Jacobian.
!   - Currently, all PETSc parallel matrix formats are partitioned by
!     contiguous chunks of rows across the processors. The 'grow'
!     parameter computed below specifies the global row number
!     corresponding to each local grid point.
!   - Each processor needs to insert only elements that it owns
!     locally (but any non-local elements will be sent to the
!     appropriate processor during matrix assembly).
!   - Always specify global rows and columns of matrix entries.
!   - Here, we set all entries for a particular row at once.

      do 10 j=ys,ys+ym-1
        row = (j - gys)*gxm + xs - gxs
        do 20 i=xs,xs+xm-1
          row = row + 1
          grow = ltog(idltog+row)
          if (i .eq. 0 .or. j .eq. 0 .or. i .eq. (mx-1) .or.            &
     &        j .eq. (my-1)) then
             call MatSetValues(jac,1,grow,1,grow,one,INSERT_VALUES,ierr)
             go to 20
          endif
          v(1)   = -hxdhy
          col(1) = ltog(idltog+row - gxm)
          v(2)   = -hydhx
          col(2) = ltog(idltog+row - 1)
          v(3)   = two*(hydhx + hxdhy) - sc*lambda*exp(xx(idx+row))
          col(3) = grow
          v(4)   = -hydhx
          col(4) = ltog(idltog+row + 1)
          v(5)   = -hxdhy
          col(5) = ltog(idltog+row + gxm)
          call MatSetValues(jac,1,grow,5,col,v,INSERT_VALUES,ierr)
 20     continue
 10   continue

!  Assemble matrix, using the 2-step process:
!    MatAssemblyBegin(), MatAssemblyEnd().
!  By placing code between these two statements, computations can be
!  done while messages are in transition.

      call MatAssemblyBegin(jac,MAT_FINAL_ASSEMBLY,ierr)
      call VecRestoreArray(localX,xx,idx,ierr)
      call MatAssemblyEnd(jac,MAT_FINAL_ASSEMBLY,ierr)
      return
      end


! -------------------------------------------------------------------
!
!   MyMult - user-provided matrix-vector multiply routine
!
!   Input Parameters:
!.  J - shell matrix
!.  X - input vector
!
!   Output Parameter:
!.  F - result vector
!
      subroutine  MyMult(J,X,F,ierr)
      implicit none
      Mat     J,B
      Vec     X,F
      integer ierr,mx,my
      DA      da
      Vec     localX,localF

      common   /mycommon/ B,mx,my,localX,localF,da
!
!     Here we use the explicitly formed matrix B; users would
!     instead write their own matrix-vector product routine
!
      call MatMult(B,X,F,ierr)
      return
      end
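
! -------------------------------------------------------------------
!
!   MyMultStencil - a sketch (added here for illustration; it is NOT
!   used or registered by this example, and its name is our own
!   invention) of what a genuine matrix-free product could look like:
!   the 5-point stencil is applied directly to the input vector, with
!   no stored matrix at all.  For brevity only the constant part of
!   the Jacobian stencil is applied; the state-dependent diagonal term
!   sc*lambda*exp(u) is omitted, since the current iterate is not
!   available through the common block.
!
      subroutine MyMultStencil(J,X,F,ierr)
      implicit none
#include "include/finclude/petsc.h"
#include "include/finclude/petscis.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscda.h"
      Mat         J,B
      Vec         X,F,localX,localF
      DA          da
      PetscOffset idx,idf
      integer     ierr,i,j,row,mx,my,xs,ys,xm,ym
      integer     gxs,gys,gxm,gym
      PetscScalar xx(1),ff(1)
      double precision one,two,hx,hy,hxdhy,hydhx
      common   /mycommon/ B,mx,my,localX,localF,da

      one   = 1.d0
      two   = 2.d0
      hx    = one/(mx-1)
      hy    = one/(my-1)
      hxdhy = hx/hy
      hydhx = hy/hx

!     Scatter ghost points of the input vector, then apply the stencil
      call DAGlobalToLocalBegin(da,X,INSERT_VALUES,localX,ierr)
      call DAGlobalToLocalEnd(da,X,INSERT_VALUES,localX,ierr)
      call VecGetArray(localX,xx,idx,ierr)
      call VecGetArray(localF,ff,idf,ierr)
      call DAGetCorners(da,xs,ys,PETSC_NULL_INTEGER,xm,ym,              &
     &     PETSC_NULL_INTEGER,ierr)
      call DAGetGhostCorners(da,gxs,gys,PETSC_NULL_INTEGER,gxm,gym,     &
     &     PETSC_NULL_INTEGER,ierr)

      do 70 j=ys,ys+ym-1
        row = (j - gys)*gxm + xs - gxs
        do 80 i=xs,xs+xm-1
          row = row + 1
          if (i .eq. 0 .or. j .eq. 0 .or. i .eq. mx-1 .or.              &
     &        j .eq. my-1) then
!           Boundary rows of the Jacobian are rows of the identity
            ff(idf+row) = xx(idx+row)
            goto 80
          endif
          ff(idf+row) = two*(hydhx+hxdhy)*xx(idx+row)                   &
     &                - hydhx*(xx(idx+row-1) + xx(idx+row+1))           &
     &                - hxdhy*(xx(idx+row-gxm) + xx(idx+row+gxm))
 80     continue
 70   continue

      call VecRestoreArray(localX,xx,idx,ierr)
      call VecRestoreArray(localF,ff,idf,ierr)

!     Move the local result into the global output vector
      call DALocalToGlobal(da,localF,INSERT_VALUES,F,ierr)
      return
      end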