Actual source code: ex14.c

/*$Id: ex14.c,v 1.33 2001/08/07 21:30:50 bsmith Exp $*/

/* Program usage:  mpirun -np <procs> ex14 [-help] [all PETSc options] */

static char help[] = "Solves a nonlinear system in parallel with a user-defined Newton method.\n\
Uses KSP to solve the linearized Newton systems.  This solver\n\
is a very simplistic inexact Newton method.  The intent of this code is to\n\
demonstrate the repeated solution of linear systems with the same nonzero pattern.\n\
\n\
This is NOT the recommended approach for solving nonlinear problems with PETSc!\n\
We urge users to employ the SNES component for solving nonlinear problems whenever\n\
possible, as it offers many advantages over coding nonlinear solvers independently.\n\
\n\
We solve the Bratu (SFI - solid fuel ignition) problem in a 2D rectangular\n\
domain, using distributed arrays (DAs) to partition the parallel grid.\n\
The command line options include:\n\
  -par <parameter>, where <parameter> indicates the problem's nonlinearity\n\
     problem SFI:  <parameter> = Bratu parameter (0 <= par <= 6.81)\n\
  -mx <xg>, where <xg> = number of grid points in the x-direction\n\
  -my <yg>, where <yg> = number of grid points in the y-direction\n\
  -Nx <npx>, where <npx> = number of processors in the x-direction\n\
  -Ny <npy>, where <npy> = number of processors in the y-direction\n\n";
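
/*
   Example invocation (the grid size and parameter are chosen here only
   for illustration):

       mpirun -np 4 ex14 -mx 16 -my 16 -par 6.0 -ksp_monitor

   Any KSP runtime option (e.g., -ksp_type, -ksp_rtol) may be appended,
   since KSPSetFromOptions() is called below.
*/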

/*T
   Concepts: KSP^writing a user-defined nonlinear solver (parallel Bratu example);
   Concepts: DA^using distributed arrays;
   Processors: n
T*/

/* ------------------------------------------------------------------------

    Solid Fuel Ignition (SFI) problem.  This problem is modeled by
    the partial differential equation

            -Laplacian u - lambda*exp(u) = 0,  0 < x,y < 1,

    with boundary conditions

             u = 0  for  x = 0, x = 1, y = 0, y = 1.

    A finite difference approximation with the usual 5-point stencil
    is used to discretize the boundary value problem to obtain a nonlinear
    system of equations.

    The SNES version of this problem is:  snes/examples/tutorials/ex5.c
    We urge users to employ the SNES component for solving nonlinear
    problems whenever possible, as it offers many advantages over coding
    nonlinear solvers independently.

  ------------------------------------------------------------------------- */
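
/*
    For reference, the 5-point discretization used in ComputeFunction()
    below reduces, at each interior node (i,j), to

       (2*u(i,j) - u(i-1,j) - u(i+1,j))*(hy/hx)
     + (2*u(i,j) - u(i,j-1) - u(i,j+1))*(hx/hy)
     - hx*hy*lambda*exp(u(i,j)) = 0,

    where hx = 1/(mx-1) and hy = 1/(my-1) are the mesh spacings.
*/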

/* 
   Include "petscda.h" so that we can use distributed arrays (DAs).
   Include "petscksp.h" so that we can use KSP solvers.  Note that this
   file automatically includes:
     petsc.h       - base PETSc routines   petscvec.h - vectors
     petscsys.h    - system routines       petscmat.h - matrices
     petscis.h     - index sets            petscksp.h - Krylov subspace methods
     petscviewer.h - viewers               petscpc.h  - preconditioners
*/
#include "petscda.h"
#include "petscksp.h"

/* 
   User-defined application context - contains data needed by the 
   application-provided call-back routines, ComputeJacobian() and
   ComputeFunction().
*/
typedef struct {
   PetscReal   param;           /* test problem parameter */
   int         mx,my;           /* discretization in x,y directions */
   Vec         localX,localF;   /* ghosted local vectors */
   DA          da;              /* distributed array data structure */
   int         rank;            /* processor rank */
} AppCtx;

/* 
   User-defined routines
*/
extern int ComputeFunction(AppCtx*,Vec,Vec),FormInitialGuess(AppCtx*,Vec);
extern int ComputeJacobian(AppCtx*,Vec,Mat,MatStructure*);

int main(int argc,char **argv)
{
  /* -------------- Data to define application problem ---------------- */
  MPI_Comm  comm;                /* communicator */
  KSP       ksp;                 /* linear solver */
  Vec       X,Y,F;               /* solution, update, residual vectors */
  Mat       J;                   /* Jacobian matrix */
  AppCtx    user;                /* user-defined work context */
  int       Nx,Ny;               /* number of processors in x- and y- directions */
  int       size;                /* number of processors */
  PetscReal bratu_lambda_max = 6.81,bratu_lambda_min = 0.;
  int       m,N;

  /* --------------- Data to define nonlinear solver -------------- */
  PetscReal    rtol = 1.e-8;        /* relative convergence tolerance */
  PetscReal    xtol = 1.e-8;        /* step convergence tolerance */
  PetscReal    ttol;                /* convergence tolerance */
  PetscReal    fnorm,ynorm,xnorm;   /* various vector norms */
  int          max_nonlin_its = 10; /* maximum number of nonlinear solver iterations */
  int          max_functions = 50;  /* maximum number of function evaluations */
  int          lin_its;             /* number of linear solver iterations for each step */
  int          i;                   /* nonlinear solve iteration number */
  MatStructure mat_flag;            /* flag indicating structure of preconditioner matrix */
  PetscTruth   no_output;           /* flag indicating whether to suppress output */
  PetscScalar  mone = -1.0;

  PetscInitialize(&argc,&argv,(char *)0,help);
  comm = PETSC_COMM_WORLD;
  MPI_Comm_rank(comm,&user.rank);
  PetscOptionsHasName(PETSC_NULL,"-no_output",&no_output);

  /*
     Initialize problem parameters
  */
  user.mx = 4; user.my = 4; user.param = 6.0;
  PetscOptionsGetInt(PETSC_NULL,"-mx",&user.mx,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-my",&user.my,PETSC_NULL);
  PetscOptionsGetReal(PETSC_NULL,"-par",&user.param,PETSC_NULL);
  if (user.param >= bratu_lambda_max || user.param <= bratu_lambda_min) {
    SETERRQ(1,"Lambda is out of range");
  }
  N = user.mx*user.my;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create linear solver context
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  KSPCreate(comm,&ksp);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create vector data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create distributed array (DA) to manage parallel grid and vectors
  */
  MPI_Comm_size(comm,&size);
  Nx = PETSC_DECIDE; Ny = PETSC_DECIDE;
  PetscOptionsGetInt(PETSC_NULL,"-Nx",&Nx,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-Ny",&Ny,PETSC_NULL);
  if (Nx*Ny != size && (Nx != PETSC_DECIDE || Ny != PETSC_DECIDE))
    SETERRQ(1,"Incompatible number of processors:  Nx * Ny != size");
  DACreate2d(comm,DA_NONPERIODIC,DA_STENCIL_STAR,user.mx,
             user.my,Nx,Ny,1,1,PETSC_NULL,PETSC_NULL,&user.da);
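  /*
     In the DACreate2d() call above, the two arguments 1,1 following Nx,Ny
     specify one degree of freedom per grid node and a stencil width of one,
     matching the 5-point stencil used in ComputeFunction() and
     ComputeJacobian().
  */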

  /*
     Extract global and local vectors from DA; then duplicate for remaining
     vectors that are the same types
  */
  DACreateGlobalVector(user.da,&X);
  DACreateLocalVector(user.da,&user.localX);
  VecDuplicate(X,&F);
  VecDuplicate(X,&Y);
  VecDuplicate(user.localX,&user.localF);


  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create matrix data structure for Jacobian
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Note:  For the parallel case, vectors and matrices MUST be partitioned
     accordingly.  When using distributed arrays (DAs) to create vectors,
     the DAs determine the problem partitioning.  We must explicitly
     specify the local matrix dimensions upon its creation for compatibility
     with the vector distribution.  Thus, the generic MatCreate() routine
     is NOT sufficient when working with distributed arrays.

     Note: Here we only approximately preallocate storage space for the
     Jacobian.  See the users manual for a discussion of better techniques
     for preallocating matrix memory.
  */
  if (size == 1) {
    MatCreateSeqAIJ(comm,N,N,5,PETSC_NULL,&J);
  } else {
    VecGetLocalSize(X,&m);
    MatCreateMPIAIJ(comm,m,m,N,N,5,PETSC_NULL,3,PETSC_NULL,&J);
  }
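  /*
     The preallocation figures above follow from the 5-point stencil: at
     most 5 nonzeros per row for the sequential matrix, and for the parallel
     matrix 5 per row in the diagonal (on-process) block plus an estimate of
     3 per row for entries coupling to other processes.
  */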

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Customize linear solver; set runtime options
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Set runtime options (e.g., -ksp_monitor -ksp_rtol <rtol> -ksp_type <type>)
  */
  KSPSetFromOptions(ksp);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Evaluate initial guess
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  FormInitialGuess(&user,X);
  ComputeFunction(&user,X,F);   /* Compute F(X)    */
  VecNorm(F,NORM_2,&fnorm);     /* fnorm = || F || */
  ttol = fnorm*rtol;            /* converged when || F || drops below rtol * initial || F || */
  if (!no_output) PetscPrintf(comm,"Initial function norm = %g\n",fnorm);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve nonlinear system with a user-defined method
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /* 
      This solver is a very simplistic inexact Newton method, with no
      damping strategies or bells and whistles. The intent of this code
      is merely to demonstrate the repeated solution with KSP of linear
      systems with the same nonzero structure.

      This is NOT the recommended approach for solving nonlinear problems
      with PETSc!  We urge users to employ the SNES component for solving
      nonlinear problems whenever possible with application codes, as it
      offers many advantages over coding nonlinear solvers independently.
   */
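
  /*
     Each pass through the loop below performs one inexact Newton step:
       1. assemble the Jacobian J(x_k),
       2. solve J(x_k) y = F(x_k) approximately with KSP,
       3. update x_{k+1} = x_k - y,
     and then tests ||F(x_{k+1})|| and ||y|| against the tolerances.
  */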

  for (i=0; i<max_nonlin_its; i++) {

    /* 
        Compute the Jacobian matrix.  See the comments in this routine for
        important information about setting the flag mat_flag.
     */
    ComputeJacobian(&user,X,J,&mat_flag);

    /* 
        Solve J Y = F, where J is the Jacobian matrix.
          - First, set the KSP linear operators.  Here the matrix that
            defines the linear system also serves as the preconditioning
            matrix.
          - Then solve the Newton system.
     */
    KSPSetOperators(ksp,J,J,mat_flag);
    KSPSetRhs(ksp,F);
    KSPSetSolution(ksp,Y);
    KSPSolve(ksp);
    KSPGetIterationNumber(ksp,&lin_its);

    /* 
       Compute updated iterate
     */
    VecNorm(Y,NORM_2,&ynorm);       /* ynorm = || Y || */
    VecAYPX(&mone,X,Y);             /* Y <- X - Y      */
    VecCopy(Y,X);                   /* X <- Y          */
    VecNorm(X,NORM_2,&xnorm);       /* xnorm = || X || */
    if (!no_output) {
      PetscPrintf(comm,"   linear solve iterations = %d, xnorm=%g, ynorm=%g\n",lin_its,xnorm,ynorm);
    }

    /* 
       Evaluate new nonlinear function
     */
    ComputeFunction(&user,X,F);     /* Compute F(X)    */
    VecNorm(F,NORM_2,&fnorm);       /* fnorm = || F || */
    if (!no_output) {
      PetscPrintf(comm,"Iteration %d, function norm = %g\n",i+1,fnorm);
    }

    /*
       Test for convergence
     */
    if (fnorm <= ttol) {
      if (!no_output) {
        PetscPrintf(comm,"Converged due to function norm %g < %g (relative tolerance)\n",fnorm,ttol);
      }
      break;
    }
    if (ynorm < xtol*xnorm) {
      if (!no_output) {
        PetscPrintf(comm,"Converged due to small update length: %g < %g * %g\n",ynorm,xtol,xnorm);
      }
      break;
    }
    if (i > max_functions) {
      if (!no_output) {
        PetscPrintf(comm,"Exceeded maximum number of function evaluations: %d > %d\n",i,max_functions);
      }
      break;
    }
  }
  PetscPrintf(comm,"Number of Newton iterations = %d\n",i+1);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  MatDestroy(J);           VecDestroy(Y);
  VecDestroy(user.localX); VecDestroy(X);
  VecDestroy(user.localF); VecDestroy(F);
  KSPDestroy(ksp);  DADestroy(user.da);
  PetscFinalize();

  return 0;
}
/* ------------------------------------------------------------------- */
/* 
   FormInitialGuess - Forms initial approximation.

   Input Parameters:
   user - user-defined application context
   X - vector

   Output Parameter:
   X - vector
 */
int FormInitialGuess(AppCtx *user,Vec X)
{
  int          i,j,row,mx,my,xs,ys,xm,ym,gxm,gym,gxs,gys;
  PetscReal    one = 1.0,lambda,temp1,temp,hx,hy;
  PetscScalar  *x;
  Vec          localX = user->localX;

  mx = user->mx;            my = user->my;            lambda = user->param;
  hx = one/(PetscReal)(mx-1);  hy = one/(PetscReal)(my-1);
  temp1 = lambda/(lambda + one);

  /*
     Get a pointer to vector data.
       - For default PETSc vectors, VecGetArray() returns a pointer to
         the data array.  Otherwise, the routine is implementation dependent.
       - You MUST call VecRestoreArray() when you no longer need access to
         the array.
  */
  VecGetArray(localX,&x);

  /*
     Get local grid boundaries (for 2-dimensional DA):
       xs, ys   - starting grid indices (no ghost points)
       xm, ym   - widths of local grid (no ghost points)
       gxs, gys - starting grid indices (including ghost points)
       gxm, gym - widths of local grid (including ghost points)
  */
  DAGetCorners(user->da,&xs,&ys,PETSC_NULL,&xm,&ym,PETSC_NULL);
  DAGetGhostCorners(user->da,&gxs,&gys,PETSC_NULL,&gxm,&gym,PETSC_NULL);

  /*
     Compute initial guess over the locally owned part of the grid
  */
  for (j=ys; j<ys+ym; j++) {
    temp = (PetscReal)(PetscMin(j,my-j-1))*hy;
    for (i=xs; i<xs+xm; i++) {
      row = i - gxs + (j - gys)*gxm;
      if (i == 0 || j == 0 || i == mx-1 || j == my-1) {
        x[row] = 0.0;
        continue;
      }
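      /* interior points: a rough initial guess that grows like the square
         root of the scaled distance to the nearest boundary */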
      x[row] = temp1*sqrt(PetscMin((PetscReal)(PetscMin(i,mx-i-1))*hx,temp));
    }
  }

  /*
     Restore vector
  */
  VecRestoreArray(localX,&x);

  /*
     Insert values into global vector
  */
  DALocalToGlobal(user->da,localX,INSERT_VALUES,X);
  return 0;
}
/* ------------------------------------------------------------------- */
/* 
   ComputeFunction - Evaluates nonlinear function, F(x).

   Input Parameters:
.  X - input vector
.  user - user-defined application context

   Output Parameter:
.  F - function vector
 */
int ComputeFunction(AppCtx *user,Vec X,Vec F)
{
  int          i,j,row,mx,my,xs,ys,xm,ym,gxs,gys,gxm,gym;
  PetscReal    two = 2.0,one = 1.0,lambda,hx,hy,hxdhy,hydhx,sc;
  PetscScalar  u,uxx,uyy,*x,*f;
  Vec          localX = user->localX,localF = user->localF;

  mx = user->mx;            my = user->my;            lambda = user->param;
  hx = one/(PetscReal)(mx-1);  hy = one/(PetscReal)(my-1);
  sc = hx*hy*lambda;        hxdhy = hx/hy;            hydhx = hy/hx;

  /*
     Scatter ghost points to local vector, using the 2-step process
        DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  DAGlobalToLocalBegin(user->da,X,INSERT_VALUES,localX);
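  /* (purely local computations that do not need ghost values could be
      placed here, overlapping with the communication) */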
  DAGlobalToLocalEnd(user->da,X,INSERT_VALUES,localX);

  /*
     Get pointers to vector data
  */
  VecGetArray(localX,&x);
  VecGetArray(localF,&f);

  /*
     Get local grid boundaries
  */
  DAGetCorners(user->da,&xs,&ys,PETSC_NULL,&xm,&ym,PETSC_NULL);
  DAGetGhostCorners(user->da,&gxs,&gys,PETSC_NULL,&gxm,&gym,PETSC_NULL);

  /*
     Compute function over the locally owned part of the grid
  */
  for (j=ys; j<ys+ym; j++) {
    row = (j - gys)*gxm + xs - gxs - 1;  /* ghosted index just before the first owned point in row j */
    for (i=xs; i<xs+xm; i++) {
      row++;
      if (i == 0 || j == 0 || i == mx-1 || j == my-1) {
        f[row] = x[row];                 /* boundary points: enforce u = 0 via f = u */
        continue;
      }
      u = x[row];
      uxx = (two*u - x[row-1] - x[row+1])*hydhx;
      uyy = (two*u - x[row-gxm] - x[row+gxm])*hxdhy;
      f[row] = uxx + uyy - sc*PetscExpScalar(u);
    }
  }

  /*
     Restore vectors
  */
  VecRestoreArray(localX,&x);
  VecRestoreArray(localF,&f);

  /*
     Insert values into global vector
  */
  DALocalToGlobal(user->da,localF,INSERT_VALUES,F);
  PetscLogFlops(11*ym*xm);
  return 0;
}
/* ------------------------------------------------------------------- */
/*
   ComputeJacobian - Evaluates Jacobian matrix.

   Input Parameters:
.  x - input vector
.  user - user-defined application context

   Output Parameters:
.  jac - Jacobian matrix
.  flag - flag indicating matrix structure

   Notes:
   Due to grid point reordering with DAs, we must always work
   with the local grid points, and then transform them to the new
   global numbering with the "ltog" mapping (via DAGetGlobalIndices()).
   We cannot work directly with the global numbers for the original
   uniprocessor grid!
*/
int ComputeJacobian(AppCtx *user,Vec X,Mat jac,MatStructure *flag)
{
  Vec          localX = user->localX;   /* local vector */
  int          *ltog;                   /* local-to-global mapping */
  int          i,j,row,mx,my,col[5];
  int          nloc,xs,ys,xm,ym,gxs,gys,gxm,gym,grow;
  PetscScalar  two = 2.0,one = 1.0,lambda,v[5],hx,hy,hxdhy,hydhx,sc,*x;

  mx = user->mx;            my = user->my;            lambda = user->param;
  hx = one/(PetscReal)(mx-1);  hy = one/(PetscReal)(my-1);
  sc = hx*hy;               hxdhy = hx/hy;            hydhx = hy/hx;

  /*
     Scatter ghost points to local vector, using the 2-step process
        DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  DAGlobalToLocalBegin(user->da,X,INSERT_VALUES,localX);
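  /* (as in ComputeFunction(), independent local work could be placed
      here to overlap with the ghost-point communication) */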
  DAGlobalToLocalEnd(user->da,X,INSERT_VALUES,localX);

  /*
     Get pointer to vector data
  */
  VecGetArray(localX,&x);

  /*
     Get local grid boundaries
  */
  DAGetCorners(user->da,&xs,&ys,PETSC_NULL,&xm,&ym,PETSC_NULL);
  DAGetGhostCorners(user->da,&gxs,&gys,PETSC_NULL,&gxm,&gym,PETSC_NULL);

  /*
     Get the global node numbers for all local nodes, including ghost points
  */
  DAGetGlobalIndices(user->da,&nloc,&ltog);

  /* 
     Compute entries for the locally owned part of the Jacobian.
      - Currently, all PETSc parallel matrix formats are partitioned by
        contiguous chunks of rows across the processors. The "grow"
        parameter computed below specifies the global row number
        corresponding to each local grid point.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
      - Here, we set all entries for a particular row at once.
  */
  for (j=ys; j<ys+ym; j++) {
    row = (j - gys)*gxm + xs - gxs - 1;  /* ghosted index just before the first owned point in row j */
    for (i=xs; i<xs+xm; i++) {
      row++;
      grow = ltog[row];
      /* boundary points */
      if (i == 0 || j == 0 || i == mx-1 || j == my-1) {
        MatSetValues(jac,1,&grow,1,&grow,&one,INSERT_VALUES);
        continue;
      }
      /* interior grid points */
      v[0] = -hxdhy; col[0] = ltog[row - gxm];
      v[1] = -hydhx; col[1] = ltog[row - 1];
      v[2] = two*(hydhx + hxdhy) - sc*lambda*PetscExpScalar(x[row]); col[2] = grow;
      v[3] = -hydhx; col[3] = ltog[row + 1];
      v[4] = -hxdhy; col[4] = ltog[row + gxm];
      MatSetValues(jac,1,&grow,5,col,v,INSERT_VALUES);
    }
  }

  /* 
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  MatAssemblyBegin(jac,MAT_FINAL_ASSEMBLY);
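  /* restoring the local vector array below is independent work that
     overlaps with the communication begun by MatAssemblyBegin() */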
  VecRestoreArray(localX,&x);
  MatAssemblyEnd(jac,MAT_FINAL_ASSEMBLY);

  /*
     Set flag to indicate that the Jacobian matrix retains an identical
     nonzero structure throughout all nonlinear iterations (although the
     values of the entries change). Thus, we can save some work in setting
     up the preconditioner (e.g., no need to redo symbolic factorization for
     ILU/ICC preconditioners).
      - If the nonzero structure of the matrix is different during
        successive linear solves, then the flag DIFFERENT_NONZERO_PATTERN
        must be used instead.  If you are unsure whether the matrix
        structure has changed or not, use the flag DIFFERENT_NONZERO_PATTERN.
      - Caution:  If you specify SAME_NONZERO_PATTERN, PETSc
        believes your assertion and does not check the structure
        of the matrix.  If you erroneously claim that the structure
        is the same when it actually is not, the new preconditioner
        will not function correctly.  Thus, use this optimization
        feature with caution!
  */
  *flag = SAME_NONZERO_PATTERN;
  return 0;
}