Drizzled Public API Documentation

row0vers.cc — InnoDB row version handling (MVCC): implicit-lock detection for
secondary index records, purge-preservation checks, and construction of old
record versions for consistent and semi-consistent reads.

00001 /*****************************************************************************
00002 
00003 Copyright (C) 1997, 2009, Innobase Oy. All Rights Reserved.
00004 
00005 This program is free software; you can redistribute it and/or modify it under
00006 the terms of the GNU General Public License as published by the Free Software
00007 Foundation; version 2 of the License.
00008 
00009 This program is distributed in the hope that it will be useful, but WITHOUT
00010 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
00011 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
00012 
00013 You should have received a copy of the GNU General Public License along with
00014 this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
00015 St, Fifth Floor, Boston, MA 02110-1301 USA
00016 
00017 *****************************************************************************/
00018 
/**************************************************//**
@file row/row0vers.cc
Row versions
*******************************************************/
00026 #include "row0vers.h"
00027 
00028 #ifdef UNIV_NONINL
00029 #include "row0vers.ic"
00030 #endif
00031 
00032 #include "dict0dict.h"
00033 #include "dict0boot.h"
00034 #include "btr0btr.h"
00035 #include "mach0data.h"
00036 #include "trx0rseg.h"
00037 #include "trx0trx.h"
00038 #include "trx0roll.h"
00039 #include "trx0undo.h"
00040 #include "trx0purge.h"
00041 #include "trx0rec.h"
00042 #include "que0que.h"
00043 #include "row0row.h"
00044 #include "row0upd.h"
00045 #include "rem0cmp.h"
00046 #include "read0read.h"
00047 #include "lock0lock.h"
00048 
00049 /*****************************************************************/
/** Finds out if an active transaction has an implicit x-lock on a secondary
index record: it has one if it modified or inserted the record and is still
active.  The kernel mutex is temporarily released while the clustered index
record and its earlier versions are looked up, and is re-acquired on every
path before returning.
@return	NULL if no active transaction holds an implicit x-lock on rec;
otherwise the active transaction holding it (kernel mutex held on return) */
UNIV_INTERN
trx_t*
row_vers_impl_x_locked_off_kernel(
/*==============================*/
	const rec_t*	rec,	/*!< in: record in a secondary index */
	dict_index_t*	index,	/*!< in: the secondary index of rec */
	const ulint*	offsets)/*!< in: rec_get_offsets(rec, index) */
{
	dict_index_t*	clust_index;	/* clustered index of index->table */
	rec_t*		clust_rec;	/* clustered index record for rec */
	ulint*		clust_offsets;	/* offsets of the version examined */
	rec_t*		version;	/* current version in the undo walk */
	trx_id_t	trx_id;		/* trx id stamped in clust_rec */
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	dtuple_t*	row;
	dtuple_t*	entry	= NULL; /* assignment to eliminate compiler
					warning */
	trx_t*		trx;
	ulint		rec_del;	/* delete mark flag of rec */
#ifdef UNIV_DEBUG
	ulint		err;
#endif /* UNIV_DEBUG */
	mtr_t		mtr;
	ulint		comp;		/* nonzero if compact record format */

	ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	mutex_exit(&kernel_mutex);

	mtr_start(&mtr);

	/* Search for the clustered index record: this is a time-consuming
	operation: therefore we release the kernel mutex; also, the release
	is required by the latching order convention. The latch on the
	clustered index locks the top of the stack of versions. We also
	reserve purge_latch to lock the bottom of the version stack. */

	clust_rec = row_get_clust_rec(BTR_SEARCH_LEAF, rec, index,
				      &clust_index, &mtr);
	if (!clust_rec) {
		/* In a rare case it is possible that no clust rec is found
		for a secondary index record: if in row0umod.c
		row_undo_mod_remove_clust_low() we have already removed the
		clust rec, while purge is still cleaning and removing
		secondary index records associated with earlier versions of
		the clustered index record. In that case there cannot be
		any implicit lock on the secondary index record, because
		an active transaction which has modified the secondary index
		record has also modified the clustered index record. And in
		a rollback we always undo the modifications to secondary index
		records before the clustered index record. */

		mutex_enter(&kernel_mutex);
		mtr_commit(&mtr);

		return(NULL);
	}

	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(clust_rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);
	trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets);

	mtr_s_lock(&(purge_sys->latch), &mtr);

	mutex_enter(&kernel_mutex);

	trx = NULL;
	if (!trx_is_active(trx_id)) {
		/* The transaction that modified or inserted clust_rec is no
		longer active: no implicit lock on rec */
		goto exit_func;
	}

	if (!lock_check_trx_id_sanity(trx_id, clust_rec, clust_index,
				      clust_offsets, TRUE)) {
		/* Corruption noticed: try to avoid a crash by returning */
		goto exit_func;
	}

	comp = page_rec_is_comp(rec);
	ut_ad(index->table == clust_index->table);
	ut_ad(!!comp == dict_table_is_comp(index->table));
	ut_ad(!comp == !page_rec_is_comp(clust_rec));

	/* We look up if some earlier version, which was modified by the trx_id
	transaction, of the clustered index record would require rec to be in
	a different state (delete marked or unmarked, or have different field
	values, or not existing). If there is such a version, then rec was
	modified by the trx_id transaction, and it has an implicit x-lock on
	rec. Note that if clust_rec itself would require rec to be in a
	different state, then the trx_id transaction has not yet had time to
	modify rec, and does not necessarily have an implicit x-lock on rec. */

	rec_del = rec_get_deleted_flag(rec, comp);
	trx = NULL;

	version = clust_rec;

	for (;;) {
		rec_t*		prev_version;
		ulint		vers_del;
		row_ext_t*	ext;
		trx_id_t	prev_trx_id;

		mutex_exit(&kernel_mutex);

		/* While we retrieve an earlier version of clust_rec, we
		release the kernel mutex, because it may take time to access
		the disk. After the release, we have to check if the trx_id
		transaction is still active. We keep the semaphore in mtr on
		the clust_rec page, so that no other transaction can update
		it and get an implicit x-lock on rec. */

		heap2 = heap;
		heap = mem_heap_create(1024);
		/* The return code is only consulted in the debug
		assertions below; release builds discard it on purpose. */
#ifdef UNIV_DEBUG
		err =
#endif /* UNIV_DEBUG */
		trx_undo_prev_version_build(clust_rec, &mtr, version,
					    clust_index, clust_offsets,
					    heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (prev_version == NULL) {
			mutex_enter(&kernel_mutex);

			if (!trx_is_active(trx_id)) {
				/* Transaction no longer active: no
				implicit x-lock */

				break;
			}

			/* If the transaction is still active,
			clust_rec must be a fresh insert, because no
			previous version was found. */
			ut_ad(err == DB_SUCCESS);

			/* It was a freshly inserted version: there is an
			implicit x-lock on rec */

			trx = trx_get_on_id(trx_id);

			break;
		}

		clust_offsets = rec_get_offsets(prev_version, clust_index,
						NULL, ULINT_UNDEFINED, &heap);

		vers_del = rec_get_deleted_flag(prev_version, comp);
		prev_trx_id = row_get_rec_trx_id(prev_version, clust_index,
						 clust_offsets);

		/* If the trx_id and prev_trx_id are different and if
		the prev_version is marked deleted then the
		prev_trx_id must have already committed for the trx_id
		to be able to modify the row. Therefore, prev_trx_id
		cannot hold any implicit lock. */
		if (vers_del && trx_id != prev_trx_id) {

			mutex_enter(&kernel_mutex);
			break;
		}

		/* The stack of versions is locked by mtr.  Thus, it
		is safe to fetch the prefixes for externally stored
		columns. */
		row = row_build(ROW_COPY_POINTERS, clust_index, prev_version,
				clust_offsets, NULL, &ext, heap);
		entry = row_build_index_entry(row, ext, index, heap);
		/* entry may be NULL if a record was inserted in place
		of a deleted record, and the BLOB pointers of the new
		record were not initialized yet.  But in that case,
		prev_version should be NULL. */
		ut_a(entry);

		mutex_enter(&kernel_mutex);

		if (!trx_is_active(trx_id)) {
			/* Transaction no longer active: no implicit x-lock */

			break;
		}

		/* If we get here, we know that the trx_id transaction is
		still active and it has modified prev_version. Let us check
		if prev_version would require rec to be in a different
		state. */

		/* The previous version of clust_rec must be
		accessible, because the transaction is still active
		and clust_rec was not a fresh insert. */
		ut_ad(err == DB_SUCCESS);

		/* We check if entry and rec are identified in the alphabetical
		ordering */
		if (0 == cmp_dtuple_rec(entry, rec, offsets)) {
			/* The delete marks of rec and prev_version should be
			equal for rec to be in the state required by
			prev_version */

			if (rec_del != vers_del) {
				trx = trx_get_on_id(trx_id);

				break;
			}

			/* It is possible that the row was updated so that the
			secondary index record remained the same in
			alphabetical ordering, but the field values changed
			still. For example, 'abc' -> 'ABC'. Check also that. */

			dtuple_set_types_binary(entry,
						dtuple_get_n_fields(entry));
			if (0 != cmp_dtuple_rec(entry, rec, offsets)) {

				trx = trx_get_on_id(trx_id);

				break;
			}
		} else if (!rec_del) {
			/* The delete mark should be set in rec for it to be
			in the state required by prev_version */

			trx = trx_get_on_id(trx_id);

			break;
		}

		if (trx_id != prev_trx_id) {
			/* The versions modified by the trx_id transaction end
			to prev_version: no implicit x-lock */

			break;
		}

		version = prev_version;
	}/* for (;;) */

exit_func:
	/* NOTE: every path reaching this label has re-entered the kernel
	mutex, so the caller's lock is held again on return. */
	mtr_commit(&mtr);
	mem_heap_free(heap);

	return(trx);
}
00304 
00305 /*****************************************************************/
00309 UNIV_INTERN
00310 ibool
00311 row_vers_must_preserve_del_marked(
00312 /*==============================*/
00313   trx_id_t  trx_id, 
00314   mtr_t*    mtr)  
00317 {
00318 #ifdef UNIV_SYNC_DEBUG
00319   ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
00320 #endif /* UNIV_SYNC_DEBUG */
00321 
00322   mtr_s_lock(&(purge_sys->latch), mtr);
00323 
00324   if (trx_purge_update_undo_must_exist(trx_id)) {
00325 
00326     /* A purge operation is not yet allowed to remove this
00327     delete marked record */
00328 
00329     return(TRUE);
00330   }
00331 
00332   return(FALSE);
00333 }
00334 
00335 /*****************************************************************/
/** Finds out if a version of a clustered index record has (or, with
also_curr, currently has) ientry as its secondary index entry.  Walks
backwards through the undo log versions of rec, builds the secondary
index entry of each non-delete-marked version, and compares it with
ientry using the collation-aware dtuple_coll_cmp().
@return	TRUE if some version's secondary index entry equals ientry */
UNIV_INTERN
ibool
row_vers_old_has_index_entry(
/*=========================*/
	ibool		also_curr,/*!< in: TRUE if the current (latest)
				version of rec should be checked too */
	const rec_t*	rec,	/*!< in: record in the clustered index; the
				page must be latched by mtr */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec's page */
	dict_index_t*	index,	/*!< in: the secondary index */
	const dtuple_t*	ientry)	/*!< in: the secondary index entry */
{
	const rec_t*	version;	/* current version in the undo walk */
	rec_t*		prev_version;
	dict_index_t*	clust_index;
	ulint*		clust_offsets;	/* offsets of the version examined */
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	const dtuple_t*	row;
	const dtuple_t*	entry;		/* sec. index entry built from row */
	ulint		err;
	ulint		comp;		/* nonzero if compact record format */

	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
	/* Lock the bottom of the version stack for the duration of mtr. */
	mtr_s_lock(&(purge_sys->latch), mtr);

	clust_index = dict_table_get_first_index(index->table);

	comp = page_rec_is_comp(rec);
	ut_ad(!dict_table_is_comp(index->table) == !comp);
	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);

	if (also_curr && !rec_get_deleted_flag(rec, comp)) {
		row_ext_t*	ext;

		/* The stack of versions is locked by mtr.
		Thus, it is safe to fetch the prefixes for
		externally stored columns. */
		row = row_build(ROW_COPY_POINTERS, clust_index,
				rec, clust_offsets, NULL, &ext, heap);
		entry = row_build_index_entry(row, ext, index, heap);

		/* If entry == NULL, the record contains unset BLOB
		pointers.  This must be a freshly inserted record.  If
		this is called from
		row_purge_remove_sec_if_poss_low(), the thread will
		hold latches on the clustered index and the secondary
		index.  Because the insert works in three steps:

			(1) insert the record to clustered index
			(2) store the BLOBs and update BLOB pointers
			(3) insert records to secondary indexes

		the purge thread can safely ignore freshly inserted
		records and delete the secondary index record.  The
		thread that inserted the new record will be inserting
		the secondary index records. */

		/* NOTE that we cannot do the comparison as binary
		fields because the row is maybe being modified so that
		the clustered index record has already been updated to
		a different binary value in a char field, but the
		collation identifies the old and new value anyway! */
		if (entry && !dtuple_coll_cmp(ientry, entry)) {

			mem_heap_free(heap);

			return(TRUE);
		}
	}

	version = rec;

	for (;;) {
		/* Rebuild the previous version into a fresh heap, then
		free the heap holding the old version and offsets. */
		heap2 = heap;
		heap = mem_heap_create(1024);
		err = trx_undo_prev_version_build(rec, mtr, version,
						  clust_index, clust_offsets,
						  heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (err != DB_SUCCESS || !prev_version) {
			/* Versions end here */

			mem_heap_free(heap);

			return(FALSE);
		}

		clust_offsets = rec_get_offsets(prev_version, clust_index,
						NULL, ULINT_UNDEFINED, &heap);

		if (!rec_get_deleted_flag(prev_version, comp)) {
			row_ext_t*	ext;

			/* The stack of versions is locked by mtr.
			Thus, it is safe to fetch the prefixes for
			externally stored columns. */
			row = row_build(ROW_COPY_POINTERS, clust_index,
					prev_version, clust_offsets,
					NULL, &ext, heap);
			entry = row_build_index_entry(row, ext, index, heap);

			/* If entry == NULL, the record contains unset
			BLOB pointers.  This must be a freshly
			inserted record that we can safely ignore.
			For the justification, see the comments after
			the previous row_build_index_entry() call. */

			/* NOTE that we cannot do the comparison as binary
			fields because maybe the secondary index record has
			already been updated to a different binary value in
			a char field, but the collation identifies the old
			and new value anyway! */

			if (entry && !dtuple_coll_cmp(ientry, entry)) {

				mem_heap_free(heap);

				return(TRUE);
			}
		}

		version = prev_version;
	}
}
00476 
00477 /*****************************************************************/
/** Constructs the version of a clustered index record which a consistent
read view should see.  Walks backwards through the undo log versions of
rec until one is found whose trx id the view sees (or, for a
high-granularity view created by the same transaction, whose undo number
the view already sees); that version is copied into in_heap.
@return	DB_SUCCESS, or an error code propagated from
trx_undo_prev_version_build() if a previous version could not be built */
UNIV_INTERN
ulint
row_vers_build_for_consistent_read(
/*===============================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				page must be latched by mtr */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec's page */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets of rec; updated to the
				offsets of the version last examined */
	read_view_t*	view,	/*!< in: the consistent read view */
	mem_heap_t**	offset_heap,/*!< in/out: heap used for the offsets */
	mem_heap_t*	in_heap,/*!< in: heap into which the visible version
				is copied (must remain valid for the caller) */
	rec_t**		old_vers)/*!< out: the visible version, or NULL if
				rec is a freshly inserted version with no
				visible predecessor */
{
	const rec_t*	version;	/* current version in the undo walk */
	rec_t*		prev_version;
	trx_id_t	trx_id;		/* trx id of the version examined */
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	trx_id = row_get_rec_trx_id(rec, index, *offsets);

	/* The caller must only invoke this when rec itself is not visible
	to the view. */
	ut_ad(!read_view_sees_trx_id(view, trx_id));

	rw_lock_s_lock(&(purge_sys->latch));
	version = rec;

	for (;;) {
		mem_heap_t*	heap2	= heap;
		trx_undo_rec_t* undo_rec;
		roll_ptr_t	roll_ptr;
		undo_no_t	undo_no;
		heap = mem_heap_create(1024);

		/* If we have high-granularity consistent read view and
		creating transaction of the view is the same as trx_id in
		the record we see this record only in the case when
		undo_no of the record is < undo_no in the view. */

		if (view->type == VIEW_HIGH_GRANULARITY
		    && view->creator_trx_id == trx_id) {

			roll_ptr = row_get_rec_roll_ptr(version, index,
							*offsets);
			undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
			undo_no = trx_undo_rec_get_undo_no(undo_rec);
			/* The undo record is no longer needed; reclaim the
			heap space for the next iteration. */
			mem_heap_empty(heap);

			if (view->undo_no > undo_no) {
				/* The view already sees this version: we can
				copy it to in_heap and return */

				buf = static_cast<byte *>(mem_heap_alloc(in_heap,
							 rec_offs_size(*offsets)));
				*old_vers = rec_copy(buf, version, *offsets);
				rec_offs_make_valid(*old_vers, index,
						    *offsets);
				err = DB_SUCCESS;

				break;
			}
		}

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (err != DB_SUCCESS) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		*offsets = rec_get_offsets(prev_version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);

		trx_id = row_get_rec_trx_id(prev_version, index, *offsets);

		if (read_view_sees_trx_id(view, trx_id)) {

			/* The view already sees this version: we can copy
			it to in_heap and return */

			buf = static_cast<byte *>(mem_heap_alloc(in_heap, rec_offs_size(*offsets)));
			*old_vers = rec_copy(buf, prev_version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
	}/* for (;;) */

	/* heap is always non-NULL here: the loop body allocates it before
	any break. */
	mem_heap_free(heap);
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
00609 
00610 /*****************************************************************/
/** Constructs the last committed version of a clustered index record for a
semi-consistent read: walks backwards through the undo log versions of rec
until a version is found whose transaction is no longer active (not found,
not started, or committed in memory).
@return	DB_SUCCESS, or an error code propagated from
trx_undo_prev_version_build() */
UNIV_INTERN
ulint
row_vers_build_for_semi_consistent_read(
/*====================================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				page must be latched by mtr */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec's page */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets of rec; updated to the
				offsets of the version last examined */
	mem_heap_t**	offset_heap,/*!< in/out: heap used for the offsets */
	mem_heap_t*	in_heap,/*!< in: heap into which an older committed
				version is copied */
	const rec_t**	old_vers)/*!< out: the committed version; may point to
				rec itself when rec is already committed, or
				be NULL for a freshly inserted version */
{
	const rec_t*	version;	/* current version in the undo walk */
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;
	trx_id_t	rec_trx_id	= 0;	/* trx id of rec itself;
						captured on first iteration */

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	rw_lock_s_lock(&(purge_sys->latch));
	/* The S-latch on purge_sys prevents the purge view from
	changing.  Thus, if we have an uncommitted transaction at
	this point, then purge cannot remove its undo log even if
	the transaction could commit now. */

	version = rec;

	for (;;) {
		trx_t*		version_trx;
		mem_heap_t*	heap2;
		rec_t*		prev_version;
		trx_id_t	version_trx_id;

		version_trx_id = row_get_rec_trx_id(version, index, *offsets);
		if (rec == version) {
			rec_trx_id = version_trx_id;
		}

		/* trx_get_on_id() requires the kernel mutex. */
		mutex_enter(&kernel_mutex);
		version_trx = trx_get_on_id(version_trx_id);
		mutex_exit(&kernel_mutex);

		if (!version_trx
		    || version_trx->conc_state == TRX_NOT_STARTED
		    || version_trx->conc_state == TRX_COMMITTED_IN_MEMORY) {

			/* We found a version that belongs to a
			committed transaction: return it. */

			if (rec == version) {
				*old_vers = rec;
				err = DB_SUCCESS;
				break;
			}

			/* We assume that a rolled-back transaction stays in
			TRX_ACTIVE state until all the changes have been
			rolled back and the transaction is removed from
			the global list of transactions. */

			if (rec_trx_id == version_trx_id) {
				/* The transaction was committed while
				we searched for earlier versions.
				Return the current version as a
				semi-consistent read. */

				version = rec;
				*offsets = rec_get_offsets(version,
							   index, *offsets,
							   ULINT_UNDEFINED,
							   offset_heap);
			}

			buf = static_cast<byte *>(mem_heap_alloc(in_heap, rec_offs_size(*offsets)));
			*old_vers = rec_copy(buf, version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		/* Build the previous version into a fresh heap, then free
		the heap holding the version just examined. */
		heap2 = heap;
		heap = mem_heap_create(1024);

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
		*offsets = rec_get_offsets(version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);
	}/* for (;;) */

	/* heap may still be NULL if the very first version was already
	committed, hence the guard. */
	if (heap) {
		mem_heap_free(heap);
	}
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}