/**************************************************//**
@file trx/trx0i_s.c
INFORMATION SCHEMA innodb_trx, innodb_locks and innodb_lock_waits tables
fetch code.

The code below fetches the information needed to fill those three
dynamic tables and uploads it into a "transactions table cache" for
later retrieval.
*******************************************************/

#include <config.h>

#include "univ.i"

#if !defined(BUILD_DRIZZLE)
# include <mysql/plugin.h>
#endif

#include "buf0buf.h"
#include "dict0dict.h"
#include "ha0storage.h"
#include "ha_prototypes.h"
#include "hash0hash.h"
#include "lock0iter.h"
#include "lock0lock.h"
#include "mem0mem.h"
#include "page0page.h"
#include "rem0rec.h"
#include "row0row.h"
#include "srv0srv.h"
#include "sync0rw.h"
#include "sync0sync.h"
#include "sync0types.h"
#include "trx0i_s.h"
#include "trx0sys.h"
#include "trx0trx.h"
#include "ut0mem.h"
#include "ut0ut.h"

#include <drizzled/session.h>

/** Initial number of rows in a table cache */
#define TABLE_CACHE_INITIAL_ROWSNUM	1024

/** @brief The maximum number of chunks to allocate for a table cache.

The rows of a table cache are stored in a set of chunks; a new chunk is
allocated when a new row does not fit into the already allocated ones.
With the first chunk holding TABLE_CACHE_INITIAL_ROWSNUM rows and each
subsequent chunk holding 50% of the rows allocated so far, 39 chunks are
far more than will ever be needed before TRX_I_S_MEM_LIMIT is reached. */
#define MEM_CHUNKS_IN_TABLE_CACHE	39

/** The following are testing auxiliary macros. Do not enable them in a
production environment. */

#if 0
/** If this is enabled then lock folds will always be different,
resulting in equal rows being put into different cells of the hash
table. Checking for duplicates will be flawed because a different fold
will be calculated when a row is searched for in the hash table. */
#define TEST_LOCK_FOLD_ALWAYS_DIFFERENT
#endif

#if 0
/** This effectively disables the search-for-duplicate-before-adding-a-row
logic, although the hash search is still performed. It will always be
assumed that a row is not present in the hash table. */
#define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
#endif

#if 0
/** This aggressively repeats adding each lock row many times. Depending
on the above settings this may be a noop or may result in lots of rows
being added. */
#define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
#endif

#if 0
/** Similar to TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T, but the
duplicate check is skipped entirely. */
#define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
#endif

#if 0
/** If this is enabled then lock rows are never inserted into the hash
table. */
#define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
#endif

/** Memory limit passed to ha_storage_put_memlim() when storing volatile
strings in the cache (it accounts for the memory already allocated for
the table cache rows). */
#define MAX_ALLOWED_FOR_STORAGE(cache)		\
	(TRX_I_S_MEM_LIMIT			\
	 - (cache)->mem_allocd)

/** Memory limit used in table_cache_create_empty_row(): the memory
allocated for the rows plus the size of the string storage must not
exceed TRX_I_S_MEM_LIMIT. */
#define MAX_ALLOWED_FOR_ALLOC(cache)		\
	(TRX_I_S_MEM_LIMIT			\
	 - (cache)->mem_allocd			\
	 - ha_storage_get_size((cache)->storage))

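/* A note on how the two limits above are meant to interact (this is an
editorial reading of the code below, not authoritative documentation):
TRX_I_S_MEM_LIMIT caps the total footprint of the cache.
MAX_ALLOWED_FOR_ALLOC() is checked before a new row chunk is allocated
and therefore also subtracts what the string storage already occupies,
while MAX_ALLOWED_FOR_STORAGE() is passed to ha_storage_put_*_memlim(),
which accounts for the storage size itself, so only the row memory is
subtracted there. */
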
/** Memory for table cache rows is allocated in larger chunks, each
described by this structure. */
typedef struct i_s_mem_chunk_struct {
	ulint	offset;		/*!< offset, in number of rows, of this
				chunk within the whole table cache */
	ulint	rows_allocd;	/*!< size of this chunk, in number of rows */
	void*	base;		/*!< start of the chunk's memory, or NULL
				if the chunk is not allocated */
} i_s_mem_chunk_t;

/** This represents one table's cache. */
typedef struct i_s_table_cache_struct {
	ulint	rows_used;	/*!< number of used rows */
	ulint	rows_allocd;	/*!< number of allocated rows, across
				all chunks */
	ulint	row_size;	/*!< size of a single row, in bytes */
	i_s_mem_chunk_t	chunks[MEM_CHUNKS_IN_TABLE_CACHE]; /*!< the
				memory chunks that store the rows */
} i_s_table_cache_t;

/** This structure describes the intermediate buffer ("cache") from which
the INFORMATION SCHEMA tables are filled. */
struct trx_i_s_cache_struct {
	rw_lock_t	rw_lock;	/*!< read-write lock protecting
					the rest of this structure */
	ullint		last_read;	/*!< last time the cache was read,
					in microseconds since epoch */
	mutex_t		last_read_mutex;/*!< mutex protecting last_read */
	i_s_table_cache_t innodb_trx;	/*!< innodb_trx table cache */
	i_s_table_cache_t innodb_locks;	/*!< innodb_locks table cache */
	i_s_table_cache_t innodb_lock_waits;/*!< innodb_lock_waits
					table cache */
/** The hash table size is LOCKS_HASH_CELLS_NUM * sizeof(void*) bytes. */
#define LOCKS_HASH_CELLS_NUM		10000
	hash_table_t*	locks_hash;	/*!< hash table used to eliminate
					duplicate rows in innodb_locks */
/** Initial size of the cache storage */
#define CACHE_STORAGE_INITIAL_SIZE	1024
/** Number of hash cells in the cache storage */
#define CACHE_STORAGE_HASH_CELLS	2048
	ha_storage_t*	storage;	/*!< storage for volatile strings
					(e.g. table names, queries) that
					may become unavailable once the
					kernel mutex is released */
	ulint		mem_allocd;	/*!< number of bytes allocated for
					the table cache rows */
	ibool		is_truncated;	/*!< TRUE if the memory limit was
					hit and thus the data in the
					cache is truncated */
};

/** This is the intermediate buffer where the data needed to fill the
INFORMATION SCHEMA tables is fetched and from which it is later
retrieved by the handler code. */
static trx_i_s_cache_t	trx_i_s_cache_static;

/** Pointer to the cache above, used by code outside of this file. */
UNIV_INTERN trx_i_s_cache_t*	trx_i_s_cache = &trx_i_s_cache_static;

#ifdef UNIV_PFS_RWLOCK
/* Key to register the cache rw-lock with performance schema */
UNIV_INTERN mysql_pfs_key_t	trx_i_s_cache_lock_key;
#endif /* UNIV_PFS_RWLOCK */

#ifdef UNIV_PFS_MUTEX
/* Key to register the last_read mutex with performance schema */
UNIV_INTERN mysql_pfs_key_t	cache_last_read_mutex_key;
#endif /* UNIV_PFS_MUTEX */

/*******************************************************************//**
For a record lock, returns the heap number of the record it refers to;
for a table lock, returns ULINT_UNDEFINED.
@return	heap number of the record, or ULINT_UNDEFINED */
static
ulint
wait_lock_get_heap_no(
	const lock_t*	lock)	/*!< in: lock */
{
	ulint	ret;

	switch (lock_get_type(lock)) {
	case LOCK_REC:
		ret = lock_rec_find_set_bit(lock);
		ut_a(ret != ULINT_UNDEFINED);
		break;
	case LOCK_TABLE:
		ret = ULINT_UNDEFINED;
		break;
	default:
		ut_error;
	}

	return(ret);
}

/*******************************************************************//**
Initializes the members of a table cache. */
static
void
table_cache_init(
	i_s_table_cache_t*	table_cache,	/*!< out: table cache */
	size_t			row_size)	/*!< in: the size of a
						row, in bytes */
{
	ulint	i;

	table_cache->rows_used = 0;
	table_cache->rows_allocd = 0;
	table_cache->row_size = row_size;

	for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

		/* the memory is actually allocated in
		table_cache_create_empty_row() */
		table_cache->chunks[i].base = NULL;
	}
}

/*******************************************************************//**
Frees a table cache. */
static
void
table_cache_free(
	i_s_table_cache_t*	table_cache)	/*!< in/out: table cache */
{
	ulint	i;

	for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

		/* the memory is allocated in
		table_cache_create_empty_row() */
		if (table_cache->chunks[i].base) {
			mem_free(table_cache->chunks[i].base);
			table_cache->chunks[i].base = NULL;
		}
	}
}

/*******************************************************************//**
Returns an empty row from a table cache. The row is allocated if no more
empty rows are available. The number of used rows is incremented.
If the memory limit is hit then NULL is returned and nothing is
allocated.
@return	empty row, or NULL if out of memory */
static
void*
table_cache_create_empty_row(
	i_s_table_cache_t*	table_cache,	/*!< in/out: table cache */
	trx_i_s_cache_t*	cache)		/*!< in/out: cache to record
						how many bytes are
						allocated */
{
	ulint	i;
	void*	row;

	ut_a(table_cache->rows_used <= table_cache->rows_allocd);

	if (table_cache->rows_used == table_cache->rows_allocd) {

		/* rows_used == rows_allocd means that a new chunk needs
		to be allocated: either there are no more empty rows in
		the last allocated chunk or nothing has been allocated
		yet (rows_used == rows_allocd == 0) */

		i_s_mem_chunk_t*	chunk;
		ulint			req_bytes;
		ulint			got_bytes;
		ulint			req_rows;
		ulint			got_rows;

		/* find the first not allocated chunk */
		for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

			if (table_cache->chunks[i].base == NULL) {

				break;
			}
		}

		/* i == MEM_CHUNKS_IN_TABLE_CACHE would mean that all
		chunks have already been allocated */
		ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);

		/* allocate the chunk we just found */

		if (i == 0) {

			/* first chunk, nothing is allocated yet */
			req_rows = TABLE_CACHE_INITIAL_ROWSNUM;
		} else {

			/* Capacity grows by new = old + old / 2, i.e.
			50% at a time rather than doubling, because the
			allocated memory is kept (reused) until shutdown.
			Growing in more, smaller steps wastes less
			memory than fewer, larger steps. */
			req_rows = table_cache->rows_allocd / 2;
		}
		req_bytes = req_rows * table_cache->row_size;

		if (req_bytes > MAX_ALLOWED_FOR_ALLOC(cache)) {

			return(NULL);
		}

		chunk = &table_cache->chunks[i];

		chunk->base = mem_alloc2(req_bytes, &got_bytes);

		got_rows = got_bytes / table_cache->row_size;

		cache->mem_allocd += got_bytes;

#if 0
		printf("allocating chunk %d req bytes=%lu, got bytes=%lu, "
		       "row size=%lu, "
		       "req rows=%lu, got rows=%lu\n",
		       i, req_bytes, got_bytes,
		       table_cache->row_size,
		       req_rows, got_rows);
#endif

		chunk->rows_allocd = got_rows;

		table_cache->rows_allocd += got_rows;

		/* adjust the offset of the next chunk */
		if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {

			table_cache->chunks[i + 1].offset
				= chunk->offset + chunk->rows_allocd;
		}

		/* return the first empty row in the newly allocated
		chunk */
		row = chunk->base;
	} else {

		char*	chunk_start;
		ulint	offset;

		/* there is an empty row, no need to allocate a new
		chunk; find the chunk that contains allocated but
		still unused rows */
		for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

			if (table_cache->chunks[i].offset
			    + table_cache->chunks[i].rows_allocd
			    > table_cache->rows_used) {

				break;
			}
		}

		/* since rows_used < rows_allocd, at least one chunk
		must still contain an unused row */
		ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);

		chunk_start = (char*) table_cache->chunks[i].base;
		offset = table_cache->rows_used
			- table_cache->chunks[i].offset;

		row = chunk_start + offset * table_cache->row_size;
	}

	table_cache->rows_used++;

	return(row);
}

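/* An illustrative, non-compiled sketch (the function name below is
invented for the example) of the chunk growth strategy implemented in
table_cache_create_empty_row() above: chunk 0 holds
TABLE_CACHE_INITIAL_ROWSNUM rows and each further chunk holds half of
all rows allocated so far, i.e. the capacity grows by roughly 1.5x per
chunk, so MEM_CHUNKS_IN_TABLE_CACHE (39) chunks are far more than enough
before the TRX_I_S_MEM_LIMIT check stops the growth. */
#if 0
static void
table_cache_print_growth(void)
{
	ulint	total = 0;
	ulint	i;

	for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
		ulint	req_rows = (i == 0)
			? TABLE_CACHE_INITIAL_ROWSNUM
			: total / 2;

		total += req_rows;

		printf("chunk %lu: %lu rows, %lu rows in total\n",
		       (ulong) i, (ulong) req_rows, (ulong) total);
	}
}
#endif
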
#ifdef UNIV_DEBUG
/*******************************************************************//**
Validates a row in the locks cache.
@return	TRUE if valid */
static
ibool
i_s_locks_row_validate(
	const i_s_locks_row_t*	row)	/*!< in: row to validate */
{
	ut_ad(row->lock_trx_id != 0);
	ut_ad(row->lock_mode != NULL);
	ut_ad(row->lock_type != NULL);
	ut_ad(row->lock_table != NULL);
	ut_ad(row->lock_table_id != 0);

	if (row->lock_space == ULINT_UNDEFINED) {
		/* table lock */
		ut_ad(!strcmp("TABLE", row->lock_type));
		ut_ad(row->lock_index == NULL);
		ut_ad(row->lock_data == NULL);
		ut_ad(row->lock_page == ULINT_UNDEFINED);
		ut_ad(row->lock_rec == ULINT_UNDEFINED);
	} else {
		/* record lock */
		ut_ad(!strcmp("RECORD", row->lock_type));
		ut_ad(row->lock_index != NULL);
		ut_ad(row->lock_data != NULL);
		ut_ad(row->lock_page != ULINT_UNDEFINED);
		ut_ad(row->lock_rec != ULINT_UNDEFINED);
	}

	return(TRUE);
}
#endif /* UNIV_DEBUG */

/*******************************************************************//**
Fills an i_s_trx_row_t object.
If memory can not be allocated then FALSE is returned.
@return	FALSE if allocation fails */
static
ibool
fill_trx_row(
	i_s_trx_row_t*		row,	/*!< out: result object
					that's filled */
	const trx_t*		trx,	/*!< in: transaction to get data
					from */
	const i_s_locks_row_t*	requested_lock_row,/*!< in: pointer to the
					corresponding row in innodb_locks
					if trx is waiting, or NULL if it
					is not waiting */
	trx_i_s_cache_t*	cache)	/*!< in/out: cache into which
					volatile strings are copied */
{
	const char*	stmt;
	size_t		stmt_len;
	const char*	s;

	ut_ad(mutex_own(&kernel_mutex));

	row->trx_id = trx->id;
	row->trx_started = (ib_time_t) trx->start_time;
	row->trx_state = trx_get_que_state_str(trx);
	row->requested_lock_row = requested_lock_row;
	ut_ad(requested_lock_row == NULL
	      || i_s_locks_row_validate(requested_lock_row));

	if (trx->wait_lock != NULL) {
		ut_a(requested_lock_row != NULL);
		row->trx_wait_started = (ib_time_t) trx->wait_started;
	} else {
		ut_a(requested_lock_row == NULL);
		row->trx_wait_started = 0;
	}

	row->trx_weight = (ullint) TRX_WEIGHT(trx);

	if (trx->mysql_thd == NULL) {
		/* For internal transactions, e.g. purge or transactions
		being recovered at startup, there is no associated
		client session. */
		row->trx_mysql_thread_id = 0;
		row->trx_query = NULL;
		goto thd_done;
	}

	row->trx_mysql_thread_id = trx->session()->getSessionId();
	stmt = trx->mysql_thd->getQueryStringCopy(stmt_len);

	if (stmt != NULL) {

		char	query[TRX_I_S_TRX_QUERY_MAX_LEN + 1];

		if (stmt_len > TRX_I_S_TRX_QUERY_MAX_LEN) {
			stmt_len = TRX_I_S_TRX_QUERY_MAX_LEN;
		}

		memcpy(query, stmt, stmt_len);
		query[stmt_len] = '\0';

		/* Store the possibly truncated, NUL-terminated copy
		rather than the original pointer: if stmt_len was
		clamped above then stmt[stmt_len] is not necessarily
		'\0'. */
		row->trx_query = static_cast<const char *>(
			ha_storage_put_memlim(
				cache->storage, query, stmt_len + 1,
				MAX_ALLOWED_FOR_STORAGE(cache)));

		if (row->trx_query == NULL) {

			return(FALSE);
		}
	} else {

		row->trx_query = NULL;
	}

thd_done:
	s = trx->op_info;

	if (s != NULL && s[0] != '\0') {

		TRX_I_S_STRING_COPY(s, row->trx_operation_state,
				    TRX_I_S_TRX_OP_STATE_MAX_LEN, cache);

		if (row->trx_operation_state == NULL) {

			return(FALSE);
		}
	} else {

		row->trx_operation_state = NULL;
	}

	row->trx_tables_locked = trx->mysql_n_tables_locked;

	row->trx_lock_structs = UT_LIST_GET_LEN(trx->trx_locks);

	row->trx_lock_memory_bytes = mem_heap_get_size(trx->lock_heap);

	row->trx_rows_locked = lock_number_of_rows_locked(trx);

	row->trx_rows_modified = trx->undo_no;

	row->trx_concurrency_tickets = trx->n_tickets_to_enter_innodb;

	switch (trx->isolation_level) {
	case TRX_ISO_READ_UNCOMMITTED:
		row->trx_isolation_level = "READ UNCOMMITTED";
		break;
	case TRX_ISO_READ_COMMITTED:
		row->trx_isolation_level = "READ COMMITTED";
		break;
	case TRX_ISO_REPEATABLE_READ:
		row->trx_isolation_level = "REPEATABLE READ";
		break;
	case TRX_ISO_SERIALIZABLE:
		row->trx_isolation_level = "SERIALIZABLE";
		break;
	/* Should not happen as TRX_ISO_* have unique values */
	default:
		row->trx_isolation_level = "UNKNOWN";
	}

	row->trx_unique_checks = (ibool) trx->check_unique_secondary;

	row->trx_foreign_key_checks = (ibool) trx->check_foreigns;

	s = trx->detailed_error;

	if (s != NULL && s[0] != '\0') {

		TRX_I_S_STRING_COPY(s,
				    row->trx_foreign_key_error,
				    TRX_I_S_TRX_FK_ERROR_MAX_LEN, cache);

		if (row->trx_foreign_key_error == NULL) {

			return(FALSE);
		}
	} else {
		row->trx_foreign_key_error = NULL;
	}

	row->trx_has_search_latch = (ibool) trx->has_search_latch;

	row->trx_search_latch_timeout = trx->search_latch_timeout;

	return(TRUE);
}

/*******************************************************************//**
Formats the nth field of "rec" and puts it in "buf". The result is
always NUL-terminated.
@return	number of bytes written to buf, including the terminating NUL */
static
ulint
put_nth_field(
	char*			buf,	/*!< out: buffer */
	ulint			buf_size,/*!< in: buffer size, in bytes */
	ulint			n,	/*!< in: number of the field */
	const dict_index_t*	index,	/*!< in: index */
	const rec_t*		rec,	/*!< in: record */
	const ulint*		offsets)/*!< in: record offsets, returned
					by rec_get_offsets() */
{
	const byte*	data;
	ulint		data_len;
	dict_field_t*	dict_field;
	ulint		ret;

	ut_ad(rec_offs_validate(rec, NULL, offsets));

	if (buf_size == 0) {

		return(0);
	}

	ret = 0;

	if (n > 0) {
		/* we must append ", " before the actual data */

		if (buf_size < 3) {

			buf[0] = '\0';
			return(1);
		}

		memcpy(buf, ", ", 3);

		buf += 2;
		buf_size -= 2;
		ret += 2;
	}

	/* now buf_size >= 1 */

	data = rec_get_nth_field(rec, offsets, n, &data_len);

	dict_field = dict_index_get_nth_field(index, n);

	ret += row_raw_format((const char*) data, data_len,
			      dict_field, buf, buf_size);

	return(ret);
}

/*******************************************************************//**
Fills the "lock_data" member of an i_s_locks_row_t object.
If memory can not be allocated then FALSE is returned.
@return	FALSE if allocation fails */
static
ibool
fill_lock_data(
	const char**		lock_data,/*!< out: "lock_data" to fill */
	const lock_t*		lock,	/*!< in: lock used to find the data */
	ulint			heap_no,/*!< in: record heap number used
					to find the data */
	trx_i_s_cache_t*	cache)	/*!< in/out: cache where to store
					volatile data */
{
	mtr_t			mtr;

	const buf_block_t*	block;
	const page_t*		page;
	const rec_t*		rec;

	ut_a(lock_get_type(lock) == LOCK_REC);

	mtr_start(&mtr);

	block = buf_page_try_get(lock_rec_get_space_id(lock),
				 lock_rec_get_page_no(lock),
				 &mtr);

	if (block == NULL) {

		/* The page is not in the buffer pool and we do not want
		to do I/O here; leave lock_data unset. This is not an
		error. */
		*lock_data = NULL;

		mtr_commit(&mtr);

		return(TRUE);
	}

	page = (const page_t*) buf_block_get_frame(block);

	rec = page_find_rec_with_heap_no(page, heap_no);

	if (page_rec_is_infimum(rec)) {

		*lock_data = ha_storage_put_str_memlim(
			cache->storage, "infimum pseudo-record",
			MAX_ALLOWED_FOR_STORAGE(cache));
	} else if (page_rec_is_supremum(rec)) {

		*lock_data = ha_storage_put_str_memlim(
			cache->storage, "supremum pseudo-record",
			MAX_ALLOWED_FOR_STORAGE(cache));
	} else {

		const dict_index_t*	index;
		ulint			n_fields;
		mem_heap_t*		heap;
		ulint			offsets_onstack[REC_OFFS_NORMAL_SIZE];
		ulint*			offsets;
		char			buf[TRX_I_S_LOCK_DATA_MAX_LEN];
		ulint			buf_used;
		ulint			i;

		rec_offs_init(offsets_onstack);
		offsets = offsets_onstack;

		index = lock_rec_get_index(lock);

		n_fields = dict_index_get_n_unique(index);

		ut_a(n_fields > 0);

		heap = NULL;
		offsets = rec_get_offsets(rec, index, offsets, n_fields,
					  &heap);

		/* format and store the data */

		buf_used = 0;
		for (i = 0; i < n_fields; i++) {

			/* put_nth_field() counts the terminating NUL;
			subtract 1 so the fields are concatenated */
			buf_used += put_nth_field(
				buf + buf_used, sizeof(buf) - buf_used,
				i, index, rec, offsets) - 1;
		}

		*lock_data = (const char*) ha_storage_put_memlim(
			cache->storage, buf, buf_used + 1,
			MAX_ALLOWED_FOR_STORAGE(cache));

		if (UNIV_UNLIKELY(heap != NULL)) {

			/* rec_get_offsets() has created a new heap and
			stored the offsets in it; check that this is
			really the case and free the heap */
			ut_a(offsets != offsets_onstack);
			mem_heap_free(heap);
		}
	}

	mtr_commit(&mtr);

	if (*lock_data == NULL) {

		return(FALSE);
	}

	return(TRUE);
}

/*******************************************************************//**
Fills an i_s_locks_row_t object.
If memory can not be allocated then FALSE is returned.
@return	FALSE if allocation fails */
static
ibool
fill_locks_row(
	i_s_locks_row_t* row,	/*!< out: result object that's filled */
	const lock_t*	lock,	/*!< in: lock to get data from */
	ulint		heap_no,/*!< in: lock's record heap number, or
				ULINT_UNDEFINED if it is a table lock */
	trx_i_s_cache_t* cache)	/*!< in/out: cache into which volatile
				strings are copied */
{
	row->lock_trx_id = lock_get_trx_id(lock);
	row->lock_mode = lock_get_mode_str(lock);
	row->lock_type = lock_get_type_str(lock);

	row->lock_table = ha_storage_put_str_memlim(
		cache->storage, lock_get_table_name(lock),
		MAX_ALLOWED_FOR_STORAGE(cache));

	/* memory could not be allocated */
	if (row->lock_table == NULL) {

		return(FALSE);
	}

	switch (lock_get_type(lock)) {
	case LOCK_REC:
		row->lock_index = ha_storage_put_str_memlim(
			cache->storage, lock_rec_get_index_name(lock),
			MAX_ALLOWED_FOR_STORAGE(cache));

		/* memory could not be allocated */
		if (row->lock_index == NULL) {

			return(FALSE);
		}

		row->lock_space = lock_rec_get_space_id(lock);
		row->lock_page = lock_rec_get_page_no(lock);
		row->lock_rec = heap_no;

		if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {

			/* memory could not be allocated */
			return(FALSE);
		}

		break;
	case LOCK_TABLE:
		row->lock_index = NULL;

		row->lock_space = ULINT_UNDEFINED;
		row->lock_page = ULINT_UNDEFINED;
		row->lock_rec = ULINT_UNDEFINED;

		row->lock_data = NULL;

		break;
	default:
		ut_error;
	}

	row->lock_table_id = lock_get_table_id(lock);

	row->hash_chain.value = row;
	ut_ad(i_s_locks_row_validate(row));

	return(TRUE);
}

/*******************************************************************//**
Fills an i_s_lock_waits_row_t object. Returns its first argument.
@return	result object that's filled */
static
i_s_lock_waits_row_t*
fill_lock_waits_row(
	i_s_lock_waits_row_t*	row,	/*!< out: result object that's
					filled */
	const i_s_locks_row_t*	requested_lock_row,/*!< in: pointer to the
					relevant requested lock row in
					innodb_locks */
	const i_s_locks_row_t*	blocking_lock_row)/*!< in: pointer to the
					relevant blocking lock row in
					innodb_locks */
{
	ut_ad(i_s_locks_row_validate(requested_lock_row));
	ut_ad(i_s_locks_row_validate(blocking_lock_row));

	row->requested_lock_row = requested_lock_row;
	row->blocking_lock_row = blocking_lock_row;

	return(row);
}

/*******************************************************************//**
Calculates a hash fold for a lock. For a record lock the fold is
calculated from the 4 elements that uniquely identify it: trx id,
space id, page number and record heap number. For a table lock the fold
is the table's id.
@return	fold */
static
ulint
fold_lock(
	const lock_t*	lock,	/*!< in: lock object to fold */
	ulint		heap_no)/*!< in: lock's record heap number, or
				ULINT_UNDEFINED if it is a table lock */
{
#ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
	static ulint	fold = 0;

	return(fold++);
#else
	ulint	ret;

	switch (lock_get_type(lock)) {
	case LOCK_REC:
		ut_a(heap_no != ULINT_UNDEFINED);

		ret = ut_fold_ulint_pair((ulint) lock_get_trx_id(lock),
					 lock_rec_get_space_id(lock));

		ret = ut_fold_ulint_pair(ret,
					 lock_rec_get_page_no(lock));

		ret = ut_fold_ulint_pair(ret, heap_no);

		break;
	case LOCK_TABLE:
		/* this check is not necessary for correct operation,
		but something must have gone wrong if it fails */
		ut_a(heap_no == ULINT_UNDEFINED);

		ret = (ulint) lock_get_table_id(lock);

		break;
	default:
		ut_error;
	}

	return(ret);
#endif
}

/*******************************************************************//**
Checks whether an i_s_locks_row_t object represents a given lock_t
object.
@return	TRUE if they match */
static
ibool
locks_row_eq_lock(
	const i_s_locks_row_t*	row,	/*!< in: innodb_locks row */
	const lock_t*		lock,	/*!< in: lock object */
	ulint			heap_no)/*!< in: lock's record heap
					number, or ULINT_UNDEFINED if it
					is a table lock */
{
	ut_ad(i_s_locks_row_validate(row));
#ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
	return(0);
#else
	switch (lock_get_type(lock)) {
	case LOCK_REC:
		ut_a(heap_no != ULINT_UNDEFINED);

		return(row->lock_trx_id == lock_get_trx_id(lock)
		       && row->lock_space == lock_rec_get_space_id(lock)
		       && row->lock_page == lock_rec_get_page_no(lock)
		       && row->lock_rec == heap_no);

	case LOCK_TABLE:
		/* this check is not necessary for correct operation,
		but something must have gone wrong if it fails */
		ut_a(heap_no == ULINT_UNDEFINED);

		return(row->lock_trx_id == lock_get_trx_id(lock)
		       && row->lock_table_id == lock_get_table_id(lock));

	default:
		ut_error;
		return(FALSE);
	}
#endif
}

/*******************************************************************//**
Searches for a row in the innodb_locks cache that corresponds to a given
lock. This happens in O(1) time since a hash table is used.
@return	the cached row, or NULL if none is found */
static
i_s_locks_row_t*
search_innodb_locks(
	trx_i_s_cache_t*	cache,	/*!< in: cache */
	const lock_t*		lock,	/*!< in: lock to search for */
	ulint			heap_no)/*!< in: lock's record heap
					number, or ULINT_UNDEFINED if it
					is a table lock */
{
	i_s_hash_chain_t*	hash_chain;

	HASH_SEARCH(
		/* hash_chain->"next" */
		next,
		/* the hash table */
		cache->locks_hash,
		/* fold */
		fold_lock(lock, heap_no),
		/* the type of the next pointer */
		i_s_hash_chain_t*,
		/* auxiliary variable */
		hash_chain,
		/* assertion on every traversed item */
		ut_ad(i_s_locks_row_validate(hash_chain->value)),
		/* this determines if we have found the lock */
		locks_row_eq_lock(hash_chain->value, lock, heap_no));

	if (hash_chain == NULL) {

		return(NULL);
	}
	/* else */

	return(hash_chain->value);
}

/*******************************************************************//**
Adds a new element to the locks cache, enlarging it if necessary.
Returns a pointer to the added row. If the row is already present then
no row is added and a pointer to the existing row is returned.
If the row can not be allocated then NULL is returned.
@return	row */
static
i_s_locks_row_t*
add_lock_to_cache(
	trx_i_s_cache_t*	cache,	/*!< in/out: cache */
	const lock_t*		lock,	/*!< in: the element to add */
	ulint			heap_no)/*!< in: lock's record heap
					number, or ULINT_UNDEFINED if it
					is a table lock */
{
	i_s_locks_row_t*	dst_row;

#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
	ulint	i;
	for (i = 0; i < 10000; i++) {
#endif
#ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
	/* quit if this lock is already present */
	dst_row = search_innodb_locks(cache, lock, heap_no);
	if (dst_row != NULL) {

		ut_ad(i_s_locks_row_validate(dst_row));
		return(dst_row);
	}
#endif

	dst_row = (i_s_locks_row_t*)
		table_cache_create_empty_row(&cache->innodb_locks, cache);

	/* memory could not be allocated */
	if (dst_row == NULL) {

		return(NULL);
	}

	if (!fill_locks_row(dst_row, lock, heap_no, cache)) {

		/* memory could not be allocated */
		cache->innodb_locks.rows_used--;
		return(NULL);
	}

#ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
	HASH_INSERT(
		/* the type used in the hash chain */
		i_s_hash_chain_t,
		/* hash_chain->"next" */
		next,
		/* the hash table */
		cache->locks_hash,
		/* fold */
		fold_lock(lock, heap_no),
		/* add this data to the hash */
		&dst_row->hash_chain);
#endif
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
	} /* for() loop */
#endif

	ut_ad(i_s_locks_row_validate(dst_row));
	return(dst_row);
}

/*******************************************************************//**
Adds a new pair of locks to the lock waits cache.
If memory can not be allocated then FALSE is returned.
@return	FALSE if allocation fails */
static
ibool
add_lock_wait_to_cache(
	trx_i_s_cache_t*	cache,	/*!< in/out: cache */
	const i_s_locks_row_t*	requested_lock_row,/*!< in: pointer to the
					relevant requested lock row in
					innodb_locks */
	const i_s_locks_row_t*	blocking_lock_row)/*!< in: pointer to the
					relevant blocking lock row in
					innodb_locks */
{
	i_s_lock_waits_row_t*	dst_row;

	dst_row = (i_s_lock_waits_row_t*)
		table_cache_create_empty_row(&cache->innodb_lock_waits,
					     cache);

	/* memory could not be allocated */
	if (dst_row == NULL) {

		return(FALSE);
	}

	fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);

	return(TRUE);
}

/*******************************************************************//**
Adds a transaction's relevant (important) locks to the cache.
If the transaction is waiting, then the wait lock is added to
innodb_locks and a pointer to the added row is returned in
requested_lock_row; otherwise requested_lock_row is set to NULL.
If rows can not be allocated then FALSE is returned and the value of
requested_lock_row is undefined.
@return	FALSE if allocation fails */
static
ibool
add_trx_relevant_locks_to_cache(
	trx_i_s_cache_t*	cache,	/*!< in/out: cache */
	const trx_t*		trx,	/*!< in: transaction */
	i_s_locks_row_t**	requested_lock_row)/*!< out: pointer to the
					requested lock row, or NULL */
{
	ut_ad(mutex_own(&kernel_mutex));

	/* If the transaction is waiting, we add its wait lock and all
	locks from other transactions that block the wait lock. */
	if (trx->que_state == TRX_QUE_LOCK_WAIT) {

		const lock_t*		curr_lock;
		ulint			wait_lock_heap_no;
		i_s_locks_row_t*	blocking_lock_row;
		lock_queue_iterator_t	iter;

		ut_a(trx->wait_lock != NULL);

		wait_lock_heap_no
			= wait_lock_get_heap_no(trx->wait_lock);

		/* add the requested lock */
		*requested_lock_row
			= add_lock_to_cache(cache, trx->wait_lock,
					    wait_lock_heap_no);

		/* memory could not be allocated */
		if (*requested_lock_row == NULL) {

			return(FALSE);
		}

		/* then iterate over the locks before the wait lock and
		add the ones that are blocking it */

		lock_queue_iterator_reset(&iter, trx->wait_lock,
					  ULINT_UNDEFINED);

		curr_lock = lock_queue_iterator_get_prev(&iter);
		while (curr_lock != NULL) {

			if (lock_has_to_wait(trx->wait_lock,
					     curr_lock)) {

				/* add the lock that is blocking
				trx->wait_lock */
				blocking_lock_row
					= add_lock_to_cache(
						cache, curr_lock,
						/* heap_no is the same
						for the wait and waited
						locks */
						wait_lock_heap_no);

				/* memory could not be allocated */
				if (blocking_lock_row == NULL) {

					return(FALSE);
				}

				/* add the relation between both locks
				to innodb_lock_waits */
				if (!add_lock_wait_to_cache(
					    cache, *requested_lock_row,
					    blocking_lock_row)) {

					/* memory could not be allocated */
					return(FALSE);
				}
			}

			curr_lock = lock_queue_iterator_get_prev(&iter);
		}
	} else {

		*requested_lock_row = NULL;
	}

	return(TRUE);
}

/** The minimum time that the cache must not be updated after it has
been read for the last time, in microseconds. This ensures that SELECTs
which join several INFORMATION SCHEMA tables read the same version of
the cache. */
#define CACHE_MIN_IDLE_TIME_US	100000	/* 0.1 sec */

/*******************************************************************//**
Checks if the cache can safely be updated.
@return	TRUE if it can be updated */
static
ibool
can_cache_be_updated(
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	ullint	now;

	/* cache->last_read is read here without acquiring its mutex:
	it is only updated while a shared lock on the whole cache is
	held (and we hold an exclusive lock here), and this check is
	only a heuristic anyway. */

#ifdef UNIV_SYNC_DEBUG
	ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
#endif

	now = ut_time_us(NULL);
	if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {

		return(TRUE);
	}

	return(FALSE);
}

/*******************************************************************//**
Declares the cache empty, preparing it to be filled up again. Not all
resources are freed because they can be reused. */
static
void
trx_i_s_cache_clear(
	trx_i_s_cache_t*	cache)	/*!< out: cache to clear */
{
	cache->innodb_trx.rows_used = 0;
	cache->innodb_locks.rows_used = 0;
	cache->innodb_lock_waits.rows_used = 0;

	hash_table_clear(cache->locks_hash);

	ha_storage_empty(&cache->storage);
}

/*******************************************************************//**
Fetches the data needed to fill the 3 INFORMATION SCHEMA tables into the
table cache buffer. The cache must be locked for write. */
static
void
fetch_data_into_cache(
	trx_i_s_cache_t*	cache)	/*!< in/out: cache */
{
	trx_t*			trx;
	i_s_trx_row_t*		trx_row;
	i_s_locks_row_t*	requested_lock_row;

	ut_ad(mutex_own(&kernel_mutex));

	trx_i_s_cache_clear(cache);

	/* We iterate over the list of all transactions and add each one
	to innodb_trx's cache. We also add all locks that are relevant
	to each transaction into innodb_locks' and innodb_lock_waits'
	caches. */

	for (trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
	     trx != NULL;
	     trx = UT_LIST_GET_NEXT(trx_list, trx)) {

		if (!add_trx_relevant_locks_to_cache(cache, trx,
						     &requested_lock_row)) {

			cache->is_truncated = TRUE;
			return;
		}

		trx_row = (i_s_trx_row_t*)
			table_cache_create_empty_row(&cache->innodb_trx,
						     cache);

		/* memory could not be allocated */
		if (trx_row == NULL) {

			cache->is_truncated = TRUE;
			return;
		}

		if (!fill_trx_row(trx_row, trx, requested_lock_row, cache)) {

			/* memory could not be allocated */
			cache->innodb_trx.rows_used--;
			cache->is_truncated = TRUE;
			return;
		}
	}

	cache->is_truncated = FALSE;
}

/*******************************************************************//**
Updates the transactions cache if it has not been read for some time.
Called by the INFORMATION SCHEMA handler code.
@return	0 if the cache was fetched, 1 if not */
UNIV_INTERN
int
trx_i_s_possibly_fetch_data_into_cache(
	trx_i_s_cache_t*	cache)	/*!< in/out: cache */
{
	if (!can_cache_be_updated(cache)) {

		return(1);
	}

	/* We need to read trx_sys and the record/table lock queues */
	mutex_enter(&kernel_mutex);

	fetch_data_into_cache(cache);

	mutex_exit(&kernel_mutex);

	return(0);
}

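/* A minimal sketch of how the writer side is expected to drive the
function above; the real call site lives in the INFORMATION SCHEMA
handler code outside of this file, so treat this as an assumed usage
pattern rather than the canonical caller. */
#if 0
	trx_i_s_cache_start_write(trx_i_s_cache);

	/* refresh the cache unless it has been read very recently */
	trx_i_s_possibly_fetch_data_into_cache(trx_i_s_cache);

	trx_i_s_cache_end_write(trx_i_s_cache);
#endif
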
/*******************************************************************//**
Returns TRUE if the data in the cache is truncated due to the memory
limit posed by TRX_I_S_MEM_LIMIT.
@return	TRUE if truncated */
UNIV_INTERN
ibool
trx_i_s_cache_is_truncated(
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	return(cache->is_truncated);
}

/*******************************************************************//**
Initializes the INFORMATION SCHEMA transactions cache. */
UNIV_INTERN
void
trx_i_s_cache_init(
	trx_i_s_cache_t*	cache)	/*!< out: cache to init */
{
	/* The latching is done in the following order:
	acquire trx_i_s_cache_t::rw_lock, X
	acquire kernel mutex
	release kernel mutex
	release trx_i_s_cache_t::rw_lock
	acquire trx_i_s_cache_t::rw_lock, S
	acquire trx_i_s_cache_t::last_read_mutex
	release trx_i_s_cache_t::last_read_mutex
	release trx_i_s_cache_t::rw_lock */

	rw_lock_create(trx_i_s_cache_lock_key, &cache->rw_lock,
		       SYNC_TRX_I_S_RWLOCK);

	cache->last_read = 0;

	mutex_create(cache_last_read_mutex_key,
		     &cache->last_read_mutex, SYNC_TRX_I_S_LAST_READ);

	table_cache_init(&cache->innodb_trx, sizeof(i_s_trx_row_t));
	table_cache_init(&cache->innodb_locks, sizeof(i_s_locks_row_t));
	table_cache_init(&cache->innodb_lock_waits,
			 sizeof(i_s_lock_waits_row_t));

	cache->locks_hash = hash_create(LOCKS_HASH_CELLS_NUM);

	cache->storage = ha_storage_create(CACHE_STORAGE_INITIAL_SIZE,
					   CACHE_STORAGE_HASH_CELLS);

	cache->mem_allocd = 0;

	cache->is_truncated = FALSE;
}

/*******************************************************************//**
Frees the INFORMATION SCHEMA transactions cache. */
UNIV_INTERN
void
trx_i_s_cache_free(
	trx_i_s_cache_t*	cache)	/*!< in, own: cache to free */
{
	hash_table_free(cache->locks_hash);
	ha_storage_free(cache->storage);
	table_cache_free(&cache->innodb_trx);
	table_cache_free(&cache->innodb_locks);
	table_cache_free(&cache->innodb_lock_waits);
	memset(cache, 0, sizeof *cache);
}

/*******************************************************************//**
Issues a shared/read lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_start_read(
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	rw_lock_s_lock(&cache->rw_lock);
}

/*******************************************************************//**
Releases a shared/read lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_end_read(
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	ullint	now;

#ifdef UNIV_SYNC_DEBUG
	ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED));
#endif

	/* update the last read time of the cache */
	now = ut_time_us(NULL);
	mutex_enter(&cache->last_read_mutex);
	cache->last_read = now;
	mutex_exit(&cache->last_read_mutex);

	rw_lock_s_unlock(&cache->rw_lock);
}

/*******************************************************************//**
Issues an exclusive/write lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_start_write(
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	rw_lock_x_lock(&cache->rw_lock);
}

/*******************************************************************//**
Releases an exclusive/write lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_end_write(
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
#ifdef UNIV_SYNC_DEBUG
	ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
#endif

	rw_lock_x_unlock(&cache->rw_lock);
}

/*******************************************************************//**
Selects an INFORMATION SCHEMA table cache from the whole cache.
@return	table cache */
static
i_s_table_cache_t*
cache_select_table(
	trx_i_s_cache_t*	cache,	/*!< in: whole cache */
	enum i_s_table		table)	/*!< in: which table */
{
	i_s_table_cache_t*	table_cache;

#ifdef UNIV_SYNC_DEBUG
	ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED)
	     || rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
#endif

	switch (table) {
	case I_S_INNODB_TRX:
		table_cache = &cache->innodb_trx;
		break;
	case I_S_INNODB_LOCKS:
		table_cache = &cache->innodb_locks;
		break;
	case I_S_INNODB_LOCK_WAITS:
		table_cache = &cache->innodb_lock_waits;
		break;
	default:
		ut_error;
	}

	return(table_cache);
}

/*******************************************************************//**
Retrieves the number of used rows in the cache for a given
INFORMATION SCHEMA table.
@return	number of rows */
UNIV_INTERN
ulint
trx_i_s_cache_get_rows_used(
	trx_i_s_cache_t*	cache,	/*!< in: cache */
	enum i_s_table		table)	/*!< in: which table */
{
	i_s_table_cache_t*	table_cache;

	table_cache = cache_select_table(cache, table);

	return(table_cache->rows_used);
}

/*******************************************************************//**
Retrieves the nth row (zero-based) in the cache for a given
INFORMATION SCHEMA table.
@return	row */
UNIV_INTERN
void*
trx_i_s_cache_get_nth_row(
	trx_i_s_cache_t*	cache,	/*!< in: cache */
	enum i_s_table		table,	/*!< in: which table */
	ulint			n)	/*!< in: row number */
{
	i_s_table_cache_t*	table_cache;
	ulint			i;
	void*			row;

	table_cache = cache_select_table(cache, table);

	ut_a(n < table_cache->rows_used);

	row = NULL;

	for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

		if (table_cache->chunks[i].offset
		    + table_cache->chunks[i].rows_allocd > n) {

			row = (char*) table_cache->chunks[i].base
				+ (n - table_cache->chunks[i].offset)
				* table_cache->row_size;
			break;
		}
	}

	ut_a(row != NULL);

	return(row);
}

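/* A minimal sketch of the reader side, assuming the caller wants to
walk all rows of one of the cached tables; the real consumers are the
INFORMATION SCHEMA handlers, so this is illustrative only. */
#if 0
	ulint	rows;
	ulint	i;

	trx_i_s_cache_start_read(trx_i_s_cache);

	rows = trx_i_s_cache_get_rows_used(trx_i_s_cache, I_S_INNODB_TRX);

	for (i = 0; i < rows; i++) {
		i_s_trx_row_t*	row = (i_s_trx_row_t*)
			trx_i_s_cache_get_nth_row(
				trx_i_s_cache, I_S_INNODB_TRX, i);

		/* ... copy *row into the SQL result set ... */
	}

	trx_i_s_cache_end_read(trx_i_s_cache);
#endif
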
/*******************************************************************//**
Crafts a lock id string from an i_s_locks_row_t object. Returns its
second argument. This function aborts if there is not enough space in
lock_id; provide at least TRX_I_S_LOCK_ID_MAX_LEN + 1 bytes to be sure
that it will not abort.
@return	resulting lock id */
UNIV_INTERN
char*
trx_i_s_create_lock_id(
	const i_s_locks_row_t*	row,	/*!< in: innodb_locks row */
	char*			lock_id,/*!< out: resulting lock_id */
	ulint			lock_id_size)/*!< in: size of the lock id
					buffer */
{
	int	res_len;

	/* please adjust TRX_I_S_LOCK_ID_MAX_LEN if you change this */

	if (row->lock_space != ULINT_UNDEFINED) {
		/* record lock */
		res_len = ut_snprintf(lock_id, lock_id_size,
				      TRX_ID_FMT ":%lu:%lu:%lu",
				      row->lock_trx_id, row->lock_space,
				      row->lock_page, row->lock_rec);
	} else {
		/* table lock */
		res_len = ut_snprintf(lock_id, lock_id_size,
				      TRX_ID_FMT ":" TRX_ID_FMT,
				      row->lock_trx_id,
				      row->lock_table_id);
	}

	/* the result must have been written successfully and it must
	have fit into the provided buffer */
	ut_a(res_len >= 0);
	ut_a((ulint) res_len < lock_id_size);

	return(lock_id);
}
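
/* For illustration (the concrete values are made up): a record lock is
formatted as "<trx_id>:<space_id>:<page_no>:<heap_no>", e.g. "1234:5:3:2",
and a table lock as "<trx_id>:<table_id>", e.g. "1234:17". The caller is
expected to size the buffer using TRX_I_S_LOCK_ID_MAX_LEN + 1 from
trx0i_s.h. */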