00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047
00048
00049
00050
00051
00052
00053
00054
00055
00056
00057
00058
00059
00060
00061
00062
00063
00064
00065
00066
00067
00068
00069
00070
00071
00072
00073
00074
00075
00076
00077
00078
00079
00080
00081
00082
00083
00084
00085
00086
00087
00088
00089
00090
00091
00092
00093
00094
00095
00096
00097
00098
00099
00100
00101
00102
00103 #include <config.h>
00104
00105 #include <math.h>
00106 #include <float.h>
00107
00108 #include <string>
00109 #include <vector>
00110 #include <algorithm>
00111
00112 #include <boost/dynamic_bitset.hpp>
00113
00114 #include <drizzled/check_stack_overrun.h>
00115 #include <drizzled/error.h>
00116 #include <drizzled/field/num.h>
00117 #include <drizzled/internal/iocache.h>
00118 #include <drizzled/internal/my_sys.h>
00119 #include <drizzled/item/cmpfunc.h>
00120 #include <drizzled/optimizer/cost_vector.h>
00121 #include <drizzled/optimizer/quick_group_min_max_select.h>
00122 #include <drizzled/optimizer/quick_index_merge_select.h>
00123 #include <drizzled/optimizer/quick_range.h>
00124 #include <drizzled/optimizer/quick_range_select.h>
00125 #include <drizzled/optimizer/quick_ror_intersect_select.h>
00126 #include <drizzled/optimizer/quick_ror_union_select.h>
00127 #include <drizzled/optimizer/range.h>
00128 #include <drizzled/optimizer/range_param.h>
00129 #include <drizzled/optimizer/sel_arg.h>
00130 #include <drizzled/optimizer/sel_imerge.h>
00131 #include <drizzled/optimizer/sel_tree.h>
00132 #include <drizzled/optimizer/sum.h>
00133 #include <drizzled/optimizer/table_read_plan.h>
00134 #include <drizzled/plugin/storage_engine.h>
00135 #include <drizzled/records.h>
00136 #include <drizzled/sql_base.h>
00137 #include <drizzled/sql_select.h>
00138 #include <drizzled/table_reference.h>
00139 #include <drizzled/session.h>
00140 #include <drizzled/key.h>
00141 #include <drizzled/unique.h>
00142 #include <drizzled/temporal.h>
00143 #include <drizzled/sql_lex.h>
00144
00145 using namespace std;
00146 namespace drizzled
00147 {
00148
/* Legacy key flag: end-space key handling is disabled in this engine. */
#define HA_END_SPACE_KEY 0




/* Truncate a floating-point row-count estimate to the integral ha_rows type. */
static inline ha_rows double2rows(double x)
{
  return static_cast<ha_rows>(x);
}

/* Key image of a single SQL NULL: null-indicator byte set (1), value byte 0. */
static unsigned char is_null_string[2]= {1,0};
00161
00162
/*
  Estimate the cost of a "sweep": fetching 'nrows' full rows by rowid after
  the index scans have produced the rowids.

  @param table        table the rows will be read from
  @param nrows        number of rows to retrieve
  @param interrupted  true if the fetches are interleaved with other work
                      (so no sequential-read discount applies)
  @param cost [out]   computed cost

  For a clustered PK the engine's read_time() already models the retrieval.
  Otherwise the number of distinct file blocks touched is estimated with the
  classic Yao/Cardenas-style formula below, and for an uninterrupted sweep
  the per-IO cost is discounted because the reads are in disk order.
*/
static void get_sweep_read_cost(Table *table,
                                ha_rows nrows,
                                bool interrupted,
                                optimizer::CostVector *cost)
{
  cost->zero();
  if (table->cursor->primary_key_is_clustered())
  {
    /* Rows live in the PK; charge an ordinary clustered-index read. */
    cost->setIOCount(table->cursor->read_time(table->getShare()->getPrimaryKey(),
                                              static_cast<uint32_t>(nrows),
                                              nrows));
  }
  else
  {
    /* Total data blocks in the file. */
    double n_blocks=
      ceil(static_cast<double>(table->cursor->stats.data_file_length) / IO_SIZE);
    /* Expected number of distinct blocks hit by nrows random probes. */
    double busy_blocks=
      n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, static_cast<double>(nrows)));
    if (busy_blocks < 1.0)
      busy_blocks= 1.0;

    cost->setIOCount(busy_blocks);

    if (! interrupted)
    {
      /* Sequential sweep: average seek cost shrinks with block density. */
      cost->setAvgIOCost((DISK_SEEK_BASE_COST +
                          DISK_SEEK_PROP_COST*n_blocks/busy_blocks));
    }
  }
}
00237
/*
  Forward declarations for the range-optimizer helpers implemented later in
  this translation unit.
*/
static optimizer::SEL_TREE * get_mm_parts(optimizer::RangeParameter *param,
                                          COND *cond_func,
                                          Field *field,
                                          Item_func::Functype type,
                                          Item *value,
                                          Item_result cmp_type);

static optimizer::SEL_ARG *get_mm_leaf(optimizer::RangeParameter *param,
                                       COND *cond_func,
                                       Field *field,
                                       KEY_PART *key_part,
                                       Item_func::Functype type,
                                       Item *value);

static optimizer::SEL_TREE *get_mm_tree(optimizer::RangeParameter *param, COND *cond);

static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts);

static ha_rows check_quick_select(Session *session,
                                  optimizer::Parameter *param,
                                  uint32_t idx,
                                  bool index_only,
                                  optimizer::SEL_ARG *tree,
                                  bool update_tbl_stats,
                                  uint32_t *mrr_flags,
                                  uint32_t *bufsize,
                                  optimizer::CostVector *cost);

static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
                                                      optimizer::Parameter *param,
                                                      optimizer::SEL_TREE *tree,
                                                      bool index_read_must_be_used,
                                                      bool update_tbl_stats,
                                                      double read_time);

static
optimizer::RorIntersectReadPlan *get_best_ror_intersect(const optimizer::Parameter *param,
                                                        optimizer::SEL_TREE *tree,
                                                        double read_time,
                                                        bool *are_all_covering);

static
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
                                                                 optimizer::SEL_TREE *tree,
                                                                 double read_time);

static
optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
                                                  optimizer::Parameter *param,
                                                  optimizer::SEL_IMERGE *imerge,
                                                  double read_time);

static
optimizer::GroupMinMaxReadPlan *get_best_group_min_max(optimizer::Parameter *param, optimizer::SEL_TREE *tree);

static optimizer::SEL_TREE *tree_and(optimizer::RangeParameter *param,
                                     optimizer::SEL_TREE *tree1,
                                     optimizer::SEL_TREE *tree2);

static optimizer::SEL_ARG *sel_add(optimizer::SEL_ARG *key1, optimizer::SEL_ARG *key2);

static optimizer::SEL_ARG *key_and(optimizer::RangeParameter *param,
                                   optimizer::SEL_ARG *key1,
                                   optimizer::SEL_ARG *key2,
                                   uint32_t clone_flag);

static bool get_range(optimizer::SEL_ARG **e1, optimizer::SEL_ARG **e2, optimizer::SEL_ARG *root1);

/* Shared sentinel SEL_ARG denoting an impossible (empty) interval. */
optimizer::SEL_ARG optimizer::null_element(optimizer::SEL_ARG::IMPOSSIBLE);

static bool null_part_in_key(KEY_PART *key_part,
                             const unsigned char *key,
                             uint32_t length);

bool sel_trees_can_be_ored(optimizer::SEL_TREE *tree1,
                           optimizer::SEL_TREE *tree2,
                           optimizer::RangeParameter *param);
00316
00317
00318
00319
00320
00321
00322
00323
00324
00325 inline void imerge_list_and_list(List<optimizer::SEL_IMERGE> *im1, List<optimizer::SEL_IMERGE> *im2)
00326 {
00327 im1->concat(im2);
00328 }
00329
00330
00331
00332
00333
00334
00335
00336
00337
00338
00339
00340
/*
  Build a SqlSelect for the given table and condition.

  @param head            table the select will read
  @param const_tables    map of tables already resolved to constants
  @param read_tables     map of tables already positioned on a row
  @param conds           WHERE condition (may be NULL)
  @param allow_null_cond if false, a NULL condition yields no select object
  @param error [out]     0 on success, 1 on allocation failure

  @return new SqlSelect, or NULL (not an error when conds==NULL and
          allow_null_cond is false; check *error to distinguish).
*/
optimizer::SqlSelect *optimizer::make_select(Table *head,
                                             table_map const_tables,
                                             table_map read_tables,
                                             COND *conds,
                                             bool allow_null_cond,
                                             int *error)
{
  optimizer::SqlSelect *select= NULL;

  *error= 0;

  if (! conds && ! allow_null_cond)
  {
    return 0;
  }
  if (! (select= new optimizer::SqlSelect()))
  {
    *error= 1;
    return 0;
  }
  select->read_tables=read_tables;
  select->const_tables=const_tables;
  select->head=head;
  select->cond=conds;

  if (head->sort.io_cache)
  {
    /*
      Take over the sort result: shallow-copy the IO_CACHE state into the
      select's own cache, derive the row count from the cache size, then
      free the original holder so it is not closed twice.
    */
    memcpy(select->file, head->sort.io_cache, sizeof(internal::IO_CACHE));
    select->records=(ha_rows) (select->file->end_of_file/
                               head->cursor->ref_length);
    delete head->sort.io_cache;
    head->sort.io_cache=0;
  }
  return(select);
}
00376
00377
/*
  Default constructor: no quick select, no condition; the IO_CACHE is
  allocated from the statement memory root (freed with it) and cleared.
*/
optimizer::SqlSelect::SqlSelect()
  :
    quick(NULL),
    cond(NULL),
    file(static_cast<internal::IO_CACHE *>(memory::sql_calloc(sizeof(internal::IO_CACHE)))),
    free_cond(false)
{
  quick_keys.reset();
  needed_reg.reset();
  my_b_clear(file);
}
00389
00390
00391 void optimizer::SqlSelect::cleanup()
00392 {
00393 if (quick)
00394 {
00395 delete quick;
00396 quick= NULL;
00397 }
00398
00399 if (free_cond)
00400 {
00401 free_cond= 0;
00402 delete cond;
00403 cond= 0;
00404 }
00405 file->close_cached_file();
00406 }
00407
00408
/* Destructor simply delegates to cleanup(), which releases all owned state. */
optimizer::SqlSelect::~SqlSelect()
{
  cleanup();
}
00413
00414
00415 bool optimizer::SqlSelect::check_quick(Session *session,
00416 bool force_quick_range,
00417 ha_rows limit)
00418 {
00419 key_map tmp;
00420 tmp.set();
00421 return (test_quick_select(session,
00422 tmp,
00423 0,
00424 limit,
00425 force_quick_range,
00426 false) < 0);
00427 }
00428
00429
00430 bool optimizer::SqlSelect::skip_record()
00431 {
00432 return (cond ? cond->val_int() == 0 : 0);
00433 }
00434
00435
00436 optimizer::QuickSelectInterface::QuickSelectInterface()
00437 :
00438 max_used_key_length(0),
00439 used_key_parts(0)
00440 {}
00441
00442
00443
00444
00445
00446
00447
00448
00449
00450
00451
00452
00453
00454
00455
00456
00457
00458
00459
00460
00461
00462
00463
00464
00465
00466
00467
00468
00469
00470
00471
00472 uint32_t optimizer::get_index_for_order(Table *table, Order *order, ha_rows limit)
00473 {
00474 uint32_t idx;
00475 uint32_t match_key= MAX_KEY, match_key_len= MAX_KEY_LENGTH + 1;
00476 Order *ord;
00477
00478 for (ord= order; ord; ord= ord->next)
00479 if (!ord->asc)
00480 return MAX_KEY;
00481
00482 for (idx= 0; idx < table->getShare()->sizeKeys(); idx++)
00483 {
00484 if (!(table->keys_in_use_for_query.test(idx)))
00485 continue;
00486 KeyPartInfo *keyinfo= table->key_info[idx].key_part;
00487 uint32_t n_parts= table->key_info[idx].key_parts;
00488 uint32_t partno= 0;
00489
00490
00491
00492
00493
00494
00495 if (! (table->index_flags(idx) & HA_READ_ORDER))
00496 continue;
00497 for (ord= order; ord && partno < n_parts; ord= ord->next, partno++)
00498 {
00499 Item *item= order->item[0];
00500 if (! (item->type() == Item::FIELD_ITEM &&
00501 ((Item_field*)item)->field->eq(keyinfo[partno].field)))
00502 break;
00503 }
00504
00505 if (! ord && table->key_info[idx].key_length < match_key_len)
00506 {
00507
00508
00509
00510
00511
00512 match_key= idx;
00513 match_key_len= table->key_info[idx].key_length;
00514 }
00515 }
00516
00517 if (match_key != MAX_KEY)
00518 {
00519
00520
00521
00522
00523
00524 double full_scan_time= table->cursor->scan_time();
00525 double index_scan_time= table->cursor->read_time(match_key, 1, limit);
00526 if (index_scan_time > full_scan_time)
00527 match_key= MAX_KEY;
00528 }
00529 return match_key;
00530 }
00531
00532
00533
00534
00535
00536
00537
00538
00539
00540
00541
00542
00543
00544
00545
00546
00547
/*
  Build param->needed_fields: the set of table fields the query reads or
  writes and that therefore must be covered by any covering index plan.

  Fields that are part of a clustered primary key are removed again,
  because every secondary index implicitly covers them.

  @return 0 (always succeeds; kept as int for the caller's error pattern).
*/
static int fill_used_fields_bitmap(optimizer::Parameter *param)
{
  Table *table= param->table;
  uint32_t pk;
  param->tmp_covered_fields.clear();
  param->needed_fields.resize(table->getShare()->sizeFields());
  param->needed_fields.reset();

  /* Everything the statement reads or updates is "needed". */
  param->needed_fields|= *table->read_set;
  param->needed_fields|= *table->write_set;

  pk= param->table->getShare()->getPrimaryKey();
  if (pk != MAX_KEY && param->table->cursor->primary_key_is_clustered())
  {
    /* Clustered PK parts are available from any index; drop them. */
    KeyPartInfo *key_part= param->table->key_info[pk].key_part;
    KeyPartInfo *key_part_end= key_part +
                               param->table->key_info[pk].key_parts;
    for (;key_part != key_part_end; ++key_part)
      param->needed_fields.reset(key_part->fieldnr-1);
  }
  return 0;
}
00571
00572
00573
00574
00575
00576
00577
00578
00579
00580
00581
00582
00583
00584
00585
00586
00587
00588
00589
00590
00591
00592
00593
00594
00595
00596
00597
00598
00599
00600
00601
00602
00603
00604
00605
00606
00607
00608
00609
00610
00611
00612
00613
00614
00615
00616
00617
00618
00619
00620
00621
00622
00623
00624
00625
00626
00627
00628
00629
00630
00631
00632
00633
00634
00635
00636
00637
00638
/*
  Test whether a "quick" (range/index-merge/ROR/group-min-max) access
  method is cheaper than the default scan, and if so attach the cheapest
  one to this->quick.

  @param session           current session
  @param keys_to_use       keys the caller allows us to consider
  @param prev_tables       tables already read (for ref-access pruning)
  @param limit             row limit of the query
  @param force_quick_range build a quick select even if not cheaper
  @param ordered_output    caller needs rows in index order (forces
                           default MRR implementation)

  @return -1 when the condition is impossible (always false),
           1 when a quick select was created,
           0 otherwise (this->quick left NULL).

  All optimizer scratch data is allocated on a local memory root that is
  freed before returning; session->mem_root is restored on every exit path.
*/
int optimizer::SqlSelect::test_quick_select(Session *session,
                                            key_map keys_to_use,
                                            table_map prev_tables,
                                            ha_rows limit,
                                            bool force_quick_range,
                                            bool ordered_output)
{
  uint32_t idx;
  double scan_time;
  /* Drop any quick select left over from a previous call. */
  if (quick)
  {
    delete quick;
    quick= NULL;
  }
  needed_reg.reset();
  quick_keys.reset();
  if (keys_to_use.none())
    return 0;
  records= head->cursor->stats.records;
  if (!records)
    records++;                                  /* avoid divide-by-zero in cost math */
  /* Baseline: cost of a full scan plus comparing every row. */
  scan_time= (double) records / TIME_FOR_COMPARE + 1;
  read_time= (double) head->cursor->scan_time() + scan_time + 1.1;
  if (head->force_index)
    scan_time= read_time= DBL_MAX;              /* FORCE INDEX: never pick full scan */
  if (limit < records)
    read_time= (double) records + scan_time + 1;  /* LIMIT makes range more attractive */
  else if (read_time <= 2.0 && !force_quick_range)
    return 0;                                   /* table is tiny; not worth the analysis */

  keys_to_use&= head->keys_in_use_for_query;
  if (keys_to_use.any())
  {
    memory::Root alloc;
    optimizer::SEL_TREE *tree= NULL;
    KEY_PART *key_parts;
    KeyInfo *key_info;
    optimizer::Parameter param;

    if (check_stack_overrun(session, 2*STACK_MIN_SIZE, NULL))
      return 0;                                 /* fatal error is set inside */

    /* Set up the shared parameter block used by all plan builders. */
    param.session= session;
    param.prev_tables= prev_tables | const_tables;
    param.read_tables= read_tables;
    param.current_table= head->map;
    param.table=head;
    param.keys=0;
    param.mem_root= &alloc;
    param.old_root= session->mem_root;
    param.needed_reg= &needed_reg;
    param.imerge_cost_buff_size= 0;
    param.using_real_indexes= true;
    param.remove_jump_scans= true;
    param.force_default_mrr= ordered_output;

    session->no_errors=1;                       /* suppress errors during estimation */
    memory::init_sql_alloc(&alloc, session->variables.range_alloc_block_size, 0);
    if (!(param.key_parts= (KEY_PART*) alloc.alloc_root( sizeof(KEY_PART) * head->getShare()->key_parts)) ||
        fill_used_fields_bitmap(&param))
    {
      session->no_errors=0;
      alloc.free_root(MYF(0));

      return 0;
    }
    key_parts= param.key_parts;
    session->mem_root= &alloc;

    /*
      Build the optimizer's flat key-part description for every usable
      key; param.real_keynr[] maps optimizer key index -> real key number.
    */
    key_info= head->key_info;
    for (idx=0 ; idx < head->getShare()->sizeKeys() ; idx++, key_info++)
    {
      KeyPartInfo *key_part_info;
      if (! keys_to_use.test(idx))
        continue;

      param.key[param.keys]=key_parts;
      key_part_info= key_info->key_part;
      for (uint32_t part=0;
           part < key_info->key_parts;
           part++, key_parts++, key_part_info++)
      {
        key_parts->key= param.keys;
        key_parts->part= part;
        key_parts->length= key_part_info->length;
        key_parts->store_length= key_part_info->store_length;
        key_parts->field= key_part_info->field;
        key_parts->null_bit= key_part_info->null_bit;

        key_parts->flag= (uint8_t) key_part_info->key_part_flag;
      }
      param.real_keynr[param.keys++]=idx;
    }
    param.key_parts_end=key_parts;
    param.alloced_sel_args= 0;

    /* A covering-index scan may already beat the full-scan baseline. */
    if (!head->covering_keys.none())
    {
      int key_for_use= head->find_shortest_key(&head->covering_keys);
      double key_read_time=
        param.table->cursor->index_only_read_time(key_for_use, records) +
        (double) records / TIME_FOR_COMPARE;
      if (key_read_time < read_time)
        read_time= key_read_time;
    }

    optimizer::TableReadPlan *best_trp= NULL;
    optimizer::GroupMinMaxReadPlan *group_trp= NULL;
    double best_read_time= read_time;

    if (cond)
    {
      if ((tree= get_mm_tree(&param,cond)))
      {
        if (tree->type == optimizer::SEL_TREE::IMPOSSIBLE)
        {
          /* Condition is always false: no rows, report impossibility. */
          records=0L;
          read_time= (double) HA_POS_ERROR;
          goto free_mem;
        }

        /* Only KEY/KEY_SMALLER trees describe usable ranges. */
        if (tree->type != optimizer::SEL_TREE::KEY && tree->type != optimizer::SEL_TREE::KEY_SMALLER)
          tree= NULL;
      }
    }

    /*
      Try the GROUP BY min/max optimization; it may apply even without a
      range tree, and its row estimate caps quick_condition_rows.
    */
    group_trp= get_best_group_min_max(&param, tree);
    if (group_trp)
    {
      param.table->quick_condition_rows= min(group_trp->records,
                                             head->cursor->stats.records);
      if (group_trp->read_cost < best_read_time)
      {
        best_trp= group_trp;
        best_read_time= best_trp->read_cost;
      }
    }

    if (tree)
    {
      /*
        No OR-of-ranges merges: consider plain range scans and ROR
        (rowid-ordered retrieval) intersections.
      */
      if (tree->merges.is_empty())
      {
        optimizer::RangeReadPlan *range_trp= NULL;
        optimizer::RorIntersectReadPlan *rori_trp= NULL;
        bool can_build_covering= false;

        if ((range_trp= get_key_scans_params(session, &param, tree, false, true,
                                             best_read_time)))
        {
          best_trp= range_trp;
          best_read_time= best_trp->read_cost;
        }

        /*
          ROR intersection is skipped for DELETE — presumably because the
          executor cannot delete through a ROR-intersect scan; TODO confirm.
        */
        if ((session->lex().sql_command != SQLCOM_DELETE))
        {
          if ((rori_trp= get_best_ror_intersect(&param, tree, best_read_time,
                                                &can_build_covering)))
          {
            best_trp= rori_trp;
            best_read_time= best_trp->read_cost;

            /* Try to upgrade to a covering ROR intersection. */
            if (!rori_trp->is_covering && can_build_covering &&
                (rori_trp= get_best_covering_ror_intersect(&param, tree,
                                                           best_read_time)))
              best_trp= rori_trp;
          }
        }
      }
      else
      {
        /* OR branches present: evaluate each index-merge candidate. */
        optimizer::SEL_IMERGE *imerge= NULL;
        optimizer::TableReadPlan *best_conj_trp= NULL;
        optimizer::TableReadPlan *new_conj_trp= NULL;
        List<optimizer::SEL_IMERGE>::iterator it(tree->merges.begin());
        while ((imerge= it++))
        {
          new_conj_trp= get_best_disjunct_quick(session, &param, imerge, best_read_time);
          if (new_conj_trp)
            set_if_smaller(param.table->quick_condition_rows,
                           new_conj_trp->records);
          if (!best_conj_trp || (new_conj_trp && new_conj_trp->read_cost <
                                 best_conj_trp->read_cost))
            best_conj_trp= new_conj_trp;
        }
        if (best_conj_trp)
          best_trp= best_conj_trp;
      }
    }

    session->mem_root= param.old_root;

    /* Materialize the winning plan into an executable quick select. */
    if (best_trp)
    {
      records= best_trp->records;
      if (! (quick= best_trp->make_quick(&param, true)) || quick->init())
      {
        /* Construction or init failed: fall back to no quick select. */
        if (quick)
        {
          delete quick;
          quick= NULL;
        }
      }
    }

  free_mem:
    alloc.free_root(MYF(0));
    session->mem_root= param.old_root;
    session->no_errors=0;
  }

  /* -1 = impossible condition, 1 = quick select built, 0 = none. */
  return(records ? test(quick) : -1);
}
00888
00889
00890
00891
00892
00893
00894
00895
00896
00897
00898
00899
00900
00901
00902
00903
00904
00905
00906
00907
00908
00909
00910
00911
00912
00913
00914
00915
00916
00917
00918
00919
00920
00921
00922
00923
00924
00925
00926
00927
00928
00929
00930
00931
00932
00933
00934
00935
00936
00937
00938
00939
00940
00941
00942
00943
00944
00945
00946
00947
00948
00949
00950
00951
00952
00953
00954
/*
  Build the best access plan for one disjunction (OR) of range conditions:
  either an index_merge (union of range scans de-duplicated through a
  Unique object) or a ROR-union of rowid-ordered scans.

  @param session    current session
  @param param      shared range-optimizer parameter block
  @param imerge     the SEL_TREE list describing the OR branches
  @param read_time  cost budget; plans costing more are rejected

  @return the cheapest plan found within the budget, or NULL.
*/
static
optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
                                                  optimizer::Parameter *param,
                                                  optimizer::SEL_IMERGE *imerge,
                                                  double read_time)
{
  optimizer::SEL_TREE **ptree= NULL;
  optimizer::IndexMergeReadPlan *imerge_trp= NULL;
  uint32_t n_child_scans= imerge->trees_next - imerge->trees;
  optimizer::RangeReadPlan **range_scans= NULL;
  optimizer::RangeReadPlan **cur_child= NULL;
  optimizer::RangeReadPlan **cpk_scan= NULL;   /* scan over the clustered PK, if any */
  bool imerge_too_expensive= false;
  double imerge_cost= 0.0;
  ha_rows cpk_scan_records= 0;
  ha_rows non_cpk_scan_records= 0;
  bool pk_is_clustered= param->table->cursor->primary_key_is_clustered();
  bool all_scans_ror_able= true;
  bool all_scans_rors= true;
  uint32_t unique_calc_buff_size;
  optimizer::TableReadPlan **roru_read_plans= NULL;
  optimizer::TableReadPlan **cur_roru_plan= NULL;
  double roru_index_costs;
  ha_rows roru_total_records;
  double roru_intersect_part= 1.0;

  if (! (range_scans= (optimizer::RangeReadPlan**)param->mem_root->alloc_root(sizeof(optimizer::RangeReadPlan*)* n_child_scans)))
  {
    return NULL;
  }

  /*
    Pick the best range scan for each OR branch, summing costs and row
    counts; note ROR capability and whether one branch is the clustered PK.
  */
  for (ptree= imerge->trees, cur_child= range_scans;
       ptree != imerge->trees_next;
       ptree++, cur_child++)
  {
    if (!(*cur_child= get_key_scans_params(session, param, *ptree, true, false, read_time)))
    {
      /*
        One branch has no range scan within budget; the whole merge is
        then too expensive. Keep looping only to finish the bookkeeping.
      */
      imerge_too_expensive= true;
    }
    if (imerge_too_expensive)
      continue;

    imerge_cost += (*cur_child)->read_cost;
    all_scans_ror_able &= ((*ptree)->n_ror_scans > 0);
    all_scans_rors &= (*cur_child)->is_ror;
    if (pk_is_clustered &&
        param->real_keynr[(*cur_child)->key_idx] ==
        param->table->getShare()->getPrimaryKey())
    {
      cpk_scan= cur_child;
      cpk_scan_records= (*cur_child)->records;
    }
    else
      non_cpk_scan_records += (*cur_child)->records;
  }

  if (imerge_too_expensive || (imerge_cost > read_time) ||
      ((non_cpk_scan_records+cpk_scan_records >= param->table->cursor->stats.records) && read_time != DBL_MAX))
  {
    /*
      Over budget, or the merge would touch at least as many rows as a
      full scan — give up on this disjunction.
    */
    return NULL;
  }
  if (all_scans_rors)
  {
    /* Every branch is already rowid-ordered: go straight to ROR-union. */
    roru_read_plans= (optimizer::TableReadPlan **) range_scans;
    goto skip_to_ror_scan;
  }
  if (cpk_scan)
  {
    /*
      The clustered-PK branch is filtered on the fly (rows from the other
      scans are checked against its range), costing one rowid comparison
      per non-PK row.
    */
    imerge_cost += non_cpk_scan_records / TIME_FOR_COMPARE_ROWID;
  }

  /* Add the cost of fetching the matching rows (the "sweep"). */
  {
    optimizer::CostVector sweep_cost;
    Join *join= param->session->lex().select_lex.join;
    bool is_interrupted= test(join && join->tables == 1);
    get_sweep_read_cost(param->table, non_cpk_scan_records, is_interrupted,
                        &sweep_cost);
    imerge_cost += sweep_cost.total_cost();
  }
  if (imerge_cost > read_time)
    goto build_ror_index_merge;

  /* Cost of de-duplicating rowids with a Unique object. */
  unique_calc_buff_size=
    Unique::get_cost_calc_buff_size((ulong)non_cpk_scan_records,
                                    param->table->cursor->ref_length,
                                    param->session->variables.sortbuff_size);
  if (param->imerge_cost_buff_size < unique_calc_buff_size)
  {
    if (!(param->imerge_cost_buff= (uint*)param->mem_root->alloc_root(unique_calc_buff_size)))
    {
      return NULL;
    }

    param->imerge_cost_buff_size= unique_calc_buff_size;
  }

  imerge_cost +=
    Unique::get_use_cost(param->imerge_cost_buff, (uint32_t)non_cpk_scan_records,
                         param->table->cursor->ref_length,
                         param->session->variables.sortbuff_size);
  if (imerge_cost < read_time)
  {
    if ((imerge_trp= new (param->mem_root) optimizer::IndexMergeReadPlan))
    {
      imerge_trp->read_cost= imerge_cost;
      imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
      imerge_trp->records= min(imerge_trp->records,
                               param->table->cursor->stats.records);
      imerge_trp->range_scans= range_scans;
      imerge_trp->range_scans_end= range_scans + n_child_scans;
      read_time= imerge_cost;                  /* new budget for the ROR attempt */
    }
  }

build_ror_index_merge:
  /* ROR-union is skipped for DELETE, like ROR-intersect elsewhere. */
  if (!all_scans_ror_able || param->session->lex().sql_command == SQLCOM_DELETE)
    return(imerge_trp);

  /* Try constructing a ROR-union: per branch, the best ROR-capable plan. */
  bool dummy;
  if (! (roru_read_plans=
         (optimizer::TableReadPlan **) param->mem_root->alloc_root(sizeof(optimizer::TableReadPlan*) * n_child_scans)))
  {
    return imerge_trp;
  }
skip_to_ror_scan:
  roru_index_costs= 0.0;
  roru_total_records= 0;
  cur_roru_plan= roru_read_plans;

  for (ptree= imerge->trees, cur_child= range_scans;
       ptree != imerge->trees_next;
       ptree++, cur_child++, cur_roru_plan++)
  {
    /*
      Budget for this branch: its own ROR scan cost if the chosen range
      scan was ROR, otherwise the whole remaining budget.
    */
    double cost;
    if ((*cur_child)->is_ror)
    {
      cost= param->table->cursor->
            read_time(param->real_keynr[(*cur_child)->key_idx], 1,
                      (*cur_child)->records) +
            static_cast<double>((*cur_child)->records) / TIME_FOR_COMPARE;
    }
    else
      cost= read_time;

    optimizer::TableReadPlan *prev_plan= *cur_child;
    if (!(*cur_roru_plan= get_best_ror_intersect(param, *ptree, cost,
                                                 &dummy)))
    {
      if (prev_plan->is_ror)
        *cur_roru_plan= prev_plan;
      else
        return(imerge_trp);                    /* branch cannot be ROR: no union */
      roru_index_costs += (*cur_roru_plan)->read_cost;
    }
    else
      roru_index_costs +=
        ((optimizer::RorIntersectReadPlan*)(*cur_roru_plan))->index_scan_costs;
    roru_total_records += (*cur_roru_plan)->records;
    roru_intersect_part *= (*cur_roru_plan)->records /
                           param->table->cursor->stats.records;
  }

  /*
    Correct for double counting: subtract the expected size of the
    intersection of all branches (independence assumption).
  */
  roru_total_records -= (ha_rows)(roru_intersect_part*
                                  param->table->cursor->stats.records);

  /*
    ROR-union cost = index scans + n-way rowid merge (priority queue of
    n_child_scans streams, hence the log factor) + row sweep.
  */
  double roru_total_cost;
  {
    optimizer::CostVector sweep_cost;
    Join *join= param->session->lex().select_lex.join;
    bool is_interrupted= test(join && join->tables == 1);
    get_sweep_read_cost(param->table, roru_total_records, is_interrupted,
                        &sweep_cost);
    roru_total_cost= roru_index_costs +
                     static_cast<double>(roru_total_records)*log((double)n_child_scans) /
                     (TIME_FOR_COMPARE_ROWID * M_LN2) +
                     sweep_cost.total_cost();
  }

  optimizer::RorUnionReadPlan *roru= NULL;
  if (roru_total_cost < read_time)
  {
    if ((roru= new (param->mem_root) optimizer::RorUnionReadPlan))
    {
      roru->first_ror= roru_read_plans;
      roru->last_ror= roru_read_plans + n_child_scans;
      roru->read_cost= roru_total_cost;
      roru->records= roru_total_records;
      return roru;
    }
  }
  return(imerge_trp);
}
01190
01191
01192
01193
01194
01195
01196
01197
01198
01199
01200
01201
01202
01203
01204
01205
01206
01207
/*
  Create a RorScanInfo describing a rowid-ordered scan over key 'idx'
  (optimizer key index) with the ranges in 'sel_arg'.

  Records which of the needed fields the key covers (as a bitmask stored
  in an unsigned long) and the cost of reading the index alone.

  @return the new scan descriptor, or NULL on allocation failure.
*/
static
optimizer::RorScanInfo *make_ror_scan(const optimizer::Parameter *param, int idx, optimizer::SEL_ARG *sel_arg)
{
  optimizer::RorScanInfo *ror_scan= NULL;

  uint32_t keynr;

  if (!(ror_scan= (optimizer::RorScanInfo*)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo))))
    return NULL;

  ror_scan->idx= idx;
  ror_scan->keynr= keynr= param->real_keynr[idx];
  /* A retrieved entry is the key value plus the row reference. */
  ror_scan->key_rec_length= (param->table->key_info[keynr].key_length +
                             param->table->cursor->ref_length);
  ror_scan->sel_arg= sel_arg;
  ror_scan->records= param->table->quick_rows[keynr];

  /* Mark the needed fields this key covers. */
  ror_scan->covered_fields_size= param->table->getShare()->sizeFields();
  boost::dynamic_bitset<> tmp_bitset(param->table->getShare()->sizeFields());
  tmp_bitset.reset();

  KeyPartInfo *key_part= param->table->key_info[keynr].key_part;
  KeyPartInfo *key_part_end= key_part +
                             param->table->key_info[keynr].key_parts;
  for (; key_part != key_part_end; ++key_part)
  {
    if (param->needed_fields.test(key_part->fieldnr-1))
      tmp_bitset.set(key_part->fieldnr-1);
  }
  double rows= param->table->quick_rows[ror_scan->keynr];
  ror_scan->index_read_cost=
    param->table->cursor->index_only_read_time(ror_scan->keynr, rows);
  /* NOTE(review): to_ulong() limits covered fields to one word — presumably
     table field counts here never exceed that; confirm upstream. */
  ror_scan->covered_fields= tmp_bitset.to_ulong();
  return ror_scan;
}
01243
01244
01245
01246
01247
01248
01249
01250
01251
01252
01253
01254
01255
01256
01257
01258 static int cmp_ror_scan_info(optimizer::RorScanInfo** a, optimizer::RorScanInfo** b)
01259 {
01260 double val1= static_cast<double>((*a)->records) * (*a)->key_rec_length;
01261 double val2= static_cast<double>((*b)->records) * (*b)->key_rec_length;
01262 return (val1 < val2)? -1: (val1 == val2)? 0 : 1;
01263 }
01264
01265
01266
01267
01268
01269
01270
01271
01272
01273
01274
01275
01276
01277
01278
01279
01280
01281
01282
01283 static int cmp_ror_scan_info_covering(optimizer::RorScanInfo** a, optimizer::RorScanInfo** b)
01284 {
01285 if ((*a)->used_fields_covered > (*b)->used_fields_covered)
01286 return -1;
01287 if ((*a)->used_fields_covered < (*b)->used_fields_covered)
01288 return 1;
01289 if ((*a)->key_components < (*b)->key_components)
01290 return -1;
01291 if ((*a)->key_components > (*b)->key_components)
01292 return 1;
01293 if ((*a)->first_uncovered_field < (*b)->first_uncovered_field)
01294 return -1;
01295 if ((*a)->first_uncovered_field > (*b)->first_uncovered_field)
01296 return 1;
01297 return 0;
01298 }
01299
01300
/*
  Running state of an in-construction ROR-intersection plan: which fields
  the chosen scans cover so far and the accumulated cost/row estimates.
*/
typedef struct st_ror_intersect_info
{
  st_ror_intersect_info()
    :
      param(NULL),
      covered_fields(),
      out_rows(0.0),
      is_covering(false),
      index_records(0),
      index_scan_costs(0.0),
      total_cost(0.0)
  {}

  /* Start from the full table: out_rows is the total record count. */
  st_ror_intersect_info(const optimizer::Parameter *in_param)
    :
      param(in_param),
      covered_fields(in_param->table->getShare()->sizeFields()),
      out_rows(in_param->table->cursor->stats.records),
      is_covering(false),
      index_records(0),
      index_scan_costs(0.0),
      total_cost(0.0)
  {
    covered_fields.reset();
  }

  const optimizer::Parameter *param;
  /* Fields covered by the scans added so far. */
  boost::dynamic_bitset<> covered_fields;

  /* Estimated number of rows the intersection will produce. */
  double out_rows;

  /* True when covered_fields is a superset of param->needed_fields. */
  bool is_covering;

  ha_rows index_records;      /* sum of the added scans' record counts */
  double index_scan_costs;    /* cost of the index-only scans */
  double total_cost;          /* index scans plus (if needed) row sweep */
} ROR_INTERSECT_INFO;
01342
01343
01344 static void ror_intersect_cpy(ROR_INTERSECT_INFO *dst,
01345 const ROR_INTERSECT_INFO *src)
01346 {
01347 dst->param= src->param;
01348 dst->covered_fields= src->covered_fields;
01349 dst->out_rows= src->out_rows;
01350 dst->is_covering= src->is_covering;
01351 dst->index_records= src->index_records;
01352 dst->index_scan_costs= src->index_scan_costs;
01353 dst->total_cost= src->total_cost;
01354 }
01355
01356
01357
01358
01359
01360
01361
01362
01363
01364
01365
01366
01367
01368
01369
01370
01371
01372
01373
01374
01375
01376
01377
01378
01379
01380
01381
01382
01383
01384
01385
01386
01387
01388
01389
01390
01391
01392
01393
01394
01395
01396
01397
01398
01399
01400
01401
01402
01403
01404
01405
01406
01407
01408
01409
01410
01411
01412
01413
01414
01415
01416
01417
01418
01419
01420
01421
01422
01423
01424
01425
01426
01427
01428
01429
01430
01431
01432
01433
01434
01435
01436
01437
01438
01439
01440
01441
01442
01443
01444
01445
01446
/*
  Estimate how much adding 'scan' to the intersection 'info' shrinks the
  result: walk the scan's key parts, and for each maximal run of parts NOT
  yet covered by 'info', probe records_in_range() to get the conditional
  selectivity of that run given the already-fixed prefix.

  @return multiplier in (0, 1]; 1.0 means the scan adds no filtering.
*/
static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
                                   const optimizer::RorScanInfo *scan)
{
  double selectivity_mult= 1.0;
  KeyPartInfo *key_part= info->param->table->key_info[scan->keynr].key_part;
  unsigned char key_val[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; /* min-value tuple buffer */
  unsigned char *key_ptr= key_val;
  optimizer::SEL_ARG *sel_arg= NULL;
  optimizer::SEL_ARG *tuple_arg= NULL;
  key_part_map keypart_map= 0;
  bool cur_covered;
  bool prev_covered= test(info->covered_fields.test(key_part->fieldnr-1));
  key_range min_range;
  key_range max_range;
  min_range.key= key_val;
  min_range.flag= HA_READ_KEY_EXACT;
  max_range.key= key_val;
  max_range.flag= HA_READ_AFTER_KEY;
  ha_rows prev_records= info->param->table->cursor->stats.records;

  for (sel_arg= scan->sel_arg; sel_arg;
       sel_arg= sel_arg->next_key_part)
  {
    cur_covered=
      test(info->covered_fields.test(key_part[sel_arg->part].fieldnr-1));
    if (cur_covered != prev_covered)
    {
      /* Covered/uncovered boundary: estimate rows matching the prefix. */
      ha_rows records;
      if (!tuple_arg)
      {
        tuple_arg= scan->sel_arg;
        /* Lazily start building the min-value key tuple. */
        tuple_arg->store_min(key_part->store_length, &key_ptr, 0);
        keypart_map= 1;
      }
      /* Extend the tuple up to (but not including) the current part. */
      while (tuple_arg->next_key_part != sel_arg)
      {
        tuple_arg= tuple_arg->next_key_part;
        tuple_arg->store_min(key_part[tuple_arg->part].store_length,
                             &key_ptr, 0);
        keypart_map= (keypart_map << 1) | 1;
      }
      min_range.length= max_range.length= (size_t) (key_ptr - key_val);
      min_range.keypart_map= max_range.keypart_map= keypart_map;
      records= (info->param->table->cursor->
                records_in_range(scan->keynr, &min_range, &max_range));
      if (cur_covered)
      {
        /* End of an uncovered run: fold its conditional selectivity in. */
        selectivity_mult *= static_cast<double>(records) / prev_records;
        prev_records= HA_POS_ERROR;
      }
      else
      {
        /* Start of an uncovered run: remember the baseline row count. */
        prev_records= records;
      }
    }
    prev_covered= cur_covered;
  }
  if (!prev_covered)
  {
    /* Trailing uncovered run extends to the end of the used key parts. */
    selectivity_mult *= static_cast<double>(info->param->table->quick_rows[scan->keynr]) / prev_records;
  }
  return selectivity_mult;
}
01514
01515
01516
01517
01518
01519
01520
01521
01522
01523
01524
01525
01526
01527
01528
01529
01530
01531
01532
01533
01534
01535
01536
01537
01538
01539
01540
01541
01542
01543
01544
01545
01546
01547
01548
01549
01550
01551
/*
  Try to add one more rowid-ordered (ROR) index scan to the intersection
  being built in '*info', updating the running row estimate and costs.

  @param info         Intersection state accumulated so far: out_rows,
                      index_records, covered_fields, costs.
  @param ror_scan     Candidate ROR scan to add.
  @param is_cpk_scan  true when the scan is over the clustered primary key.
                      Such a scan is used only as a row filter, so it adds
                      rowid-comparison cost instead of an index read cost.

  @return true  if the scan was added (its selectivity multiplier < 1.0,
                i.e. it filters out some rows),
          false if adding it would not reduce the estimated row count.
*/
static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
                              optimizer::RorScanInfo* ror_scan, bool is_cpk_scan)
{
  double selectivity_mult= 1.0;

  selectivity_mult = ror_scan_selectivity(info, ror_scan);
  if (selectivity_mult == 1.0)
  {
    /* The scan filters nothing beyond what is already covered: reject it. */
    return false;
  }

  info->out_rows *= selectivity_mult;

  if (is_cpk_scan)
  {
    /*
      The CPK scan is not read on its own; each rowid produced by the other
      scans is checked against it, costing one rowid comparison per
      accumulated index record.
    */
    info->index_scan_costs += static_cast<double>(info->index_records) /
      TIME_FOR_COMPARE_ROWID;
  }
  else
  {
    /* Regular scan: account for its rows, its read cost and the columns
       it covers. */
    info->index_records += info->param->table->quick_rows[ror_scan->keynr];
    info->index_scan_costs += ror_scan->index_read_cost;
    boost::dynamic_bitset<> tmp_bitset= ror_scan->bitsToBitset();
    info->covered_fields|= tmp_bitset;
    if (! info->is_covering && info->param->needed_fields.is_subset_of(info->covered_fields))
    {
      /* All fields the query needs are now covered by the used indexes. */
      info->is_covering= true;
    }
  }

  info->total_cost= info->index_scan_costs;
  if (! info->is_covering)
  {
    /* A non-covering intersection must also fetch full rows: add the
       estimated disk-sweep cost for out_rows rows. */
    optimizer::CostVector sweep_cost;
    Join *join= info->param->session->lex().select_lex.join;
    /* A single-table join means the sweep is interleaved with other work. */
    bool is_interrupted= test(join && join->tables == 1);
    get_sweep_read_cost(info->param->table, double2rows(info->out_rows),
                        is_interrupted, &sweep_cost);
    info->total_cost += sweep_cost.total_cost();
  }
  return true;
}
01600
01601
01602
01603
01604
01605
01606
01607
01608
01609
01610
01611
01612
01613
01614
01615
01616
01617
01618
01619
01620
01621
01622
01623
01624
01625
01626
01627
01628
01629
01630
01631
01632
01633
01634
/*
  Greedily build a *covering* ROR-intersection plan: repeatedly pick the scan
  that covers the most not-yet-covered needed fields until every needed field
  is covered, or the accumulated cost exceeds the budget.

  @param param      Range-optimizer context (keys, table, mem_root, ...).
  @param tree       SEL_TREE whose ror_scans array holds the candidates.
  @param read_time  Cost budget; any plan costing more is rejected.

  @return the covering intersection plan, or NULL when no plan cheaper than
          'read_time' that uses more than one scan covers all needed fields.
*/
static
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
                                                                 optimizer::SEL_TREE *tree,
                                                                 double read_time)
{
  optimizer::RorScanInfo **ror_scan_mark;
  optimizer::RorScanInfo **ror_scans_end= tree->ror_scans_end;

  for (optimizer::RorScanInfo **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
    (*scan)->key_components=
      param->table->key_info[(*scan)->keynr].key_parts;

  /* Scans before ror_scan_mark are already chosen; the rest are candidates. */
  ror_scan_mark= tree->ror_scans;

  boost::dynamic_bitset<> *covered_fields= &param->tmp_covered_fields;
  if (covered_fields->empty())
  {
    /* Lazily size the scratch bitset to one bit per table field. */
    covered_fields->resize(param->table->getShare()->sizeFields());
  }
  covered_fields->reset();

  double total_cost= 0.0f;
  ha_rows records=0;
  bool all_covered;

  do
  {
    /*
      Refresh each remaining candidate's "how much new coverage does it add"
      statistics relative to the fields covered so far.
    */
    for (optimizer::RorScanInfo **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
    {
      (*scan)->subtractBitset(*covered_fields);
      (*scan)->used_fields_covered=
        (*scan)->getBitCount();
      (*scan)->first_uncovered_field= (*scan)->findFirstNotSet();
    }

    /* Put the best candidate (by coverage) first among the remaining scans. */
    internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
                       sizeof(optimizer::RorScanInfo*),
                       (qsort_cmp)cmp_ror_scan_info_covering);

    /* Commit the best candidate and account for its cost and rows. */
    total_cost += (*ror_scan_mark)->index_read_cost;
    records += (*ror_scan_mark)->records;
    if (total_cost > read_time)
      return NULL;

    boost::dynamic_bitset<> tmp_bitset= (*ror_scan_mark)->bitsToBitset();
    *covered_fields|= tmp_bitset;
    all_covered= param->needed_fields.is_subset_of(*covered_fields);
  } while ((++ror_scan_mark < ror_scans_end) && ! all_covered);

  /* A single-scan "intersection" is pointless; a plain range scan wins. */
  if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
    return NULL;

  /*
    Add the cost of merging the rowid streams: roughly one log2(#scans)
    rowid comparison per retrieved record.
  */
  total_cost += static_cast<double>(records) *
    log((double)(ror_scan_mark - tree->ror_scans)) /
    (TIME_FOR_COMPARE_ROWID * M_LN2);

  if (total_cost > read_time)
    return NULL;

  optimizer::RorIntersectReadPlan *trp= NULL;
  if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
  {
    return trp;
  }

  uint32_t best_num= (ror_scan_mark - tree->ror_scans);
  if (!(trp->first_scan= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* best_num)))
    return NULL;
  memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(optimizer::RorScanInfo*));
  trp->last_scan= trp->first_scan + best_num;
  trp->is_covering= true;
  trp->read_cost= total_cost;
  trp->records= records;
  trp->cpk_scan= NULL;
  set_if_smaller(param->table->quick_condition_rows, records);

  return(trp);
}
01732
01733
01734
01735
01736
01737
01738
01739
01740
01741
01742
01743
01744
01745
01746
01747
01748
01749
01750
01751
01752
01753
01754
01755
01756
01757
01758
01759
01760
01761
01762
01763
01764
01765
01766
01767
01768
01769
01770
01771
01772
01773
01774
01775
01776
01777
01778
01779
01780
01781
01782
01783
01784
01785
01786
01787
01788
01789
01790
01791
01792
01793
01794
01795
01796
01797
/*
  Find the cheapest ROR-intersection plan for 'tree'.

  Builds all candidate ROR scans, orders them, then greedily extends an
  intersection while the running cost keeps improving; finally tries to add
  the clustered primary key scan as a pure row filter.

  @param      param             Range-optimizer context.
  @param      tree              SEL_TREE describing candidate index scans.
  @param      read_time         Cost budget; more expensive plans are rejected.
  @param[out] are_all_covering  Set to true when the built intersection covers
                                every field the query needs.

  @return the best intersection plan found, or NULL when fewer than two ROR
          scans exist, the table is empty, allocation fails, or no plan beats
          'read_time'.
*/
static
optimizer::RorIntersectReadPlan *get_best_ror_intersect(const optimizer::Parameter *param,
                                                        optimizer::SEL_TREE *tree,
                                                        double read_time,
                                                        bool *are_all_covering)
{
  uint32_t idx= 0;
  double min_cost= DBL_MAX;

  /* Intersecting needs at least two scans and a non-empty table. */
  if ((tree->n_ror_scans < 2) || ! param->table->cursor->stats.records)
    return NULL;

  optimizer::RorScanInfo **cur_ror_scan= NULL;
  optimizer::RorScanInfo *cpk_scan= NULL;
  uint32_t cpk_no= 0;
  bool cpk_scan_used= false;

  if (! (tree->ror_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* param->keys)))
  {
    return NULL;
  }
  cpk_no= ((param->table->cursor->primary_key_is_clustered()) ?
           param->table->getShare()->getPrimaryKey() : MAX_KEY);

  /* Build a RorScanInfo for every usable key; the clustered PK scan is kept
     aside (it is treated specially, as a filter) instead of in the array. */
  for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++)
  {
    optimizer::RorScanInfo *scan;
    if (! tree->ror_scans_map.test(idx))
      continue;
    if (! (scan= make_ror_scan(param, idx, tree->keys[idx])))
      return NULL;
    if (param->real_keynr[idx] == cpk_no)
    {
      cpk_scan= scan;
      tree->n_ror_scans--;
    }
    else
      *(cur_ror_scan++)= scan;
  }

  tree->ror_scans_end= cur_ror_scan;

  /* Order the scans so the greedy loop below considers better ones first. */
  internal::my_qsort(tree->ror_scans, tree->n_ror_scans, sizeof(optimizer::RorScanInfo*),
                     (qsort_cmp)cmp_ror_scan_info);

  optimizer::RorScanInfo **intersect_scans= NULL;
  optimizer::RorScanInfo **intersect_scans_end= NULL;
  if (! (intersect_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*) * tree->n_ror_scans)))
    return NULL;
  intersect_scans_end= intersect_scans;

  /* 'intersect' is the working state; 'intersect_best' snapshots the
     cheapest prefix seen so far. */
  ROR_INTERSECT_INFO intersect(param);
  ROR_INTERSECT_INFO intersect_best(param);

  optimizer::RorScanInfo **intersect_scans_best= NULL;
  cur_ror_scan= tree->ror_scans;
  intersect_scans_best= intersect_scans;
  while (cur_ror_scan != tree->ror_scans_end && ! intersect.is_covering)
  {
    /* Skip scans that do not improve selectivity. */
    if (! ror_intersect_add(&intersect, *cur_ror_scan, false))
    {
      cur_ror_scan++;
      continue;
    }

    *(intersect_scans_end++)= *(cur_ror_scan++);

    if (intersect.total_cost < min_cost)
    {
      /* New cheapest prefix: remember state and its end marker. */
      ror_intersect_cpy(&intersect_best, &intersect);
      intersect_scans_best= intersect_scans_end;
      min_cost = intersect.total_cost;
    }
  }

  if (intersect_scans_best == intersect_scans)
  {
    /* No scan improved the estimate: no intersection plan. */
    return NULL;
  }

  *are_all_covering= intersect.is_covering;
  uint32_t best_num= intersect_scans_best - intersect_scans;
  /* Roll the working state back to the best prefix before trying the CPK. */
  ror_intersect_cpy(&intersect, &intersect_best);

  /* A clustered-PK scan can further filter a non-covering intersection
     without any extra I/O (rowids already identify the PK range). */
  if (cpk_scan && ! intersect.is_covering)
  {
    if (ror_intersect_add(&intersect, cpk_scan, true) &&
        (intersect.total_cost < min_cost))
    {
      cpk_scan_used= true;
      intersect_best= intersect;
    }
  }

  /* Emit a plan only if it beats the budget and actually intersects
     something (>=2 scans, or one scan plus the CPK filter). */
  optimizer::RorIntersectReadPlan *trp= NULL;
  if (min_cost < read_time && (cpk_scan_used || best_num > 1))
  {
    if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
      return trp;

    if (! (trp->first_scan=
           (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)*best_num)))
      return NULL;
    memcpy(trp->first_scan, intersect_scans, best_num*sizeof(optimizer::RorScanInfo*));
    trp->last_scan= trp->first_scan + best_num;
    trp->is_covering= intersect_best.is_covering;
    trp->read_cost= intersect_best.total_cost;

    /* Estimated row count must be at least 1. */
    ha_rows best_rows = double2rows(intersect_best.out_rows);
    if (! best_rows)
      best_rows= 1;
    set_if_smaller(param->table->quick_condition_rows, best_rows);
    trp->records= best_rows;
    trp->index_scan_costs= intersect_best.index_scan_costs;
    trp->cpk_scan= cpk_scan_used? cpk_scan: NULL;
  }
  return trp;
}
01934
01935
01936
01937
01938
01939
01940
01941
01942
01943
01944
01945
01946
01947
01948
01949
01950
01951
01952
01953
01954
01955
01956
01957
01958
01959
01960
01961
01962
/*
  Evaluate every usable index in 'tree' with check_quick_select() and return
  a range-scan read plan for the cheapest one found.

  Side effects: rebuilds tree->ror_scans_map / tree->n_ror_scans, and flags
  MAYBE keys in *param->needed_reg.

  @param session                  current session
  @param param                    range-optimizer context
  @param tree                     SEL_TREE with per-key SEL_ARG graphs
  @param index_read_must_be_used  true forces index-only cost estimation
  @param update_tbl_stats         passed through to check_quick_select()
  @param read_time                current best cost; only cheaper scans win

  @return the cheapest RangeReadPlan, or NULL when no key scan beats
          'read_time' (or plan allocation fails).
*/
static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
                                                      optimizer::Parameter *param,
                                                      optimizer::SEL_TREE *tree,
                                                      bool index_read_must_be_used,
                                                      bool update_tbl_stats,
                                                      double read_time)
{
  uint32_t idx;
  optimizer::SEL_ARG **key= NULL;
  optimizer::SEL_ARG **end= NULL;
  optimizer::SEL_ARG **key_to_read= NULL;
  ha_rows best_records= 0;
  uint32_t best_mrr_flags= 0;
  uint32_t best_buf_size= 0;
  optimizer::RangeReadPlan *read_plan= NULL;

  /* Recompute which scans are rowid-ordered while costing each key. */
  tree->ror_scans_map.reset();
  tree->n_ror_scans= 0;
  for (idx= 0,key=tree->keys, end=key+param->keys; key != end; key++,idx++)
  {
    if (*key)
    {
      ha_rows found_records;
      optimizer::CostVector cost;
      double found_read_time= 0.0;
      uint32_t mrr_flags, buf_size;
      uint32_t keynr= param->real_keynr[idx];
      /* MAYBE keys cannot be used directly but must be remembered. */
      if ((*key)->type == optimizer::SEL_ARG::MAYBE_KEY ||
          (*key)->maybe_flag)
        param->needed_reg->set(keynr);

      bool read_index_only= index_read_must_be_used ||
        param->table->covering_keys.test(keynr);

      found_records= check_quick_select(session, param, idx, read_index_only, *key,
                                        update_tbl_stats, &mrr_flags,
                                        &buf_size, &cost);
      found_read_time= cost.total_cost();
      /* check_quick_select() sets param->is_ror_scan as a side effect. */
      if ((found_records != HA_POS_ERROR) && param->is_ror_scan)
      {
        tree->n_ror_scans++;
        tree->ror_scans_map.set(idx);
      }
      /* Keep the cheapest usable scan seen so far. */
      if (read_time > found_read_time && found_records != HA_POS_ERROR)
      {
        read_time= found_read_time;
        best_records= found_records;
        key_to_read= key;
        best_mrr_flags= mrr_flags;
        best_buf_size= buf_size;
      }
    }
  }

  if (key_to_read)
  {
    idx= key_to_read - tree->keys;
    if ((read_plan= new (param->mem_root) optimizer::RangeReadPlan(*key_to_read, idx,
                                                                   best_mrr_flags)))
    {
      read_plan->records= best_records;
      read_plan->is_ror= tree->ror_scans_map.test(idx);
      read_plan->read_cost= read_time;
      read_plan->mrr_buf_size= best_buf_size;
    }
  }

  return(read_plan);
}
02036
02037
02038 optimizer::QuickSelectInterface *optimizer::IndexMergeReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *)
02039 {
02040 optimizer::QuickIndexMergeSelect *quick_imerge;
02041 optimizer::QuickRangeSelect *quick= NULL;
02042
02043 if (! (quick_imerge= new optimizer::QuickIndexMergeSelect(param->session, param->table)))
02044 {
02045 return NULL;
02046 }
02047
02048 quick_imerge->records= records;
02049 quick_imerge->read_time= read_cost;
02050 for (optimizer::RangeReadPlan **range_scan= range_scans;
02051 range_scan != range_scans_end;
02052 range_scan++)
02053 {
02054 if (! (quick= (optimizer::QuickRangeSelect*)
02055 ((*range_scan)->make_quick(param, false, &quick_imerge->alloc))) ||
02056 quick_imerge->push_quick_back(quick))
02057 {
02058 delete quick;
02059 delete quick_imerge;
02060 return NULL;
02061 }
02062 }
02063 return quick_imerge;
02064 }
02065
02066 optimizer::QuickSelectInterface *optimizer::RorIntersectReadPlan::make_quick(optimizer::Parameter *param,
02067 bool retrieve_full_rows,
02068 memory::Root *parent_alloc)
02069 {
02070 optimizer::QuickRorIntersectSelect *quick_intersect= NULL;
02071 optimizer::QuickRangeSelect *quick= NULL;
02072 memory::Root *alloc= NULL;
02073
02074 if ((quick_intersect=
02075 new optimizer::QuickRorIntersectSelect(param->session,
02076 param->table,
02077 (retrieve_full_rows? (! is_covering) : false),
02078 parent_alloc)))
02079 {
02080 alloc= parent_alloc ? parent_alloc : &quick_intersect->alloc;
02081 for (; first_scan != last_scan; ++first_scan)
02082 {
02083 if (! (quick= optimizer::get_quick_select(param,
02084 (*first_scan)->idx,
02085 (*first_scan)->sel_arg,
02086 HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED,
02087 0,
02088 alloc)) ||
02089 quick_intersect->push_quick_back(quick))
02090 {
02091 delete quick_intersect;
02092 return NULL;
02093 }
02094 }
02095 if (cpk_scan)
02096 {
02097 if (! (quick= optimizer::get_quick_select(param,
02098 cpk_scan->idx,
02099 cpk_scan->sel_arg,
02100 HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED,
02101 0,
02102 alloc)))
02103 {
02104 delete quick_intersect;
02105 return NULL;
02106 }
02107 quick->resetCursor();
02108 quick_intersect->cpk_quick= quick;
02109 }
02110 quick_intersect->records= records;
02111 quick_intersect->read_time= read_cost;
02112 }
02113 return quick_intersect;
02114 }
02115
02116
02117 optimizer::QuickSelectInterface *optimizer::RorUnionReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *)
02118 {
02119 optimizer::QuickRorUnionSelect *quick_roru= NULL;
02120 optimizer::TableReadPlan **scan= NULL;
02121 optimizer::QuickSelectInterface *quick= NULL;
02122
02123
02124
02125
02126 if ((quick_roru= new optimizer::QuickRorUnionSelect(param->session, param->table)))
02127 {
02128 for (scan= first_ror; scan != last_ror; scan++)
02129 {
02130 if (! (quick= (*scan)->make_quick(param, false, &quick_roru->alloc)) ||
02131 quick_roru->push_quick_back(quick))
02132 {
02133 return NULL;
02134 }
02135 }
02136 quick_roru->records= records;
02137 quick_roru->read_time= read_cost;
02138 }
02139 return quick_roru;
02140 }
02141
02142
02143
02144
02145
02146
02147
02148
02149
02150
02151
02152
02153
02154
02155
02156
02157
02158
02159 static optimizer::SEL_TREE *get_ne_mm_tree(optimizer::RangeParameter *param,
02160 Item_func *cond_func,
02161 Field *field,
02162 Item *lt_value, Item *gt_value,
02163 Item_result cmp_type)
02164 {
02165 optimizer::SEL_TREE *tree= NULL;
02166 tree= get_mm_parts(param, cond_func, field, Item_func::LT_FUNC,
02167 lt_value, cmp_type);
02168 if (tree)
02169 {
02170 tree= tree_or(param,
02171 tree,
02172 get_mm_parts(param, cond_func, field,
02173 Item_func::GT_FUNC,
02174 gt_value,
02175 cmp_type));
02176 }
02177 return tree;
02178 }
02179
02180
02181
02182
02183
02184
02185
02186
02187
02188
02189
02190
02191
02192
02193
02194
02195
02196
/*
  Build a SEL_TREE for one comparison function applied to one field.

  @param param     range-optimizer context
  @param cond_func the comparison item (NE/BETWEEN/IN/other)
  @param field     the indexed field being compared
  @param value     the value side of the comparison; for BETWEEN this encodes
                   which argument is the field ((Item*)1 or the argument
                   index cast to Item*, see get_mm_tree())
  @param cmp_type  comparison result type of the field
  @param inv       true when the predicate is negated (NOT BETWEEN / NOT IN)

  @return the range tree, or NULL when the condition cannot be converted.
*/
static optimizer::SEL_TREE *get_func_mm_tree(optimizer::RangeParameter *param,
                                             Item_func *cond_func,
                                             Field *field,
                                             Item *value,
                                             Item_result cmp_type,
                                             bool inv)
{
  optimizer::SEL_TREE *tree= NULL;

  switch (cond_func->functype())
  {

  case Item_func::NE_FUNC:
    /* field != value  ->  (field < value) OR (field > value) */
    tree= get_ne_mm_tree(param, cond_func, field, value, value, cmp_type);
    break;

  case Item_func::BETWEEN:
  {
    if (! value)
    {
      /* The field is the first argument: a genuine BETWEEN on 'field'. */
      if (inv)
      {
        /* NOT BETWEEN: field < low OR field > high */
        tree= get_ne_mm_tree(param,
                             cond_func,
                             field,
                             cond_func->arguments()[1],
                             cond_func->arguments()[2],
                             cmp_type);
      }
      else
      {
        /* BETWEEN: field >= low AND field <= high */
        tree= get_mm_parts(param,
                           cond_func,
                           field,
                           Item_func::GE_FUNC,
                           cond_func->arguments()[1],
                           cmp_type);
        if (tree)
        {
          tree= tree_and(param,
                         tree,
                         get_mm_parts(param, cond_func, field,
                                      Item_func::LE_FUNC,
                                      cond_func->arguments()[2],
                                      cmp_type));
        }
      }
    }
    else
      /*
        The field is one of the bounds; 'value' tells which one
        ((Item*)1 means the low bound). Translate to a one-sided
        comparison against the BETWEEN subject.
      */
      tree= get_mm_parts(param,
                         cond_func,
                         field,
                         (inv ?
                          (value == (Item*)1 ? Item_func::GT_FUNC :
                                               Item_func::LT_FUNC):
                          (value == (Item*)1 ? Item_func::LE_FUNC :
                                               Item_func::GE_FUNC)),
                         cond_func->arguments()[0],
                         cmp_type);
    break;
  }
  case Item_func::IN_FUNC:
  {
    Item_func_in *func= (Item_func_in*) cond_func;

    /* All IN list elements must be comparable to the field as one type. */
    if (! func->arg_types_compatible)
      break;

    if (inv)
    {
      if (func->array && func->array->result_type() != ROW_RESULT)
      {
        /*
          NOT IN with a sorted in-memory array of constants: build the
          complement as a union of open intervals between consecutive
          distinct list values:
            (-inf, v1), (v1, v2), ..., (vN, +inf)
          Skipped entirely when the list is too long to be worth it.
        */
#define NOT_IN_IGNORE_THRESHOLD 1000
        memory::Root *tmp_root= param->mem_root;
        /*
          create_item() must allocate on the statement-lifetime root, not
          the range optimizer's temporary root; swap roots around the call.
        */
        param->session->mem_root= param->old_root;
        Item *value_item= func->array->create_item();
        param->session->mem_root= tmp_root;

        if (func->array->count > NOT_IN_IGNORE_THRESHOLD || ! value_item)
          break;

        /* Seed with the first interval (-inf, v_i) that is not impossible. */
        uint32_t i=0;
        do
        {
          func->array->value_to_item(i, value_item);
          tree= get_mm_parts(param,
                             cond_func,
                             field, Item_func::LT_FUNC,
                             value_item,
                             cmp_type);
          if (! tree)
            break;
          i++;
        } while (i < func->array->count && tree->type == optimizer::SEL_TREE::IMPOSSIBLE);

        if (!tree || tree->type == optimizer::SEL_TREE::IMPOSSIBLE)
        {
          /* No usable interval: give up on a range tree for this field. */
          tree= NULL;
          break;
        }
        optimizer::SEL_TREE *tree2= NULL;
        for (; i < func->array->count; i++)
        {
          if (func->array->compare_elems(i, i-1))
          {
            /* Distinct from the previous value: add interval (v_{i-1}, v_i). */
            func->array->value_to_item(i, value_item);
            tree2= get_mm_parts(param, cond_func, field, Item_func::LT_FUNC,
                                value_item, cmp_type);
            if (!tree2)
            {
              tree= NULL;
              break;
            }

            /* Patch each fresh "< v_i" interval so its lower bound is the
               previous value, open (NEAR_MIN), turning it into (v_{i-1}, v_i). */
            for (uint32_t idx= 0; idx < param->keys; idx++)
            {
              optimizer::SEL_ARG *new_interval, *last_val;
              if (((new_interval= tree2->keys[idx])) &&
                  (tree->keys[idx]) &&
                  ((last_val= tree->keys[idx]->last())))
              {
                new_interval->min_value= last_val->max_value;
                new_interval->min_flag= NEAR_MIN;
              }
            }

            tree= tree_or(param, tree, tree2);
          }
        }

        if (tree && tree->type != optimizer::SEL_TREE::IMPOSSIBLE)
        {
          /* Close the complement with the final interval (v_max, +inf);
             value_item still holds the largest list value here. */
          tree2= get_mm_parts(param, cond_func, field, Item_func::GT_FUNC,
                              value_item, cmp_type);
          tree= tree_or(param, tree, tree2);
        }
      }
      else
      {
        /* No constant array: expand NOT IN as ANDed inequalities
           field != arg1 AND field != arg2 AND ... */
        tree= get_ne_mm_tree(param, cond_func, field,
                             func->arguments()[1], func->arguments()[1],
                             cmp_type);
        if (tree)
        {
          Item **arg, **end;
          for (arg= func->arguments()+2, end= arg+func->argument_count()-2;
               arg < end ; arg++)
          {
            tree= tree_and(param, tree, get_ne_mm_tree(param, cond_func, field,
                                                       *arg, *arg, cmp_type));
          }
        }
      }
    }
    else
    {
      /* Plain IN: union of equalities field = arg1 OR field = arg2 OR ... */
      tree= get_mm_parts(param, cond_func, field, Item_func::EQ_FUNC,
                         func->arguments()[1], cmp_type);
      if (tree)
      {
        Item **arg, **end;
        for (arg= func->arguments()+2, end= arg+func->argument_count()-2;
             arg < end ; arg++)
        {
          tree= tree_or(param, tree, get_mm_parts(param, cond_func, field,
                                                  Item_func::EQ_FUNC,
                                                  *arg, cmp_type));
        }
      }
    }
    break;
  }
  default:
  {
    /*
      Simple binary comparison. If the field is the *left* argument use the
      function type as-is; otherwise (value is the first argument) use the
      reversed comparison, e.g. "const < field" becomes "field > const".
    */
    Item_func::Functype func_type=
      (value != cond_func->arguments()[0]) ? cond_func->functype() :
      ((Item_bool_func2*) cond_func)->rev_functype();
    tree= get_mm_parts(param, cond_func, field, func_type, value, cmp_type);
  }
  }

  return(tree);
}
02438
02439
02440
02441
02442
02443
02444
02445
02446
02447
02448
02449
02450
02451
02452
02453
02454
02455
02456
02457
02458
02459
02460
02461
02462
02463
02464
02465
02466
02467
02468
02469
02470
02471
02472
02473
02474
02475
02476
02477
02478
02479
02480
02481
02482
02483
02484
02485
02486
02487
02488
02489
02490
02491
02492
02493
02494
02495
02496
02497
02498
02499
02500
02501
02502
02503
02504
02505
02506
02507
02508
02509
02510 static optimizer::SEL_TREE *get_full_func_mm_tree(optimizer::RangeParameter *param,
02511 Item_func *cond_func,
02512 Item_field *field_item, Item *value,
02513 bool inv)
02514 {
02515 optimizer::SEL_TREE *tree= 0;
02516 optimizer::SEL_TREE *ftree= 0;
02517 table_map ref_tables= 0;
02518 table_map param_comp= ~(param->prev_tables | param->read_tables |
02519 param->current_table);
02520
02521 for (uint32_t i= 0; i < cond_func->arg_count; i++)
02522 {
02523 Item *arg= cond_func->arguments()[i]->real_item();
02524 if (arg != field_item)
02525 ref_tables|= arg->used_tables();
02526 }
02527
02528 Field *field= field_item->field;
02529 field->setWriteSet();
02530
02531 Item_result cmp_type= field->cmp_type();
02532 if (!((ref_tables | field->getTable()->map) & param_comp))
02533 ftree= get_func_mm_tree(param, cond_func, field, value, cmp_type, inv);
02534 Item_equal *item_equal= field_item->item_equal;
02535 if (item_equal)
02536 {
02537 Item_equal_iterator it(item_equal->begin());
02538 Item_field *item;
02539 while ((item= it++))
02540 {
02541 Field *f= item->field;
02542 f->setWriteSet();
02543
02544 if (field->eq(f))
02545 continue;
02546 if (!((ref_tables | f->getTable()->map) & param_comp))
02547 {
02548 tree= get_func_mm_tree(param, cond_func, f, value, cmp_type, inv);
02549 ftree= !ftree ? tree : tree_and(param, ftree, tree);
02550 }
02551 }
02552 }
02553 return(ftree);
02554 }
02555
02556
02557
/*
  Recursively convert a WHERE condition into a SEL_TREE of per-key range
  graphs.

  @param param  range-optimizer context
  @param cond   condition to analyze (AND/OR item, constant, or comparison)

  @return the range tree; NULL means "no range information extractable";
          SEL_TREE::IMPOSSIBLE / ALWAYS / MAYBE trees encode trivially
          false / true / undecidable conditions.
*/
static optimizer::SEL_TREE *get_mm_tree(optimizer::RangeParameter *param, COND *cond)
{
  optimizer::SEL_TREE *tree=0;
  optimizer::SEL_TREE *ftree= 0;
  Item_field *field_item= 0;
  bool inv= false;
  Item *value= 0;

  if (cond->type() == Item::COND_ITEM)
  {
    List<Item>::iterator li(((Item_cond*) cond)->argument_list()->begin());

    if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
    {
      /* AND: intersect the children's trees; stop early on IMPOSSIBLE. */
      tree=0;
      Item *item;
      while ((item=li++))
      {
        optimizer::SEL_TREE *new_tree= get_mm_tree(param,item);
        /* Abort when out of memory or the SEL_ARG graph grew too large. */
        if (param->session->is_fatal_error ||
            param->alloced_sel_args > optimizer::SEL_ARG::MAX_SEL_ARGS)
          return 0;
        tree=tree_and(param,tree,new_tree);
        if (tree && tree->type == optimizer::SEL_TREE::IMPOSSIBLE)
          break;
      }
    }
    else
    {
      /* OR: union the children's trees; any non-convertible child or an
         ALWAYS result makes the whole disjunction unusable/trivial. */
      tree= get_mm_tree(param,li++);
      if (tree)
      {
        Item *item;
        while ((item=li++))
        {
          optimizer::SEL_TREE *new_tree= get_mm_tree(param,item);
          if (!new_tree)
            return 0;
          tree=tree_or(param,tree,new_tree);
          if (!tree || tree->type == optimizer::SEL_TREE::ALWAYS)
            break;
        }
      }
    }
    return(tree);
  }

  /* A cheap constant condition collapses to ALWAYS or IMPOSSIBLE. */
  if (cond->const_item() && !cond->is_expensive())
  {
    /*
      val_int() may allocate; make it use the statement-lifetime root so
      nothing it creates dies with the optimizer's temporary root.
    */
    memory::Root *tmp_root= param->mem_root;
    param->session->mem_root= param->old_root;
    tree= cond->val_int() ? new(tmp_root) optimizer::SEL_TREE(optimizer::SEL_TREE::ALWAYS) :
                            new(tmp_root) optimizer::SEL_TREE(optimizer::SEL_TREE::IMPOSSIBLE);
    param->session->mem_root= tmp_root;
    return(tree);
  }

  table_map ref_tables= 0;
  table_map param_comp= ~(param->prev_tables | param->read_tables |
                          param->current_table);
  if (cond->type() != Item::FUNC_ITEM)
  {
    /* Not a function item: usable only as a MAYBE marker, and only when it
       does not depend on this table or on not-yet-read tables. */
    ref_tables= cond->used_tables();
    if ((ref_tables & param->current_table) ||
        (ref_tables & ~(param->prev_tables | param->read_tables)))
      return 0;
    return(new optimizer::SEL_TREE(optimizer::SEL_TREE::MAYBE));
  }

  Item_func *cond_func= (Item_func*) cond;
  if (cond_func->functype() == Item_func::BETWEEN ||
      cond_func->functype() == Item_func::IN_FUNC)
    inv= ((Item_func_opt_neg *) cond_func)->negated;
  else if (cond_func->select_optimize() == Item_func::OPTIMIZE_NONE)
    return 0;

  param->cond= cond;

  switch (cond_func->functype()) {
  case Item_func::BETWEEN:
    /* Case 1: the BETWEEN subject itself is a field. */
    if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM)
    {
      field_item= (Item_field*) (cond_func->arguments()[0]->real_item());
      ftree= get_full_func_mm_tree(param, cond_func, field_item, NULL, inv);
    }

    /*
      Case 2: a bound is a field, e.g. "const BETWEEN f1 AND f2". The bound's
      argument index (1 or 2) is smuggled to get_func_mm_tree() as the
      'value' pointer so it knows which side the field is on.
    */
    for (uint32_t i= 1 ; i < cond_func->arg_count ; i++)
    {
      if (cond_func->arguments()[i]->real_item()->type() == Item::FIELD_ITEM)
      {
        field_item= (Item_field*) (cond_func->arguments()[i]->real_item());
        optimizer::SEL_TREE *tmp= get_full_func_mm_tree(param, cond_func,
                                                        field_item, (Item*)(intptr_t)i, inv);
        if (inv)
          tree= !tree ? tmp : tree_or(param, tree, tmp);
        else
          tree= tree_and(param, tree, tmp);
      }
      else if (inv)
      {
        /* A non-field bound under negation makes the OR unusable. */
        tree= 0;
        break;
      }
    }

    ftree = tree_and(param, ftree, tree);
    break;
  case Item_func::IN_FUNC:
  {
    Item_func_in *func=(Item_func_in*) cond_func;
    if (func->key_item()->real_item()->type() != Item::FIELD_ITEM)
      return 0;
    field_item= (Item_field*) (func->key_item()->real_item());
    ftree= get_full_func_mm_tree(param, cond_func, field_item, NULL, inv);
    break;
  }
  case Item_func::MULT_EQUAL_FUNC:
  {
    /* Multiple equality with a constant: f1 = f2 = ... = const.
       Build "field = const" for every member whose table is available. */
    Item_equal *item_equal= (Item_equal *) cond;
    if (!(value= item_equal->get_const()))
      return 0;
    Item_equal_iterator it(item_equal->begin());
    ref_tables= value->used_tables();
    while ((field_item= it++))
    {
      Field *field= field_item->field;
      field->setWriteSet();

      Item_result cmp_type= field->cmp_type();
      if (!((ref_tables | field->getTable()->map) & param_comp))
      {
        tree= get_mm_parts(param, cond, field, Item_func::EQ_FUNC,
                           value,cmp_type);
        ftree= !ftree ? tree : tree_and(param, ftree, tree);
      }
    }

    return(ftree);
  }
  default:
    /* Plain binary comparison: find which side is the field. */
    if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM)
    {
      field_item= (Item_field*) (cond_func->arguments()[0]->real_item());
      value= cond_func->arg_count > 1 ? cond_func->arguments()[1] : 0;
    }
    else if (cond_func->have_rev_func() &&
             cond_func->arguments()[1]->real_item()->type() ==
             Item::FIELD_ITEM)
    {
      field_item= (Item_field*) (cond_func->arguments()[1]->real_item());
      value= cond_func->arguments()[0];
    }
    else
      return 0;
    ftree= get_full_func_mm_tree(param, cond_func, field_item, value, inv);
  }

  return(ftree);
}
02730
02731
/*
  Build a SEL_TREE with one SEL_ARG interval for every key part that uses
  'field'.

  @param param      range-optimizer context (key_parts describe usable keys)
  @param cond_func  the originating condition (stored for later reference)
  @param field      field being compared
  @param type       comparison type (EQ/LT/GT/...)
  @param value      comparison value; NULL means an IS [NOT] NULL style test
                    (the last parameter, the comparison result type, is
                    unused here and therefore unnamed)

  @return the tree (possibly of type IMPOSSIBLE), or NULL when the field is
          not from param->table, the value depends on unavailable tables,
          or allocation fails.
*/
static optimizer::SEL_TREE *
get_mm_parts(optimizer::RangeParameter *param,
             COND *cond_func,
             Field *field,
             Item_func::Functype type,
             Item *value, Item_result)
{
  if (field->getTable() != param->table)
    return 0;

  KEY_PART *key_part = param->key_parts;
  KEY_PART *end = param->key_parts_end;
  optimizer::SEL_TREE *tree=0;
  /* The value must not depend on tables that are not yet available. */
  if (value &&
      value->used_tables() & ~(param->prev_tables | param->read_tables))
    return 0;
  for (; key_part != end; key_part++)
  {
    if (field->eq(key_part->field))
    {
      optimizer::SEL_ARG *sel_arg=0;
      /* Allocate the tree lazily, on the first matching key part. */
      if (!tree && !(tree=new optimizer::SEL_TREE()))
        return 0;
      if (!value || !(value->used_tables() & ~param->read_tables))
      {
        /* Fully bound value: build a real interval for this key part. */
        sel_arg= get_mm_leaf(param,cond_func,
                             key_part->field,key_part,type,value);
        if (! sel_arg)
          continue;             /* this key part cannot use the condition */
        if (sel_arg->type == optimizer::SEL_ARG::IMPOSSIBLE)
        {
          /* Condition can never hold: the whole tree is impossible. */
          tree->type=optimizer::SEL_TREE::IMPOSSIBLE;
          return(tree);
        }
      }
      else
      {
        /* Value becomes known only at execution time: mark as MAYBE_KEY. */
        if (! (sel_arg= new optimizer::SEL_ARG(optimizer::SEL_ARG::MAYBE_KEY)))
          return 0;
      }
      sel_arg->part=(unsigned char) key_part->part;
      tree->keys[key_part->key]=sel_add(tree->keys[key_part->key],sel_arg);
      tree->keys_map.set(key_part->key);
    }
  }

  return tree;
}
02781
02782
02783 static optimizer::SEL_ARG *
02784 get_mm_leaf(optimizer::RangeParameter *param,
02785 COND *conf_func,
02786 Field *field,
02787 KEY_PART *key_part,
02788 Item_func::Functype type,
02789 Item *value)
02790 {
02791 uint32_t maybe_null=(uint32_t) field->real_maybe_null();
02792 bool optimize_range;
02793 optimizer::SEL_ARG *tree= NULL;
02794 memory::Root *alloc= param->mem_root;
02795 unsigned char *str;
02796 int err= 0;
02797
02798
02799
02800
02801
02802
02803
02804
02805
02806 param->session->mem_root= param->old_root;
02807 if (!value)
02808 {
02809 if (field->getTable()->maybe_null)
02810 goto end;
02811 if (!maybe_null)
02812 {
02813 if (type == Item_func::ISNULL_FUNC)
02814 tree= &optimizer::null_element;
02815 goto end;
02816 }
02817 if (!(tree= new (alloc) optimizer::SEL_ARG(field,is_null_string,is_null_string)))
02818 goto end;
02819 if (type == Item_func::ISNOTNULL_FUNC)
02820 {
02821 tree->min_flag=NEAR_MIN;
02822 tree->max_flag=NO_MAX_RANGE;
02823 }
02824 goto end;
02825 }
02826
02827
02828
02829
02830
02831
02832
02833
02834
02835
02836
02837
02838
02839 if (field->result_type() == STRING_RESULT &&
02840 value->result_type() == STRING_RESULT &&
02841 ((Field_str*)field)->charset() != conf_func->compare_collation() &&
02842 !(conf_func->compare_collation()->state & MY_CS_BINSORT))
02843 goto end;
02844
02845 if (param->using_real_indexes)
02846 optimize_range= field->optimize_range(param->real_keynr[key_part->key],
02847 key_part->part);
02848 else
02849 optimize_range= true;
02850
02851 if (type == Item_func::LIKE_FUNC)
02852 {
02853 bool like_error;
02854 char buff1[MAX_FIELD_WIDTH];
02855 unsigned char *min_str,*max_str;
02856 String tmp(buff1,sizeof(buff1),value->collation.collation),*res;
02857 size_t length, offset, min_length, max_length;
02858 uint32_t field_length= field->pack_length()+maybe_null;
02859
02860 if (!optimize_range)
02861 goto end;
02862 if (!(res= value->val_str(&tmp)))
02863 {
02864 tree= &optimizer::null_element;
02865 goto end;
02866 }
02867
02868
02869
02870
02871
02872
02873 if (res != &tmp)
02874 {
02875 tmp.copy(*res);
02876 res= &tmp;
02877 }
02878 if (field->cmp_type() != STRING_RESULT)
02879 goto end;
02880
02881 offset=maybe_null;
02882 length=key_part->store_length;
02883
02884 if (length != key_part->length + maybe_null)
02885 {
02886
02887 offset+= HA_KEY_BLOB_LENGTH;
02888 field_length= length - HA_KEY_BLOB_LENGTH;
02889 }
02890 else
02891 {
02892 if (unlikely(length < field_length))
02893 {
02894
02895
02896
02897
02898 length= field_length;
02899 }
02900 else
02901 field_length= length;
02902 }
02903 length+=offset;
02904 if (!(min_str= (unsigned char*) alloc->alloc_root(length*2)))
02905 {
02906 goto end;
02907 }
02908
02909 max_str=min_str+length;
02910 if (maybe_null)
02911 max_str[0]= min_str[0]=0;
02912
02913 field_length-= maybe_null;
02914 int escape_code= make_escape_code(field->charset(),
02915 ((Item_func_like*)(param->cond))->escape);
02916 like_error= my_like_range(field->charset(),
02917 res->ptr(), res->length(),
02918 escape_code,
02919 internal::wild_one, internal::wild_many,
02920 field_length,
02921 (char*) min_str+offset, (char*) max_str+offset,
02922 &min_length, &max_length);
02923 if (like_error)
02924 goto end;
02925
02926 if (offset != maybe_null)
02927 {
02928 int2store(min_str+maybe_null,min_length);
02929 int2store(max_str+maybe_null,max_length);
02930 }
02931 tree= new (alloc) optimizer::SEL_ARG(field, min_str, max_str);
02932 goto end;
02933 }
02934
02935 if (! optimize_range &&
02936 type != Item_func::EQ_FUNC &&
02937 type != Item_func::EQUAL_FUNC)
02938 goto end;
02939
02940
02941
02942
02943
02944 if (field->result_type() == STRING_RESULT &&
02945 value->result_type() != STRING_RESULT &&
02946 field->cmp_type() != value->result_type())
02947 goto end;
02948
02949
02950
02951
02952
02953
02954
02955
02956
02957
02958
02959
02960
02961
02962
02963
02964
02965
02966
02967
02968
02969
02970
02971
02972
02973
02974
02975
02976
02977
02978
02979
02980 if (field->is_timestamp())
02981 {
02982
02983
02984
02985
02986
02987
02988 if (value->real_item()->type() == Item::FIELD_ITEM
02989 && value->result_type() == STRING_RESULT)
02990 {
02991 char buff[DateTime::MAX_STRING_LENGTH];
02992 String tmp(buff, sizeof(buff), &my_charset_bin);
02993 String *res= value->val_str(&tmp);
02994
02995 if (!res)
02996 goto end;
02997 else
02998 {
02999
03000
03001
03002
03003 DateTime value_datetime;
03004
03005 if (! value_datetime.from_string(res->c_ptr(), (size_t) res->length()))
03006 goto end;
03007
03008 Timestamp max_timestamp;
03009 Timestamp min_timestamp;
03010
03011 (void) max_timestamp.from_time_t((time_t) INT32_MAX);
03012 (void) min_timestamp.from_time_t((time_t) 0);
03013
03014
03015 if (value_datetime < min_timestamp)
03016 {
03017
03018
03019
03020
03021 char new_value_buff[DateTime::MAX_STRING_LENGTH];
03022 int new_value_length;
03023 String new_value_string(new_value_buff, sizeof(new_value_buff), &my_charset_bin);
03024
03025 new_value_length= min_timestamp.to_string(new_value_string.c_ptr(),
03026 DateTime::MAX_STRING_LENGTH);
03027 assert((new_value_length+1) < DateTime::MAX_STRING_LENGTH);
03028 new_value_string.length(new_value_length);
03029 err= value->save_str_value_in_field(field, &new_value_string);
03030 }
03031 else if (value_datetime > max_timestamp)
03032 {
03033
03034
03035
03036
03037 char new_value_buff[DateTime::MAX_STRING_LENGTH];
03038 int new_value_length;
03039 String new_value_string(new_value_buff, sizeof(new_value_buff), &my_charset_bin);
03040
03041 new_value_length= max_timestamp.to_string(new_value_string.c_ptr(),
03042 DateTime::MAX_STRING_LENGTH);
03043 assert((new_value_length+1) < DateTime::MAX_STRING_LENGTH);
03044 new_value_string.length(new_value_length);
03045 err= value->save_str_value_in_field(field, &new_value_string);
03046 }
03047 else
03048 err= value->save_in_field(field, 1);
03049 }
03050 }
03051 else
03052 err= value->save_in_field(field, 1);
03053 }
03054 else
03055 err= value->save_in_field(field, 1);
03056
03057 if (err > 0)
03058 {
03059 if (field->cmp_type() != value->result_type())
03060 {
03061 if ((type == Item_func::EQ_FUNC || type == Item_func::EQUAL_FUNC) &&
03062 value->result_type() == item_cmp_type(field->result_type(),
03063 value->result_type()))
03064 {
03065 tree= new (alloc) optimizer::SEL_ARG(field, 0, 0);
03066 tree->type= optimizer::SEL_ARG::IMPOSSIBLE;
03067 goto end;
03068 }
03069 else
03070 {
03071
03072
03073
03074
03075 tree= 0;
03076 if (err == 3 && field->type() == DRIZZLE_TYPE_DATE &&
03077 (type == Item_func::GT_FUNC || type == Item_func::GE_FUNC ||
03078 type == Item_func::LT_FUNC || type == Item_func::LE_FUNC) )
03079 {
03080
03081
03082
03083
03084
03085
03086
03087
03088
03089
03090
03091
03092
03093
03094
03095
03096
03097
03098 }
03099 else
03100 goto end;
03101 }
03102 }
03103
03104
03105
03106
03107
03108
03109 else if (err == 1 && field->result_type() == INT_RESULT)
03110 {
03111 if (type == Item_func::LT_FUNC && (value->val_int() > 0))
03112 type = Item_func::LE_FUNC;
03113 else if (type == Item_func::GT_FUNC &&
03114 !((Field_num*)field)->unsigned_flag &&
03115 !((Item_int*)value)->unsigned_flag &&
03116 (value->val_int() < 0))
03117 type = Item_func::GE_FUNC;
03118 }
03119 else if (err == 1)
03120 {
03121 tree= new (alloc) optimizer::SEL_ARG(field, 0, 0);
03122 tree->type= optimizer::SEL_ARG::IMPOSSIBLE;
03123 goto end;
03124 }
03125 }
03126 else if (err < 0)
03127 {
03128
03129 tree= &optimizer::null_element;
03130 goto end;
03131 }
03132
03133
03134
03135
03136
03137
03138 if (type != Item_func::EQUAL_FUNC && field->is_real_null())
03139 {
03140 tree= &optimizer::null_element;
03141 goto end;
03142 }
03143
03144 str= (unsigned char*) alloc->alloc_root(key_part->store_length+1);
03145 if (!str)
03146 goto end;
03147 if (maybe_null)
03148 *str= (unsigned char) field->is_real_null();
03149 field->get_key_image(str+maybe_null, key_part->length);
03150 if (! (tree= new (alloc) optimizer::SEL_ARG(field, str, str)))
03151 goto end;
03152
03153
03154
03155
03156
03157
03158
03159
03160
03161
03162
03163
03164 if (field->result_type() == INT_RESULT &&
03165 value->result_type() == INT_RESULT &&
03166 ((Field_num*)field)->unsigned_flag && !((Item_int*)value)->unsigned_flag)
03167 {
03168 int64_t item_val= value->val_int();
03169 if (item_val < 0)
03170 {
03171 if (type == Item_func::LT_FUNC || type == Item_func::LE_FUNC)
03172 {
03173 tree->type= optimizer::SEL_ARG::IMPOSSIBLE;
03174 goto end;
03175 }
03176 if (type == Item_func::GT_FUNC || type == Item_func::GE_FUNC)
03177 {
03178 tree= 0;
03179 goto end;
03180 }
03181 }
03182 }
03183
03184 switch (type) {
03185 case Item_func::LT_FUNC:
03186 if (field_is_equal_to_item(field,value))
03187 tree->max_flag=NEAR_MAX;
03188
03189 case Item_func::LE_FUNC:
03190 if (!maybe_null)
03191 tree->min_flag=NO_MIN_RANGE;
03192 else
03193 {
03194 tree->min_value=is_null_string;
03195 tree->min_flag=NEAR_MIN;
03196 }
03197 break;
03198 case Item_func::GT_FUNC:
03199
03200 if (field_is_equal_to_item(field,value) &&
03201 !(key_part->flag & HA_PART_KEY_SEG))
03202 tree->min_flag=NEAR_MIN;
03203
03204 case Item_func::GE_FUNC:
03205 tree->max_flag=NO_MAX_RANGE;
03206 break;
03207 default:
03208 break;
03209 }
03210
03211 end:
03212 param->session->mem_root= alloc;
03213 return(tree);
03214 }
03215
03216
03217
03218
03219
03220
03221
03222
03223
03224
03225
03226
03227
03228
03229
03230
03231
03232
03233
03234 static optimizer::SEL_ARG *
03235 sel_add(optimizer::SEL_ARG *key1, optimizer::SEL_ARG *key2)
03236 {
03237 optimizer::SEL_ARG *root= NULL;
03238 optimizer::SEL_ARG **key_link= NULL;
03239
03240 if (!key1)
03241 return key2;
03242 if (!key2)
03243 return key1;
03244
03245 key_link= &root;
03246 while (key1 && key2)
03247 {
03248 if (key1->part < key2->part)
03249 {
03250 *key_link= key1;
03251 key_link= &key1->next_key_part;
03252 key1=key1->next_key_part;
03253 }
03254 else
03255 {
03256 *key_link= key2;
03257 key_link= &key2->next_key_part;
03258 key2=key2->next_key_part;
03259 }
03260 }
03261 *key_link=key1 ? key1 : key2;
03262 return root;
03263 }
03264
03265 #define CLONE_KEY1_MAYBE 1
03266 #define CLONE_KEY2_MAYBE 2
03267
static uint32_t swap_clone_flag(uint32_t a)
{
  /*
    Exchange bit 0 (CLONE_KEY1_MAYBE) with bit 1 (CLONE_KEY2_MAYBE),
    used when the key1/key2 operands of key_and() are swapped.
  */
  uint32_t key1_bit= a & 1;
  uint32_t key2_bit= a & 2;
  return (key1_bit << 1) | (key2_bit >> 1);
}
03272
static optimizer::SEL_TREE *
tree_and(optimizer::RangeParameter *param, optimizer::SEL_TREE *tree1, optimizer::SEL_TREE *tree2)
{
  /*
    Build a SEL_TREE for the conjunction (AND) of tree1 and tree2.
    May modify and return one of its arguments rather than allocate a
    new tree. NULL operands act as "no restriction".
  */
  if (!tree1)
    return(tree2);
  if (!tree2)
    return(tree1);
  /* IMPOSSIBLE AND x is IMPOSSIBLE; ALWAYS AND x is x. */
  if (tree1->type == optimizer::SEL_TREE::IMPOSSIBLE || tree2->type == optimizer::SEL_TREE::ALWAYS)
    return(tree1);
  if (tree2->type == optimizer::SEL_TREE::IMPOSSIBLE || tree1->type == optimizer::SEL_TREE::ALWAYS)
    return(tree2);
  /* A MAYBE operand only demotes the other operand to KEY_SMALLER. */
  if (tree1->type == optimizer::SEL_TREE::MAYBE)
  {
    if (tree2->type == optimizer::SEL_TREE::KEY)
      tree2->type=optimizer::SEL_TREE::KEY_SMALLER;
    return(tree2);
  }
  if (tree2->type == optimizer::SEL_TREE::MAYBE)
  {
    tree1->type=optimizer::SEL_TREE::KEY_SMALLER;
    return(tree1);
  }
  key_map result_keys;
  result_keys.reset();

  /* Join the trees key per key: AND the SEL_ARG graphs of each index. */
  optimizer::SEL_ARG **key1,**key2,**end;
  for (key1= tree1->keys,key2= tree2->keys,end=key1+param->keys ;
       key1 != end ; key1++,key2++)
  {
    uint32_t flag=0;
    if (*key1 || *key2)
    {
      if (*key1 && !(*key1)->simple_key())
        flag|=CLONE_KEY1_MAYBE;
      if (*key2 && !(*key2)->simple_key())
        flag|=CLONE_KEY2_MAYBE;
      *key1=key_and(param, *key1, *key2, flag);
      /* An impossible range on any one index makes the whole AND impossible. */
      if (*key1 && (*key1)->type == optimizer::SEL_ARG::IMPOSSIBLE)
      {
        tree1->type= optimizer::SEL_TREE::IMPOSSIBLE;
        return(tree1);
      }
      result_keys.set(key1 - tree1->keys);
    }
  }
  tree1->keys_map= result_keys;

  /* If a range exists on some single key, discard index-merge plans. */
  if (result_keys.any())
  {
    tree1->merges.clear();
    return(tree1);
  }

  /* No single-index ranges: AND the index-merge candidate lists instead. */
  imerge_list_and_list(&tree1->merges, &tree2->merges);
  return(tree1);
}
03331
03332
03333
03334
03335
static optimizer::SEL_ARG *
and_all_keys(optimizer::RangeParameter *param,
             optimizer::SEL_ARG *key1,
             optimizer::SEL_ARG *key2,
             uint32_t clone_flag)
{
  /*
    AND key2 (a condition on later key part(s)) into every interval of
    key1: each interval's next_key_part becomes (next_key_part AND key2).
    Intervals whose resulting next part is IMPOSSIBLE are deleted from
    key1. Returns the (possibly restructured) key1, or &null_element
    when every interval was deleted.
  */
  optimizer::SEL_ARG *next= NULL;
  ulong use_count=key1->use_count;

  if (key1->size() != 1)
  {
    /* key2 will be referenced from every interval of key1. */
    key2->use_count+=key1->size()-1;
    key2->increment_use_count((int) key1->size()-1);
  }
  if (key1->type == optimizer::SEL_ARG::MAYBE_KEY)
  {
    /* A MAYBE_KEY node is a lone pseudo-interval; detach it from any tree. */
    key1->right= key1->left= &optimizer::null_element;
    key1->next= key1->prev= 0;
  }
  for (next= key1->first(); next ; next=next->next)
  {
    if (next->next_key_part)
    {
      optimizer::SEL_ARG *tmp= key_and(param, next->next_key_part, key2, clone_flag);
      if (tmp && tmp->type == optimizer::SEL_ARG::IMPOSSIBLE)
      {
        /* This interval can never match: remove it from the tree. */
        key1=key1->tree_delete(next);
        continue;
      }
      next->next_key_part=tmp;
      if (use_count)
        next->increment_use_count(use_count);
      /* Stop early once the optimizer's SEL_ARG budget is exhausted. */
      if (param->alloced_sel_args > optimizer::SEL_ARG::MAX_SEL_ARGS)
        break;
    }
    else
      next->next_key_part=key2;
  }
  if (! key1)
    return &optimizer::null_element;
  key1->use_count++;
  return key1;
}
03379
03380
03381
03382
03383
03384
03385
03386
03387
03388
03389
03390
03391
03392
03393
03394
03395
static optimizer::SEL_ARG *
key_and(optimizer::RangeParameter *param,
        optimizer::SEL_ARG *key1,
        optimizer::SEL_ARG *key2,
        uint32_t clone_flag)
{
  /*
    Produce a SEL_ARG graph for the condition "key1 AND key2".
    The operands may be on different key parts; use counts are
    maintained so shared subtrees are cloned before being modified.
    Returns NULL on out-of-memory, &null_element for an empty result.
  */
  if (! key1)
    return key2;
  if (! key2)
    return key1;
  if (key1->part != key2->part)
  {
    /* Different key parts: make key1 the operand on the earlier part. */
    if (key1->part > key2->part)
    {
      std::swap(key1, key2);
      clone_flag=swap_clone_flag(clone_flag);
    }

    /* Clone key1 if it is shared, then AND key2 into all its intervals. */
    key1->use_count--;
    if (key1->use_count > 0)
      if (! (key1= key1->clone_tree(param)))
        return 0;                               /* OOM */
    return and_all_keys(param, key1, key2, clone_flag);
  }

  /* Same key part: normalize operand order (MAYBE-ish operand into key2). */
  if (((clone_flag & CLONE_KEY2_MAYBE) &&
       ! (clone_flag & CLONE_KEY1_MAYBE) &&
       key2->type != optimizer::SEL_ARG::MAYBE_KEY) ||
      key1->type == optimizer::SEL_ARG::MAYBE_KEY)
  {
    std::swap(key1, key2);
    clone_flag= swap_clone_flag(clone_flag);
  }

  /* If one of the operands is MAYBE_KEY, the result may only shrink key1. */
  if (key2->type == optimizer::SEL_ARG::MAYBE_KEY)
  {
    if (key1->use_count > 1)
    {
      /* key1 is shared: work on a private clone. */
      key1->use_count--;
      if (! (key1=key1->clone_tree(param)))
        return 0;                               /* OOM */
      key1->use_count++;
    }
    if (key1->type == optimizer::SEL_ARG::MAYBE_KEY)
    {
      /* Both operands are MAYBE_KEY: AND their next key parts. */
      key1->next_key_part= key_and(param,
                                   key1->next_key_part,
                                   key2->next_key_part,
                                   clone_flag);
      if (key1->next_key_part &&
          key1->next_key_part->type == optimizer::SEL_ARG::IMPOSSIBLE)
        return key1;
    }
    else
    {
      key1->maybe_smaller();
      if (key2->next_key_part)
      {
        key1->use_count--;                      /* Incremented in and_all_keys */
        return and_all_keys(param, key1, key2, clone_flag);
      }
      key2->use_count--;                        /* key2 has no next part */
    }
    return key1;
  }

  /*
    Both operands are interval lists on the same key part: intersect
    them pairwise, ANDing the next_key_part graphs of overlapping
    intervals, and collect the results into new_tree.
  */
  key1->use_count--;
  key2->use_count--;
  optimizer::SEL_ARG *e1= key1->first();
  optimizer::SEL_ARG *e2= key2->first();
  optimizer::SEL_ARG *new_tree= NULL;

  while (e1 && e2)
  {
    int cmp= e1->cmp_min_to_min(e2);
    if (cmp < 0)
    {
      if (get_range(&e1, &e2, key1))
        continue;
    }
    else if (get_range(&e2, &e1, key2))
      continue;
    optimizer::SEL_ARG *next= key_and(param,
                                      e1->next_key_part,
                                      e2->next_key_part,
                                      clone_flag);
    e1->increment_use_count(1);
    e2->increment_use_count(1);
    if (! next || next->type != optimizer::SEL_ARG::IMPOSSIBLE)
    {
      optimizer::SEL_ARG *new_arg= e1->clone_and(e2);
      if (! new_arg)
        return &optimizer::null_element;        /* OOM */
      new_arg->next_key_part=next;
      if (! new_tree)
      {
        new_tree=new_arg;
      }
      else
        new_tree=new_tree->insert(new_arg);
    }
    /* Advance the interval that ends first. */
    if (e1->cmp_max_to_max(e2) < 0)
      e1=e1->next;
    else
      e2=e2->next;
  }
  key1->free_tree();
  key2->free_tree();
  if (! new_tree)
    return &optimizer::null_element;            /* No overlapping intervals */
  return new_tree;
}
03509
03510
03511 static bool
03512 get_range(optimizer::SEL_ARG **e1, optimizer::SEL_ARG **e2, optimizer::SEL_ARG *root1)
03513 {
03514 (*e1)= root1->find_range(*e2);
03515 if ((*e1)->cmp_max_to_min(*e2) < 0)
03516 {
03517 if (! ((*e1)=(*e1)->next))
03518 return 1;
03519 if ((*e1)->cmp_min_to_max(*e2) > 0)
03520 {
03521 (*e2)=(*e2)->next;
03522 return 1;
03523 }
03524 }
03525 return 0;
03526 }
03527
03528
03529
03530
03531
03532
03533
typedef struct st_range_seq_entry
{
  /*
    One stack frame of a SEL_ARG graph traversal (one key part deep).
  */

  /*
    Pointers past the end of the min/max key images accumulated for
    all key parts from the root frame down to this one.
  */
  unsigned char *min_key, *max_key;

  /*
    Range flags (NEAR_MIN/NEAR_MAX/NULL_RANGE/...) OR-ed together from
    the root frame down to this one.
  */
  uint32_t min_key_flag, max_key_flag;

  /* Number of key parts stored in min_key / max_key so far. */
  uint32_t min_key_parts, max_key_parts;
  /* SEL_ARG node this frame is positioned on. */
  optimizer::SEL_ARG *key_tree;
} RANGE_SEQ_ENTRY;
03552
03553
03554
03555
03556
typedef struct st_sel_arg_range_seq
{
  uint32_t keyno;      /* index of the used tree in the Parameter's key array */
  uint32_t real_keyno; /* actual index number in the table */
  optimizer::Parameter *param;
  optimizer::SEL_ARG *start; /* root of the SEL_ARG graph being traversed */

  /* Traversal stack; stack[i] is the current (deepest) frame. */
  RANGE_SEQ_ENTRY stack[MAX_REF_PARTS];
  int i; /* index of the last used element in stack */

  bool at_start; /* true <=> traversal has not been started yet */
} SEL_ARG_RANGE_SEQ;
03569
03570
03571
03572
03573
03574
03575
03576
03577
03578
03579
03580
03581
03582
03583
03584 static range_seq_t sel_arg_range_seq_init(void *init_param, uint32_t, uint32_t)
03585 {
03586 SEL_ARG_RANGE_SEQ *seq= (SEL_ARG_RANGE_SEQ*)init_param;
03587 seq->at_start= true;
03588 seq->stack[0].key_tree= NULL;
03589 seq->stack[0].min_key= seq->param->min_key;
03590 seq->stack[0].min_key_flag= 0;
03591 seq->stack[0].min_key_parts= 0;
03592
03593 seq->stack[0].max_key= seq->param->max_key;
03594 seq->stack[0].max_key_flag= 0;
03595 seq->stack[0].max_key_parts= 0;
03596 seq->i= 0;
03597 return init_param;
03598 }
03599
03600
03601 static void step_down_to(SEL_ARG_RANGE_SEQ *arg, optimizer::SEL_ARG *key_tree)
03602 {
03603 RANGE_SEQ_ENTRY *cur= &arg->stack[arg->i+1];
03604 RANGE_SEQ_ENTRY *prev= &arg->stack[arg->i];
03605
03606 cur->key_tree= key_tree;
03607 cur->min_key= prev->min_key;
03608 cur->max_key= prev->max_key;
03609 cur->min_key_parts= prev->min_key_parts;
03610 cur->max_key_parts= prev->max_key_parts;
03611
03612 uint16_t stor_length= arg->param->key[arg->keyno][key_tree->part].store_length;
03613 cur->min_key_parts += key_tree->store_min(stor_length, &cur->min_key,
03614 prev->min_key_flag);
03615 cur->max_key_parts += key_tree->store_max(stor_length, &cur->max_key,
03616 prev->max_key_flag);
03617
03618 cur->min_key_flag= prev->min_key_flag | key_tree->min_flag;
03619 cur->max_key_flag= prev->max_key_flag | key_tree->max_flag;
03620
03621 if (key_tree->is_null_interval())
03622 cur->min_key_flag |= NULL_RANGE;
03623 (arg->i)++;
03624 }
03625
03626
03627
03628
03629
03630
03631
03632
03633
03634
03635
03636
03637
03638
03639
03640
03641
03642
03643
03644
03645
03646
03647
03648
03649
03650
static uint32_t sel_arg_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
{
  /*
    Produce the next range from a SEL_ARG graph traversal.

    Fills *range and returns 0 while ranges remain; returns 1 when the
    traversal is exhausted. As a side effect updates the parameter's
    range_count, max_key_part and is_ror_scan fields.
  */
  optimizer::SEL_ARG *key_tree;
  SEL_ARG_RANGE_SEQ *seq= (SEL_ARG_RANGE_SEQ*)rseq;
  if (seq->at_start)
  {
    key_tree= seq->start;
    seq->at_start= false;
    goto walk_up_n_right;
  }

  key_tree= seq->stack[seq->i].key_tree;

  /* We're positioned at some "full tuple"; try the next interval of
     this key part first. */
  if (key_tree->next && key_tree->next != &optimizer::null_element)
  {
    /* Pop the current frame and push the next interval. */
    seq->i--;
    step_down_to(seq, key_tree->next);
    key_tree= key_tree->next;
    seq->param->is_ror_scan= false;
    goto walk_right_n_up;
  }

  /* No more intervals here: pop frames until one has a next interval. */
  while (1)
  {
    if (seq->i == 1) /* cannot pop further: traversal finished */
      return 1;

    seq->i--;
    key_tree= seq->stack[seq->i].key_tree;

    if (key_tree->next && key_tree->next != &optimizer::null_element)
    {
      seq->i--;
      step_down_to(seq, key_tree->next);
      key_tree= key_tree->next;
      break;
    }
  }

  /*
    Walk up and to the right: follow next_key_part links as long as
    they continue on the immediately following key part with plain
    KEY_RANGE intervals.
  */
walk_right_n_up:
  while (key_tree->next_key_part && key_tree->next_key_part != &optimizer::null_element &&
         key_tree->next_key_part->part == key_tree->part + 1 &&
         key_tree->next_key_part->type == optimizer::SEL_ARG::KEY_RANGE)
  {
    {
      RANGE_SEQ_ENTRY *cur= &seq->stack[seq->i];
      uint32_t min_key_length= cur->min_key - seq->param->min_key;
      uint32_t max_key_length= cur->max_key - seq->param->max_key;
      uint32_t len= cur->min_key - cur[-1].min_key;
      /*
        If the current interval is not a closed single point, the next
        key part's extreme bounds are appended here and the walk stops.
      */
      if (! (min_key_length == max_key_length &&
             ! memcmp(cur[-1].min_key, cur[-1].max_key, len) &&
             ! key_tree->min_flag && !key_tree->max_flag))
      {
        seq->param->is_ror_scan= false;
        if (! key_tree->min_flag)
          cur->min_key_parts +=
            key_tree->next_key_part->store_min_key(seq->param->key[seq->keyno],
                                                   &cur->min_key,
                                                   &cur->min_key_flag);
        if (! key_tree->max_flag)
          cur->max_key_parts +=
            key_tree->next_key_part->store_max_key(seq->param->key[seq->keyno],
                                                   &cur->max_key,
                                                   &cur->max_key_flag);
        break;
      }
    }

    /* Point interval: descend into the next key part's interval list. */
    key_tree= key_tree->next_key_part;

walk_up_n_right:
    /* Position at the leftmost (smallest) interval of this key part. */
    while (key_tree->prev && key_tree->prev != &optimizer::null_element)
    {
      key_tree= key_tree->prev;
    }
    step_down_to(seq, key_tree);
  }

  /* Got a tuple: emit it as a KEY_MULTI_RANGE. */
  RANGE_SEQ_ENTRY *cur= &seq->stack[seq->i];

  range->ptr= (char*)(size_t)(key_tree->part);
  {
    range->range_flag= cur->min_key_flag | cur->max_key_flag;

    range->start_key.key= seq->param->min_key;
    range->start_key.length= cur->min_key - seq->param->min_key;
    range->start_key.keypart_map= make_prev_keypart_map(cur->min_key_parts);
    range->start_key.flag= (cur->min_key_flag & NEAR_MIN ? HA_READ_AFTER_KEY :
                            HA_READ_KEY_EXACT);

    range->end_key.key= seq->param->max_key;
    range->end_key.length= cur->max_key - seq->param->max_key;
    range->end_key.flag= (cur->max_key_flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
                          HA_READ_AFTER_KEY);
    range->end_key.keypart_map= make_prev_keypart_map(cur->max_key_parts);

    /*
      A closed point interval covering all parts of a unique index is
      marked UNIQUE_RANGE (keeping any NULL_RANGE marker).
    */
    if (!(cur->min_key_flag & ~NULL_RANGE) && !cur->max_key_flag &&
        (uint32_t)key_tree->part+1 == seq->param->table->key_info[seq->real_keyno].key_parts &&
        (seq->param->table->key_info[seq->real_keyno].flags & (HA_NOSAME)) ==
        HA_NOSAME &&
        range->start_key.length == range->end_key.length &&
        !memcmp(seq->param->min_key,seq->param->max_key,range->start_key.length))
      range->range_flag= UNIQUE_RANGE | (cur->min_key_flag & NULL_RANGE);

    if (seq->param->is_ror_scan)
    {
      /*
        The scan can remain a ROR (rowid-ordered) scan only if this
        range is a closed point interval and the used key parts
        qualify per is_key_scan_ror(); otherwise clear the flag.
      */
      if (!(!(cur->min_key_flag & ~NULL_RANGE) && !cur->max_key_flag &&
            (range->start_key.length == range->end_key.length) &&
            !memcmp(range->start_key.key, range->end_key.key, range->start_key.length) &&
            is_key_scan_ror(seq->param, seq->real_keyno, key_tree->part + 1)))
        seq->param->is_ror_scan= false;
    }
  }
  seq->param->range_count++;
  seq->param->max_key_part= max(seq->param->max_key_part,(uint32_t)key_tree->part);
  return 0;
}
03793
03794
03795
03796
03797
03798
03799
03800
03801
03802
03803
03804
03805
03806
03807
03808
03809
03810
03811
03812
03813
03814
03815
03816
03817
03818
03819
03820
03821
static
ha_rows check_quick_select(Session *session,
                           optimizer::Parameter *param,
                           uint32_t idx,
                           bool index_only,
                           optimizer::SEL_ARG *tree,
                           bool update_tbl_stats,
                           uint32_t *mrr_flags,
                           uint32_t *bufsize,
                           optimizer::CostVector *cost)
{
  /*
    Estimate the number of rows a range scan over index 'idx' with the
    ranges described by 'tree' would read, via the storage engine's
    multi_range_read_info_const(). Fills *mrr_flags, *bufsize and
    *cost, and sets param->is_ror_scan.

    Returns the row estimate, 0 for an impossible (empty) range, or
    HA_POS_ERROR when a range scan is not possible.
  */
  SEL_ARG_RANGE_SEQ seq;
  RANGE_SEQ_IF seq_if = {sel_arg_range_seq_init, sel_arg_range_seq_next};
  Cursor *cursor= param->table->cursor;
  ha_rows rows;
  uint32_t keynr= param->real_keynr[idx];

  /* Reject cases without a valid, non-empty list of ranges. */
  if (! tree)
    return(HA_POS_ERROR);
  if (tree->type == optimizer::SEL_ARG::IMPOSSIBLE)
    return(0L);
  if (tree->type != optimizer::SEL_ARG::KEY_RANGE || tree->part != 0)
    return(HA_POS_ERROR);

  seq.keyno= idx;
  seq.real_keyno= keynr;
  seq.param= param;
  seq.start= tree;

  param->range_count=0;
  param->max_key_part=0;

  /* Assume ROR until the range sequence traversal proves otherwise. */
  param->is_ror_scan= true;
  if (param->table->index_flags(keynr) & HA_KEY_SCAN_NOT_ROR)
    param->is_ror_scan= false;

  *mrr_flags= param->force_default_mrr? HA_MRR_USE_DEFAULT_IMPL: 0;
  *mrr_flags|= HA_MRR_NO_ASSOCIATION;

  bool pk_is_clustered= cursor->primary_key_is_clustered();
  /* Index-only reads are allowed unless this is the clustered PK itself. */
  if (index_only &&
      (param->table->index_flags(keynr) & HA_KEYREAD_ONLY) &&
      !(pk_is_clustered && keynr == param->table->getShare()->getPrimaryKey()))
    *mrr_flags |= HA_MRR_INDEX_ONLY;

  if (session->lex().sql_command != SQLCOM_SELECT)
    *mrr_flags |= HA_MRR_USE_DEFAULT_IMPL;

  *bufsize= param->session->variables.read_rnd_buff_size;
  rows= cursor->multi_range_read_info_const(keynr, &seq_if, (void*)&seq, 0,
                                            bufsize, mrr_flags, cost);
  if (rows != HA_POS_ERROR)
  {
    /* Cache the estimate and, if requested, update table statistics. */
    param->table->quick_rows[keynr]=rows;
    if (update_tbl_stats)
    {
      param->table->quick_keys.set(keynr);
      param->table->quick_key_parts[keynr]=param->max_key_part+1;
      param->table->quick_n_ranges[keynr]= param->range_count;
      param->table->quick_condition_rows=
        min(param->table->quick_condition_rows, rows);
    }
  }

  /* Non-B-tree index organizations cannot deliver rows in rowid order. */
  enum ha_key_alg key_alg= param->table->key_info[seq.real_keyno].algorithm;
  if ((key_alg != HA_KEY_ALG_BTREE) && (key_alg!= HA_KEY_ALG_UNDEF))
  {
    param->is_ror_scan= false;
  }
  else
  {
    /* A clustered primary key scan is always a ROR scan. */
    if (param->table->getShare()->getPrimaryKey() == keynr && pk_is_clustered)
      param->is_ror_scan= true;
  }

  return(rows);
}
03906
03907
03908
03909
03910
03911
03912
03913
03914
03915
03916
03917
03918
03919
03920
03921
03922
03923
03924
03925
03926
03927
03928
03929
03930
03931
03932
03933
03934
03935
03936
03937
03938
03939
03940
03941
03942
03943
03944
03945 static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts)
03946 {
03947 KeyInfo *table_key= param->table->key_info + keynr;
03948 KeyPartInfo *key_part= table_key->key_part + nparts;
03949 KeyPartInfo *key_part_end= (table_key->key_part +
03950 table_key->key_parts);
03951 uint32_t pk_number;
03952
03953 for (KeyPartInfo *kp= table_key->key_part; kp < key_part; kp++)
03954 {
03955 uint16_t fieldnr= param->table->key_info[keynr].
03956 key_part[kp - table_key->key_part].fieldnr - 1;
03957 if (param->table->getField(fieldnr)->key_length() != kp->length)
03958 return false;
03959 }
03960
03961 if (key_part == key_part_end)
03962 return true;
03963
03964 key_part= table_key->key_part + nparts;
03965 pk_number= param->table->getShare()->getPrimaryKey();
03966 if (!param->table->cursor->primary_key_is_clustered() || pk_number == MAX_KEY)
03967 return false;
03968
03969 KeyPartInfo *pk_part= param->table->key_info[pk_number].key_part;
03970 KeyPartInfo *pk_part_end= pk_part +
03971 param->table->key_info[pk_number].key_parts;
03972 for (;(key_part!=key_part_end) && (pk_part != pk_part_end);
03973 ++key_part, ++pk_part)
03974 {
03975 if ((key_part->field != pk_part->field) ||
03976 (key_part->length != pk_part->length))
03977 return false;
03978 }
03979 return (key_part == key_part_end);
03980 }
03981
03982
03983 optimizer::QuickRangeSelect *
03984 optimizer::get_quick_select(Parameter *param,
03985 uint32_t idx,
03986 optimizer::SEL_ARG *key_tree,
03987 uint32_t mrr_flags,
03988 uint32_t mrr_buf_size,
03989 memory::Root *parent_alloc)
03990 {
03991 optimizer::QuickRangeSelect *quick= new optimizer::QuickRangeSelect(param->session,
03992 param->table,
03993 param->real_keynr[idx],
03994 test(parent_alloc),
03995 NULL);
03996
03997 if (quick)
03998 {
03999 if (get_quick_keys(param,
04000 quick,
04001 param->key[idx],
04002 key_tree,
04003 param->min_key,
04004 0,
04005 param->max_key,
04006 0))
04007 {
04008 delete quick;
04009 quick= NULL;
04010 }
04011 else
04012 {
04013 quick->mrr_flags= mrr_flags;
04014 quick->mrr_buf_size= mrr_buf_size;
04015 if (parent_alloc)
04016 {
04017 quick->key_parts=(KEY_PART*)
04018 parent_alloc->memdup_root( (char*) param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
04019 }
04020 else
04021 {
04022 quick->key_parts=(KEY_PART*)
04023 quick->alloc.memdup_root((char*) param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
04024 }
04025 }
04026 }
04027 return quick;
04028 }
04029
04030
04031
04032
04033
bool
optimizer::get_quick_keys(optimizer::Parameter *param,
                          optimizer::QuickRangeSelect *quick,
                          KEY_PART *key,
                          optimizer::SEL_ARG *key_tree,
                          unsigned char *min_key,
                          uint32_t min_key_flag,
                          unsigned char *max_key,
                          uint32_t max_key_flag)
{
  /*
    Walk the SEL_ARG graph in key order and append one QuickRange to
    'quick' for each generated interval. min_key/max_key point into
    param->min_key/param->max_key where this key part's image is to be
    stored. Returns 1 on allocation failure, 0 on success.
  */
  optimizer::QuickRange *range= NULL;
  uint32_t flag;
  int min_part= key_tree->part - 1; /* # of key parts stored in min_key */
  int max_part= key_tree->part - 1; /* # of key parts stored in max_key */

  /* In-order traversal: smaller intervals of this key part first. */
  if (key_tree->left != &optimizer::null_element)
  {
    if (get_quick_keys(param,
                       quick,
                       key,
                       key_tree->left,
                       min_key,
                       min_key_flag,
                       max_key,
                       max_key_flag))
    {
      return 1;
    }
  }
  unsigned char *tmp_min_key= min_key,*tmp_max_key= max_key;
  min_part+= key_tree->store_min(key[key_tree->part].store_length,
                                 &tmp_min_key,min_key_flag);
  max_part+= key_tree->store_max(key[key_tree->part].store_length,
                                 &tmp_max_key,max_key_flag);

  if (key_tree->next_key_part &&
      key_tree->next_key_part->part == key_tree->part+1 &&
      key_tree->next_key_part->type == optimizer::SEL_ARG::KEY_RANGE)
  {
    /*
      Closed point interval: recurse into the next key part so its
      intervals are appended to the key prefix built so far.
    */
    if ((tmp_min_key - min_key) == (tmp_max_key - max_key) &&
        memcmp(min_key, max_key, (uint32_t)(tmp_max_key - max_key))==0 &&
        key_tree->min_flag==0 && key_tree->max_flag==0)
    {
      if (get_quick_keys(param,
                         quick,
                         key,
                         key_tree->next_key_part,
                         tmp_min_key,
                         min_key_flag | key_tree->min_flag,
                         tmp_max_key,
                         max_key_flag | key_tree->max_flag))
      {
        return 1;
      }
      goto end;
    }
    {
      /* Not a point: extend the bounds with the next part's extremes. */
      uint32_t tmp_min_flag=key_tree->min_flag,tmp_max_flag=key_tree->max_flag;
      if (! tmp_min_flag)
      {
        min_part+= key_tree->next_key_part->store_min_key(key,
                                                          &tmp_min_key,
                                                          &tmp_min_flag);
      }
      if (! tmp_max_flag)
      {
        max_part+= key_tree->next_key_part->store_max_key(key,
                                                          &tmp_max_key,
                                                          &tmp_max_flag);
      }
      flag=tmp_min_flag | tmp_max_flag;
    }
  }
  else
  {
    flag= key_tree->min_flag | key_tree->max_flag;
  }

  /*
    A bound that did not advance past the start of the shared key
    buffer means "no bound on this side" of the range.
  */
  if (tmp_min_key != param->min_key)
  {
    flag&= ~NO_MIN_RANGE;
  }
  else
  {
    flag|= NO_MIN_RANGE;
  }
  if (tmp_max_key != param->max_key)
  {
    flag&= ~NO_MAX_RANGE;
  }
  else
  {
    flag|= NO_MAX_RANGE;
  }
  if (flag == 0)
  {
    uint32_t length= (uint32_t) (tmp_min_key - param->min_key);
    if (length == (uint32_t) (tmp_max_key - param->max_key) &&
        ! memcmp(param->min_key,param->max_key,length))
    {
      /* Closed point interval: may qualify as EQ / unique range. */
      KeyInfo *table_key= quick->head->key_info+quick->index;
      flag= EQ_RANGE;
      if ((table_key->flags & (HA_NOSAME)) == HA_NOSAME &&
          key->part == table_key->key_parts-1)
      {
        if (! (table_key->flags & HA_NULL_PART_KEY) ||
            ! null_part_in_key(key,
                               param->min_key,
                               (uint32_t) (tmp_min_key - param->min_key)))
        {
          flag|= UNIQUE_RANGE;
        }
        else
        {
          flag|= NULL_RANGE;
        }
      }
    }
  }

  /* Materialize the range used by QUICK_SELECT::get_next(). */
  if (! (range= new optimizer::QuickRange(param->min_key,
                                          (uint32_t) (tmp_min_key - param->min_key),
                                          min_part >=0 ? make_keypart_map(min_part) : 0,
                                          param->max_key,
                                          (uint32_t) (tmp_max_key - param->max_key),
                                          max_part >=0 ? make_keypart_map(max_part) : 0,
                                          flag)))
  {
    return 1;
  }

  set_if_bigger(quick->max_used_key_length, (uint32_t)range->min_length);
  set_if_bigger(quick->max_used_key_length, (uint32_t)range->max_length);
  set_if_bigger(quick->used_key_parts, (uint32_t) key_tree->part+1);
  quick->ranges.push_back(&range);

end:
  /* In-order traversal: larger intervals of this key part last. */
  if (key_tree->right != &optimizer::null_element)
  {
    return get_quick_keys(param,
                          quick,
                          key,
                          key_tree->right,
                          min_key,
                          min_key_flag,
                          max_key,
                          max_key_flag);
  }
  return 0;
}
04189
04190
04191
04192
04193
04194
04195
04196
04197
04198
04199
04200
04201
04202
04203
04204 static bool null_part_in_key(KEY_PART *key_part, const unsigned char *key, uint32_t length)
04205 {
04206 for (const unsigned char *end=key+length ;
04207 key < end;
04208 key+= key_part++->store_length)
04209 {
04210 if (key_part->null_bit && *key)
04211 return 1;
04212 }
04213 return 0;
04214 }
04215
04216
bool optimizer::QuickSelectInterface::is_keys_used(const boost::dynamic_bitset<>& fields)
{
  /*
    Return whether the index this quick select scans depends on any of
    the fields in the given bitmap (delegates to is_key_used()).
  */
  return is_key_used(head, index, fields);
}
04221
04222
04223
04224
04225
04226
04227
04228
04229
04230
04231
04232
04233
04234
04235
04236
04237
04238
04239
04240
04241
optimizer::QuickRangeSelect *optimizer::get_quick_select_for_ref(Session *session,
                                                                 Table *table,
                                                                 table_reference_st *ref,
                                                                 ha_rows records)
{
  /*
    Build a QuickRangeSelect equivalent to a ref (equality) access on
    ref->key: one EQ range over ref's key buffer, plus a second
    "key IS NULL" range when ref is a ref-or-null access
    (ref->null_ref_key set). Returns NULL on error.
  */
  memory::Root *old_root= NULL;
  memory::Root *alloc= NULL;
  KeyInfo *key_info = &table->key_info[ref->key];
  KEY_PART *key_part;
  optimizer::QuickRange *range= NULL;
  uint32_t part;
  optimizer::CostVector cost;

  old_root= session->mem_root;
  /*
    NOTE(review): the constructor appears to switch session->mem_root;
    we capture the new root in 'alloc' and restore the old one below.
  */
  optimizer::QuickRangeSelect *quick= new optimizer::QuickRangeSelect(session, table, ref->key, 0, 0);

  alloc= session->mem_root;

  session->mem_root= old_root;

  if (! quick)
    return 0;
  if (quick->init())
    goto err;
  quick->records= records;

  if ((cp_buffer_from_ref(session, ref) && session->is_fatal_error) ||
      !(range= new(alloc) optimizer::QuickRange()))
    goto err;

  /* The single EQ range mirrors the ref access's key buffer. */
  range->min_key= range->max_key= ref->key_buff;
  range->min_length= range->max_length= ref->key_length;
  range->min_keypart_map= range->max_keypart_map=
    make_prev_keypart_map(ref->key_parts);
  range->flag= ((ref->key_length == key_info->key_length &&
                 (key_info->flags & HA_END_SPACE_KEY) == 0) ? EQ_RANGE : 0);

  /* Per-key-part descriptors used when reading rows for the ranges. */
  if (!(quick->key_parts=key_part=(KEY_PART *)
        quick->alloc.alloc_root(sizeof(KEY_PART)*ref->key_parts)))
    goto err;

  for (part=0 ; part < ref->key_parts ;part++,key_part++)
  {
    key_part->part=part;
    key_part->field= key_info->key_part[part].field;
    key_part->length= key_info->key_part[part].length;
    key_part->store_length= key_info->key_part[part].store_length;
    key_part->null_bit= key_info->key_part[part].null_bit;
    key_part->flag= (uint8_t) key_info->key_part[part].key_part_flag;
  }
  quick->ranges.push_back(&range);

  /*
    For ref-or-null access, add a second range matching "key IS NULL":
    temporarily set the null byte in the shared key buffer while the
    range copies it, then clear it again.
  */
  if (ref->null_ref_key)
  {
    optimizer::QuickRange *null_range= NULL;

    *ref->null_ref_key= 1;
    if (!(null_range= new (alloc)
          optimizer::QuickRange(ref->key_buff, ref->key_length,
                                make_prev_keypart_map(ref->key_parts),
                                ref->key_buff, ref->key_length,
                                make_prev_keypart_map(ref->key_parts), EQ_RANGE)))
      goto err;
    *ref->null_ref_key= 0;
    quick->ranges.push_back(&null_range);
  }

  /* Ask the engine for MRR flags and buffer size for this scan. */
  quick->mrr_flags= HA_MRR_NO_ASSOCIATION |
                    (table->key_read ? HA_MRR_INDEX_ONLY : 0);
  if (session->lex().sql_command != SQLCOM_SELECT)
    quick->mrr_flags |= HA_MRR_USE_DEFAULT_IMPL;

  quick->mrr_buf_size= session->variables.read_rnd_buff_size;
  if (table->cursor->multi_range_read_info(quick->index, 1, (uint32_t)records,
                                           &quick->mrr_buf_size,
                                           &quick->mrr_flags, &cost))
    goto err;

  return quick;
err:
  delete quick;
  return 0;
}
04337
04338
04339
04340
04341
04342
04343
04344
04345
04346
04347
04348
04349
04350
04351
04352 range_seq_t optimizer::quick_range_seq_init(void *init_param, uint32_t, uint32_t)
04353 {
04354 optimizer::QuickRangeSelect *quick= (optimizer::QuickRangeSelect*)init_param;
04355 quick->qr_traversal_ctx.first= (optimizer::QuickRange**)quick->ranges.buffer;
04356 quick->qr_traversal_ctx.cur= (optimizer::QuickRange**)quick->ranges.buffer;
04357 quick->qr_traversal_ctx.last= quick->qr_traversal_ctx.cur +
04358 quick->ranges.size();
04359 return &quick->qr_traversal_ctx;
04360 }
04361
04362
04363
04364
04365
04366
04367
04368
04369
04370
04371
04372
04373
04374
04375 uint32_t optimizer::quick_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
04376 {
04377 QuickRangeSequenceContext *ctx= (QuickRangeSequenceContext*) rseq;
04378
04379 if (ctx->cur == ctx->last)
04380 return 1;
04381
04382 optimizer::QuickRange *cur= *(ctx->cur);
04383 key_range *start_key= &range->start_key;
04384 key_range *end_key= &range->end_key;
04385
04386 start_key->key= cur->min_key;
04387 start_key->length= cur->min_length;
04388 start_key->keypart_map= cur->min_keypart_map;
04389 start_key->flag= ((cur->flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
04390 (cur->flag & EQ_RANGE) ?
04391 HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT);
04392 end_key->key= cur->max_key;
04393 end_key->length= cur->max_length;
04394 end_key->keypart_map= cur->max_keypart_map;
04395
04396
04397
04398
04399 end_key->flag= (cur->flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
04400 HA_READ_AFTER_KEY);
04401 range->range_flag= cur->flag;
04402 ctx->cur++;
04403 return 0;
04404 }
04405
04406
04407 static inline uint32_t get_field_keypart(KeyInfo *index, Field *field);
04408
04409 static inline optimizer::SEL_ARG * get_index_range_tree(uint32_t index,
04410 optimizer::SEL_TREE *range_tree,
04411 optimizer::Parameter *param,
04412 uint32_t *param_idx);
04413
04414 static bool get_constant_key_infix(KeyInfo *index_info,
04415 optimizer::SEL_ARG *index_range_tree,
04416 KeyPartInfo *first_non_group_part,
04417 KeyPartInfo *min_max_arg_part,
04418 KeyPartInfo *last_part,
04419 Session *session,
04420 unsigned char *key_infix,
04421 uint32_t *key_infix_len,
04422 KeyPartInfo **first_non_infix_part);
04423
04424 static bool check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item);
04425
04426 static void
04427 cost_group_min_max(Table* table,
04428 KeyInfo *index_info,
04429 uint32_t used_key_parts,
04430 uint32_t group_key_parts,
04431 optimizer::SEL_TREE *range_tree,
04432 optimizer::SEL_ARG *index_tree,
04433 ha_rows quick_prefix_records,
04434 bool have_min,
04435 bool have_max,
04436 double *read_cost,
04437 ha_rows *records);
04438
04439
04440
04441
04442
04443
04444
04445
04446
04447
04448
04449
04450
04451
04452
04453
04454
04455
04456
04457
04458
04459
04460
04461
04462
04463
04464
04465
04466
04467
04468
04469
04470
04471
04472
04473
04474
04475
04476
04477
04478
04479
04480
04481
04482
04483
04484
04485
04486
04487
04488
04489
04490
04491
04492
04493
04494
04495
04496
04497
04498
04499
04500
04501
04502
04503
04504
04505
04506
04507
04508
04509
04510
04511
04512
04513
04514
04515
04516
04517
04518
04519
04520
04521
04522
04523
04524
04525
04526
04527
04528
04529
04530
04531
04532
04533
04534
04535
04536
04537
04538
04539
04540
04541
04542
04543
04544
04545
04546
04547
04548
04549
04550
04551
04552
04553
04554
04555
04556
04557
04558
04559
04560
04561
04562
04563
04564
04565
04566
/*
  Test if this query can be answered with a loose index scan
  (GROUP BY / DISTINCT with optional MIN/MAX), and if so build the cheapest
  GroupMinMaxReadPlan over all applicable indexes.

  For each covering index the function checks that:
    - the GROUP BY columns (or the DISTINCT select columns) form a prefix of
      the index;
    - the single MIN/MAX argument column, if any, comes after that prefix;
    - any key parts between the group prefix and the MIN/MAX part are bound
      to constants by the range tree (the "key infix");
  and then costs the access with cost_group_min_max(), keeping the cheapest
  candidate.

  @param param  range-optimizer parameter block (session, table, key maps)
  @param tree   range tree built from the WHERE clause, or NULL

  @return the best loose-index-scan plan, or NULL if none applies.
*/
static optimizer::GroupMinMaxReadPlan *
get_best_group_min_max(optimizer::Parameter *param, optimizer::SEL_TREE *tree)
{
  Session *session= param->session;
  Join *join= session->lex().current_select->join;
  Table *table= param->table;
  bool have_min= false;                       /* query computes MIN() */
  bool have_max= false;                       /* query computes MAX() */
  Item_field *min_max_arg_item= NULL;         /* the single MIN/MAX column */
  KeyPartInfo *min_max_arg_part= NULL;        /* its key part, per index */
  uint32_t group_prefix_len= 0;               /* best index: prefix bytes */
  KeyInfo *index_info= NULL;                  /* best index found so far */
  uint32_t index= 0;
  uint32_t group_key_parts= 0;
  uint32_t used_key_parts= 0;
  unsigned char key_infix[MAX_KEY_LENGTH];    /* constants between prefix and MIN/MAX part */
  uint32_t key_infix_len= 0;
  optimizer::GroupMinMaxReadPlan *read_plan= NULL;
  uint32_t key_part_nr;
  Order *tmp_group= NULL;
  Item *item= NULL;
  Item_field *item_field= NULL;

  /* The optimization only applies inside an actual (single-table) join. */
  if (! join)
    return NULL;

  /* Requires exactly one table, a GROUP BY or DISTINCT, and no ROLLUP. */
  if ((join->tables != 1) ||
      ((! join->group_list) &&
       (! join->select_distinct)) ||
      (join->select_lex->olap == ROLLUP_TYPE))
    return NULL;
  if (table->getShare()->sizeKeys() == 0)
    return NULL;

  List<Item>::iterator select_items_it(join->fields_list.begin());

  if (join->make_sum_func_list(join->all_fields, join->fields_list, 1))
    return NULL;

  /*
    Every aggregate must be MIN or MAX, and all of them must reference the
    same single column; anything else disqualifies the optimization.
  */
  if (join->sum_funcs[0])
  {
    Item_sum *min_max_item= NULL;
    Item_sum **func_ptr= join->sum_funcs;
    while ((min_max_item= *(func_ptr++)))
    {
      if (min_max_item->sum_func() == Item_sum::MIN_FUNC)
        have_min= true;
      else if (min_max_item->sum_func() == Item_sum::MAX_FUNC)
        have_max= true;
      else
        return NULL;

      /* All MIN/MAX calls must be over the same plain column. */
      Item *expr= min_max_item->args[0]->real_item();
      if (expr->type() == Item::FIELD_ITEM)
      {
        if (! min_max_arg_item)
          min_max_arg_item= (Item_field*) expr;
        else if (! min_max_arg_item->eq(expr, 1))
          return NULL;
      }
      else
        return NULL;
    }
  }

  /* For DISTINCT every select item must be a plain column. */
  if (join->select_distinct)
  {
    while ((item= select_items_it++))
    {
      if (item->type() != Item::FIELD_ITEM)
        return NULL;
    }
  }

  /* Likewise, every GROUP BY expression must be a plain column. */
  for (tmp_group= join->group_list; tmp_group; tmp_group= tmp_group->next)
  {
    if ((*tmp_group->item)->type() != Item::FIELD_ITEM)
      return NULL;
  }

  /*
    Candidate loop: examine every index of the table and keep the one with
    the lowest estimated read cost.  "cur_*" variables describe the index
    being examined; they are folded into the "best"/result variables when
    the candidate wins, and reset at next_index.
  */
  KeyInfo *cur_index_info= table->key_info;
  KeyInfo *cur_index_info_end= cur_index_info + table->getShare()->sizeKeys();
  KeyPartInfo *cur_part= NULL;
  KeyPartInfo *end_part= NULL;

  KeyPartInfo *last_part= NULL;
  KeyPartInfo *first_non_group_part= NULL;
  KeyPartInfo *first_non_infix_part= NULL;
  uint32_t key_infix_parts= 0;
  uint32_t cur_group_key_parts= 0;
  uint32_t cur_group_prefix_len= 0;

  double best_read_cost= DBL_MAX;
  ha_rows best_records= 0;
  optimizer::SEL_ARG *best_index_tree= NULL;
  ha_rows best_quick_prefix_records= 0;
  uint32_t best_param_idx= 0;
  double cur_read_cost= DBL_MAX;
  ha_rows cur_records;
  optimizer::SEL_ARG *cur_index_tree= NULL;
  ha_rows cur_quick_prefix_records= 0;
  uint32_t cur_param_idx= MAX_KEY;
  key_map used_key_parts_map;
  uint32_t cur_key_infix_len= 0;
  unsigned char cur_key_infix[MAX_KEY_LENGTH];
  uint32_t cur_used_key_parts= 0;
  uint32_t pk= param->table->getShare()->getPrimaryKey();

  for (uint32_t cur_index= 0;
       cur_index_info != cur_index_info_end;
       cur_index_info++, cur_index++)
  {
    /* The index must cover all columns the query reads. */
    if (! table->covering_keys.test(cur_index))
      goto next_index;

    /*
      For engines with a clustered primary key, a secondary index also
      "covers" PK columns implicitly; verify that every read column really
      is part of this index when accessed non-clustered.
    */
    if (pk < MAX_KEY && cur_index != pk &&
        (table->cursor->getEngine()->check_flag(HTON_BIT_PRIMARY_KEY_IN_READ_INDEX)))
    {
      /* Check that all query fields are members of this index. */
      for (uint32_t i= 0; i < table->getShare()->sizeFields(); i++)
      {
        Field *cur_field= table->getField(i);

        /* A read column not in the index disqualifies it. */
        if ((cur_field->isReadSet()) &&
            ! cur_field->part_of_key_not_clustered.test(cur_index))
          goto next_index;
      }
    }

    /*
      GROUP BY case: the GROUP BY columns must match this index's key parts
      in order, forming a prefix of the index.
    */
    if (join->group_list)
    {
      cur_part= cur_index_info->key_part;
      end_part= cur_part + cur_index_info->key_parts;

      for (tmp_group= join->group_list;
           tmp_group && (cur_part != end_part);
           tmp_group= tmp_group->next, cur_part++)
      {
        /* Earlier validation guarantees each GROUP BY item is a field. */
        assert((*tmp_group->item)->type() == Item::FIELD_ITEM);
        Item_field *group_field= (Item_field *) (*tmp_group->item);
        if (group_field->field->eq(cur_part->field))
        {
          cur_group_prefix_len+= cur_part->store_length;
          ++cur_group_key_parts;
        }
        else
          goto next_index;
      }
    }

    /*
      DISTINCT case: the distinct columns may appear in any order in the
      select list, but together they must form a gap-free prefix of this
      index (checked via the bitmap comparison below).
    */
    else if (join->select_distinct)
    {
      select_items_it= join->fields_list.begin();
      used_key_parts_map.reset();
      uint32_t max_key_part= 0;
      while ((item= select_items_it++))
      {
        item_field= (Item_field*) item;

        key_part_nr= get_field_keypart(cur_index_info, item_field->field);

        /* Duplicate columns in the select list are counted only once. */
        if (used_key_parts_map.test(key_part_nr))
          continue;
        if (key_part_nr < 1 || key_part_nr > join->fields_list.size())
          goto next_index;
        cur_part= cur_index_info->key_part + key_part_nr - 1;
        cur_group_prefix_len+= cur_part->store_length;
        used_key_parts_map.set(key_part_nr);
        ++cur_group_key_parts;
        max_key_part= max(max_key_part,key_part_nr);
      }

      /*
        The used key parts (1-based in used_key_parts_map) must be exactly
        {1..max_key_part}; compare against a contiguous mask after shifting
        to 0-based positions.
      */
      key_map all_parts;
      key_map cur_parts;
      for (uint32_t pos= 0; pos < max_key_part; pos++)
        all_parts.set(pos);
      cur_parts= used_key_parts_map >> 1;
      if (all_parts != cur_parts)
        goto next_index;
    }
    else
      assert(false);

    /* The MIN/MAX column must be the key part right after the prefix. */
    if (min_max_arg_item)
    {
      key_part_nr= get_field_keypart(cur_index_info, min_max_arg_item->field);
      if (key_part_nr <= cur_group_key_parts)
        goto next_index;
      min_max_arg_part= cur_index_info->key_part + key_part_nr - 1;
    }

    /*
      Determine the layout of this index relative to the group prefix:
        last_part            - one past the final key part,
        first_non_group_part - first part after the group prefix (or NULL
                               if the prefix spans the whole index),
        first_non_infix_part - first part after the constant infix.
      Key parts strictly between the prefix and the MIN/MAX part must be
      bound to constants by the range tree (the "infix"); otherwise the
      candidate is rejected.
    */
    last_part= cur_index_info->key_part + cur_index_info->key_parts;
    first_non_group_part= (cur_group_key_parts < cur_index_info->key_parts) ?
      cur_index_info->key_part + cur_group_key_parts :
      NULL;
    first_non_infix_part= min_max_arg_part ?
      (min_max_arg_part < last_part) ?
      min_max_arg_part :
      NULL :
      NULL;
    if (first_non_group_part &&
        (! min_max_arg_part || (min_max_arg_part - first_non_group_part > 0)))
    {
      if (tree)
      {
        uint32_t dummy;
        optimizer::SEL_ARG *index_range_tree= get_index_range_tree(cur_index,
                                                                   tree,
                                                                   param,
                                                                   &dummy);
        if (! get_constant_key_infix(cur_index_info,
                                     index_range_tree,
                                     first_non_group_part,
                                     min_max_arg_part,
                                     last_part,
                                     session,
                                     cur_key_infix,
                                     &cur_key_infix_len,
                                     &first_non_infix_part))
        {
          goto next_index;
        }
      }
      else if (min_max_arg_part &&
               (min_max_arg_part - first_non_group_part > 0))
      {
        /*
          There is a gap between the group prefix and the MIN/MAX part but
          no range tree to supply constants for it; cannot use this index.
        */
        goto next_index;
      }
      else if (first_non_group_part && join->conds)
      {
        /*
          No MIN/MAX: the WHERE clause must not reference any key part
          after the group prefix, otherwise skipping groups would lose
          predicate evaluation.  Walk the condition looking for fields in
          [first_non_group_part, last_part).
        */
        KeyPartInfo *key_part_range[2];
        key_part_range[0]= first_non_group_part;
        key_part_range[1]= last_part;

        /* Check if cur_part is referenced in the WHERE clause. */
        if (join->conds->walk(&Item::find_item_in_field_list_processor,
                              0,
                              (unsigned char*) key_part_range))
          goto next_index;
      }
    }

    /*
      No key part after the infix (and the MIN/MAX part, if present) may be
      referenced by the query.
    */
    if (first_non_infix_part)
    {
      cur_part= first_non_infix_part +
        (min_max_arg_part && (min_max_arg_part < last_part));
      for (; cur_part != last_part; cur_part++)
      {
        if (cur_part->field->isReadSet())
          goto next_index;
      }
    }

    /* Candidate accepted: compute how many key parts it actually uses. */
    key_infix_parts= cur_key_infix_len ?
      (first_non_infix_part - first_non_group_part) : 0;
    cur_used_key_parts= cur_group_key_parts + key_infix_parts;

    /* Cost the candidate, including the range scan over the group prefix. */
    if (tree)
    {
      /* Find the SEL_ARG sub-tree that corresponds to this index. */
      cur_index_tree= get_index_range_tree(cur_index,
                                           tree,
                                           param,
                                           &cur_param_idx);

      optimizer::CostVector dummy_cost;
      uint32_t mrr_flags= HA_MRR_USE_DEFAULT_IMPL;
      uint32_t mrr_bufsize= 0;
      cur_quick_prefix_records= check_quick_select(session,
                                                   param,
                                                   cur_param_idx,
                                                   false ,
                                                   cur_index_tree,
                                                   true,
                                                   &mrr_flags,
                                                   &mrr_bufsize,
                                                   &dummy_cost);
    }
    cost_group_min_max(table,
                       cur_index_info,
                       cur_used_key_parts,
                       cur_group_key_parts,
                       tree,
                       cur_index_tree,
                       cur_quick_prefix_records,
                       have_min,
                       have_max,
                       &cur_read_cost,
                       &cur_records);

    /*
      Keep this candidate if it is strictly cheaper than the current best
      (the DBL_EPSILON term guards against float rounding noise).
    */
    if (cur_read_cost < best_read_cost - (DBL_EPSILON * cur_read_cost))
    {
      assert(tree != 0 || cur_param_idx == MAX_KEY);
      index_info= cur_index_info;
      index= cur_index;
      best_read_cost= cur_read_cost;
      best_records= cur_records;
      best_index_tree= cur_index_tree;
      best_quick_prefix_records= cur_quick_prefix_records;
      best_param_idx= cur_param_idx;
      group_key_parts= cur_group_key_parts;
      group_prefix_len= cur_group_prefix_len;
      key_infix_len= cur_key_infix_len;

      if (key_infix_len)
      {
        memcpy(key_infix, cur_key_infix, sizeof (key_infix));
      }

      used_key_parts= cur_used_key_parts;
    }

  next_index:
    /* Reset per-candidate accumulators before examining the next index. */
    cur_group_key_parts= 0;
    cur_group_prefix_len= 0;
    cur_key_infix_len= 0;
  }
  if (! index_info)
    return NULL;

  /* The WHERE clause must be compatible with pushing MIN/MAX into groups. */
  if (join->conds && min_max_arg_item &&
      ! check_group_min_max_predicates(join->conds, min_max_arg_item))
    return NULL;

  /* Materialize the winning candidate as a read plan. */
  read_plan=
    new(param->mem_root) optimizer::GroupMinMaxReadPlan(have_min,
                                                        have_max,
                                                        min_max_arg_part,
                                                        group_prefix_len,
                                                        used_key_parts,
                                                        group_key_parts,
                                                        index_info,
                                                        index,
                                                        key_infix_len,
                                                        (key_infix_len > 0) ? key_infix : NULL,
                                                        tree,
                                                        best_index_tree,
                                                        best_param_idx,
                                                        best_quick_prefix_records);
  if (read_plan)
  {
    /* A range tree that selects no rows means the plan is useless. */
    if (tree && read_plan->quick_prefix_records == 0)
      return NULL;

    read_plan->read_cost= best_read_cost;
    read_plan->records= best_records;
  }

  return read_plan;
}
05009
05010
05011
05012
05013
05014
05015
05016
05017
05018
05019
05020
05021
05022
05023
05024
05025
05026
05027
05028
05029
05030
05031
/*
  Recursively check whether the WHERE condition is compatible with a loose
  index scan that computes MIN/MAX over min_max_arg_item.

  AND/OR items are accepted iff all of their arguments are; subselects are
  rejected outright.  For a plain predicate that references the MIN/MAX
  column, only simple comparison forms are allowed and (for string columns)
  the predicate's collation must match the column's, so that index order
  agrees with comparison order.

  @return true if the condition does not prevent the optimization.
*/
static bool check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item)
{
  assert(cond && min_max_arg_item);

  cond= cond->real_item();
  Item::Type cond_type= cond->type();
  if (cond_type == Item::COND_ITEM)
  {
    /* AND/OR: every branch must individually pass the check. */
    List<Item>::iterator li(((Item_cond*) cond)->argument_list()->begin());
    Item *and_or_arg= NULL;
    while ((and_or_arg= li++))
    {
      if (! check_group_min_max_predicates(and_or_arg, min_max_arg_item))
        return false;
    }
    return true;
  }

  /*
    Subqueries in the WHERE clause are not analyzable here, so their mere
    presence disqualifies the optimization.
  */
  if (cond_type == Item::SUBSELECT_ITEM)
    return false;

  /* At this point only function items are expected. */
  assert(cond_type == Item::FUNC_ITEM);

  /* Inspect each argument of the predicate. */
  Item_func *pred= (Item_func*) cond;
  Item **arguments= pred->arguments();
  Item *cur_arg= NULL;
  for (uint32_t arg_idx= 0; arg_idx < pred->argument_count (); arg_idx++)
  {
    cur_arg= arguments[arg_idx]->real_item();
    if (cur_arg->type() == Item::FIELD_ITEM)
    {
      if (min_max_arg_item->eq(cur_arg, 1))
      {
        /*
          The predicate constrains the MIN/MAX column itself: only simple
          comparison/range/null-test operators can be pushed into the
          per-group MIN/MAX lookup.
        */
        Item_func::Functype pred_type= pred->functype();
        if (pred_type != Item_func::EQUAL_FUNC &&
            pred_type != Item_func::LT_FUNC &&
            pred_type != Item_func::LE_FUNC &&
            pred_type != Item_func::GT_FUNC &&
            pred_type != Item_func::GE_FUNC &&
            pred_type != Item_func::BETWEEN &&
            pred_type != Item_func::ISNULL_FUNC &&
            pred_type != Item_func::ISNOTNULL_FUNC &&
            pred_type != Item_func::EQ_FUNC &&
            pred_type != Item_func::NE_FUNC)
          return false;

        /* The predicate must decompose into "column <op> constant". */
        Item *args[3];
        memset(args, 0, 3 * sizeof(Item*));
        bool inv= false;

        if (! optimizer::simple_pred(pred, args, inv))
          return false;

        /*
          For a string column, comparing under a different collation (or
          against a non-string constant with a different comparison type)
          would not follow index order — reject those predicates.
        */
        if (args[0] && args[1] && !args[2] &&
            min_max_arg_item->result_type() == STRING_RESULT &&
            /* string comparison with a mismatching collation, or ... */
            ((args[1]->result_type() == STRING_RESULT &&
              ((Field_str*) min_max_arg_item->field)->charset() !=
              pred->compare_collation())
             ||
             /* ... non-string constant compared under a different type. */
             (args[1]->result_type() != STRING_RESULT &&
              min_max_arg_item->field->cmp_type() != args[1]->result_type())))
        {
          return false;
        }
      }
    }
    else if (cur_arg->type() == Item::FUNC_ITEM)
    {
      /* Nested function argument: recurse into it. */
      if (! check_group_min_max_predicates(cur_arg, min_max_arg_item))
        return false;
    }
    else if (cur_arg->const_item())
    {
      /* A constant argument cannot invalidate the optimization. */
      return true;
    }
    else
      return false;
  }

  return true;
}
05137
05138
05139
05140
05141
05142
05143
05144
05145
05146
05147
05148
05149
05150
05151
05152
05153
05154
05155
05156
05157
05158
05159
05160
05161
05162
05163
05164
05165
05166
05167
05168
/*
  Extract the constant "infix" between the group prefix and the MIN/MAX key
  part of an index.

  Every key part in [first_non_group_part, end_part) — where end_part is the
  MIN/MAX part if present, otherwise last_part — must be restricted by the
  range tree to a single constant value; those constants are concatenated
  into key_infix.

  @param[in]  index_range_tree      SEL_ARG tree for this index
  @param[in]  first_non_group_part  first key part after the group prefix
  @param[in]  min_max_arg_part      MIN/MAX key part, or NULL
  @param[in]  last_part             one past the index's final key part
  @param[out] key_infix             receives the concatenated constants
  @param[out] key_infix_len         receives the total infix length in bytes
  @param[out] first_non_infix_part  receives the first part after the infix

  @return true if all required parts were constant (or none were required),
          false otherwise.
*/
static bool
get_constant_key_infix(KeyInfo *,
                       optimizer::SEL_ARG *index_range_tree,
                       KeyPartInfo *first_non_group_part,
                       KeyPartInfo *min_max_arg_part,
                       KeyPartInfo *last_part,
                       Session *,
                       unsigned char *key_infix,
                       uint32_t *key_infix_len,
                       KeyPartInfo **first_non_infix_part)
{
  optimizer::SEL_ARG *cur_range= NULL;
  KeyPartInfo *cur_part= NULL;
  /* The infix ends right before the MIN/MAX part, or at the index's end. */
  KeyPartInfo *end_part= min_max_arg_part ? min_max_arg_part : last_part;

  *key_infix_len= 0;
  unsigned char *key_ptr= key_infix;
  for (cur_part= first_non_group_part; cur_part != end_part; cur_part++)
  {
    /*
      Find the range-tree node that constrains this key part by following
      the next_key_part chain and matching on the field.
    */
    for (cur_range= index_range_tree; cur_range;
         cur_range= cur_range->next_key_part)
    {
      if (cur_range->field->eq(cur_part->field))
        break;
    }
    if (! cur_range)
    {
      if (min_max_arg_part)
        return false; /* The current keypart has no range predicates at all. */
      else
      {
        *first_non_infix_part= cur_part;
        return true;
      }
    }

    /* A disjunction (multiple ranges) means the part is not one constant. */
    if (cur_range->prev || cur_range->next)
      return false;
    /* Open or infinite bounds likewise rule out a single constant value. */
    if ((cur_range->min_flag & NO_MIN_RANGE) ||
        (cur_range->max_flag & NO_MAX_RANGE) ||
        (cur_range->min_flag & NEAR_MIN) ||
        (cur_range->max_flag & NEAR_MAX))
      return false;

    uint32_t field_length= cur_part->store_length;
    if ((cur_range->maybe_null &&
         cur_range->min_value[0] && cur_range->max_value[0]) ||
        !memcmp(cur_range->min_value, cur_range->max_value, field_length))
    {
      /* IS NULL or a closed range with min == max: append the constant. */
      memcpy(key_ptr, cur_range->min_value, field_length);
      key_ptr+= field_length;
      *key_infix_len+= field_length;
    }
    else
      return false;
  }

  if (!min_max_arg_part && (cur_part == last_part))
    *first_non_infix_part= last_part;

  return true;
}
05238
05239
05240
05241
05242
05243
05244
05245
05246
05247
05248
05249
05250
05251
05252
05253
05254
05255
05256 static inline uint
05257 get_field_keypart(KeyInfo *index, Field *field)
05258 {
05259 KeyPartInfo *part= NULL;
05260 KeyPartInfo *end= NULL;
05261
05262 for (part= index->key_part, end= part + index->key_parts; part < end; part++)
05263 {
05264 if (field->eq(part->field))
05265 return part - index->key_part + 1;
05266 }
05267 return 0;
05268 }
05269
05270
05271
05272
05273
05274
05275
05276
05277
05278
05279
05280
05281
05282
05283
05284
05285
05286
05287
05288
05289
05290
05291
05292
05293 optimizer::SEL_ARG *get_index_range_tree(uint32_t index,
05294 optimizer::SEL_TREE* range_tree,
05295 optimizer::Parameter *param,
05296 uint32_t *param_idx)
05297 {
05298 uint32_t idx= 0;
05299 while (idx < param->keys)
05300 {
05301 if (index == param->real_keynr[idx])
05302 break;
05303 idx++;
05304 }
05305 *param_idx= idx;
05306 return range_tree->keys[idx];
05307 }
05308
05309
05310
05311
05312
05313
05314
05315
05316
05317
05318
05319
05320
05321
05322
05323
05324
05325
05326
05327
05328
05329
05330
05331
05332
05333
05334
05335
05336
05337
05338
05339
05340
05341
05342
05343
05344
05345
05346
05347
05348
05349
05350
05351
05352
05353
05354
05355
05356
05357
05358
05359
05360
05361
05362
05363
05364
05365
05366
05367
05368
/*
  Estimate the cost and cardinality of a loose index scan (GROUP BY with
  optional MIN/MAX) over the given index.

  The model estimates the number of groups from index statistics, scales it
  by the selectivity of the quick prefix ranges when a range tree is
  present, and derives an I/O cost from how many index blocks the per-group
  lookups are expected to touch.

  @param[in]  table                 table being scanned
  @param[in]  index_info            index used for the loose scan
  @param[in]  used_key_parts        group prefix + constant infix parts
  @param[in]  group_key_parts       number of group-prefix parts
  @param[in]  range_tree            range tree, or NULL
  @param[in]  quick_prefix_records  rows matched by prefix ranges
                                    (HA_POS_ERROR when unknown)
  @param[in]  have_min/have_max     which aggregates the query computes
  @param[out] read_cost             estimated total read cost
  @param[out] records               estimated number of groups returned
*/
void cost_group_min_max(Table* table,
                        KeyInfo *index_info,
                        uint32_t used_key_parts,
                        uint32_t group_key_parts,
                        optimizer::SEL_TREE *range_tree,
                        optimizer::SEL_ARG *,
                        ha_rows quick_prefix_records,
                        bool have_min,
                        bool have_max,
                        double *read_cost,
                        ha_rows *records)
{
  ha_rows table_records;
  uint32_t num_groups;          /* estimated number of distinct prefixes */
  uint32_t num_blocks;          /* estimated index blocks in the table */
  uint32_t keys_per_block;      /* index entries fitting in half a block */
  uint32_t keys_per_group;      /* avg rows per group prefix */
  uint32_t keys_per_subgroup;   /* avg rows per prefix + infix */

  double p_overlap;             /* probability a subgroup overlaps 2 blocks */
  double quick_prefix_selectivity;
  double io_cost;
  double cpu_cost= 0;

  table_records= table->cursor->stats.records;
  /* Assume index blocks are half full on average. */
  keys_per_block= (table->cursor->stats.block_size / 2 /
                   (index_info->key_length + table->cursor->ref_length)
                   + 1);
  num_blocks= (uint32_t) (table_records / keys_per_block) + 1;

  /* Compute the number of distinct prefix keys from index statistics. */
  keys_per_group= index_info->rec_per_key[group_key_parts - 1];
  if (keys_per_group == 0)
    /* No statistics available: guess at 10 rows per group. */
    keys_per_group= (uint32_t)(table_records / 10) + 1;
  num_groups= (uint32_t)(table_records / keys_per_group) + 1;

  /* Scale the group count by the selectivity of the prefix ranges. */
  if (range_tree && (quick_prefix_records != HA_POS_ERROR))
  {
    quick_prefix_selectivity= (double) quick_prefix_records /
      (double) table_records;
    num_groups= (uint32_t) rint(num_groups * quick_prefix_selectivity);
    set_if_bigger(num_groups, 1U);
  }

  if (used_key_parts > group_key_parts)
  {
    /*
      A constant infix narrows each group to a "subgroup"; estimate the
      chance that reading one subgroup touches two index blocks.
    */
    keys_per_subgroup= index_info->rec_per_key[used_key_parts - 1];
    if (keys_per_subgroup >= keys_per_block)
      p_overlap= 1.0; /* a subgroup is at least one whole block */
    else
    {
      double blocks_per_group= (double) num_blocks / (double) num_groups;
      p_overlap= (blocks_per_group * (keys_per_subgroup - 1)) / keys_per_group;
      p_overlap= min(p_overlap, 1.0);
    }
    /* One block per group plus the overlap, capped at the whole index. */
    io_cost= (double) min(num_groups * (1 + p_overlap), (double)num_blocks);
  }
  else
    /*
      No infix: large groups cost one block per group (two lookups per
      group when both MIN and MAX are needed); small groups effectively
      scan all blocks.
    */
    io_cost= (keys_per_group > keys_per_block) ?
      (have_min && have_max) ? (double) (num_groups + 1) :
      (double) num_groups :
      (double) num_blocks;

  /* CPU cost is proportional to the number of groups examined. */
  cpu_cost= (double) num_groups / TIME_FOR_COMPARE;

  *read_cost= io_cost + cpu_cost;
  *records= num_groups;
}
05447
05448
05449
05450
05451
05452
05453
05454
05455
05456
05457
05458
05459
05460
05461
05462
05463
05464
05465
05466
05467
05468
/*
  Construct the executable QuickGroupMinMaxSelect for this read plan.

  Builds the quick select from the plan's saved parameters, attaches a
  quick range select over the group prefix when prefix ranges exist, and
  registers the ranges that constrain the MIN/MAX key part.

  @param param         range-optimizer parameter block
  @param parent_alloc  memory root to allocate the quick select on
                       (may be NULL; forwarded to the constructor)

  @return the quick select, or NULL on construction/initialization failure.
*/
optimizer::QuickSelectInterface *
optimizer::GroupMinMaxReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *parent_alloc)
{
  optimizer::QuickGroupMinMaxSelect *quick= NULL;

  quick= new optimizer::QuickGroupMinMaxSelect(param->table,
                                               param->session->lex().current_select->join,
                                               have_min,
                                               have_max,
                                               min_max_arg_part,
                                               group_prefix_len,
                                               group_key_parts,
                                               used_key_parts,
                                               index_info,
                                               index,
                                               read_cost,
                                               records,
                                               key_infix_len,
                                               key_infix,
                                               parent_alloc);
  if (! quick)
  {
    return NULL;
  }

  if (quick->init())
  {
    delete quick;
    return NULL;
  }

  if (range_tree)
  {
    assert(quick_prefix_records > 0);
    if (quick_prefix_records == HA_POS_ERROR)
    {
      /* Can't construct a quick select over the prefix. */
      quick->quick_prefix_select= NULL;
    }
    else
    {
      /* Build a quick select that scans the ranges of the group prefix. */
      quick->quick_prefix_select= optimizer::get_quick_select(param,
                                                              param_idx,
                                                              index_tree,
                                                              HA_MRR_USE_DEFAULT_IMPL,
                                                              0,
                                                              &quick->alloc);
    }

    /*
      Register the ranges on the MIN/MAX key part: locate its SEL_ARG node
      in the index tree, rewind to the first range of that key part, and
      add every range in order.
    */
    if (min_max_arg_part)
    {
      optimizer::SEL_ARG *min_max_range= index_tree;
      while (min_max_range)
      {
        if (min_max_range->field->eq(min_max_arg_part->field))
          break;
        min_max_range= min_max_range->next_key_part;
      }
      /* Rewind to the first range in the doubly-linked range list. */
      while (min_max_range && min_max_range->prev)
        min_max_range= min_max_range->prev;
      /* Add all ranges to the quick select, in ascending order. */
      while (min_max_range)
      {
        if (quick->add_range(min_max_range))
        {
          delete quick;
          quick= NULL;
          return NULL;
        }
        min_max_range= min_max_range->next;
      }
    }
  }
  else
    quick->quick_prefix_select= NULL;

  quick->update_key_stat();
  quick->adjust_prefix_ranges();

  return quick;
}
05556
05557
05558 optimizer::QuickSelectInterface *optimizer::RangeReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *parent_alloc)
05559 {
05560 optimizer::QuickRangeSelect *quick= NULL;
05561 if ((quick= optimizer::get_quick_select(param,
05562 key_idx,
05563 key,
05564 mrr_flags,
05565 mrr_buf_size,
05566 parent_alloc)))
05567 {
05568 quick->records= records;
05569 quick->read_time= read_cost;
05570 }
05571 return quick;
05572 }
05573
05574
05575 uint32_t optimizer::RorScanInfo::findFirstNotSet() const
05576 {
05577 boost::dynamic_bitset<> map= bitsToBitset();
05578 for (boost::dynamic_bitset<>::size_type i= 0; i < map.size(); i++)
05579 {
05580 if (! map.test(i))
05581 {
05582 return i;
05583 }
05584 }
05585 return map.size();
05586 }
05587
05588
05589 size_t optimizer::RorScanInfo::getBitCount() const
05590 {
05591 boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
05592 return tmp_bitset.count();
05593 }
05594
05595
05596 void optimizer::RorScanInfo::subtractBitset(const boost::dynamic_bitset<>& in_bitset)
05597 {
05598 boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
05599 tmp_bitset-= in_bitset;
05600 covered_fields= tmp_bitset.to_ulong();
05601 }
05602
05603
05604 boost::dynamic_bitset<> optimizer::RorScanInfo::bitsToBitset() const
05605 {
05606 string res;
05607 uint64_t conv= covered_fields;
05608 while (conv)
05609 {
05610 res.push_back((conv & 1) + '0');
05611 conv>>= 1;
05612 }
05613 if (! res.empty())
05614 {
05615 std::reverse(res.begin(), res.end());
05616 }
05617 string final(covered_fields_size - res.length(), '0');
05618 final.append(res);
05619 return (boost::dynamic_bitset<>(final));
05620 }
05621
05622
05623 }