📄 pathnode.c
字号:
* 'clause_groups' is a list of lists of RestrictInfo nodes
 *		to be used as index qual conditions in the scan.
 * 'pathkeys' describes the ordering of the path.
 * 'indexscandir' is ForwardScanDirection or BackwardScanDirection
 *		for an ordered index, or NoMovementScanDirection for
 *		an unordered index.
 * 'isjoininner' is TRUE if this is a join inner indexscan path.
 *		(pathkeys and indexscandir are ignored if so.)
 *
 * Returns the new path node.
 */
IndexPath *
create_index_path(PlannerInfo *root,
				  IndexOptInfo *index,
				  List *clause_groups,
				  List *pathkeys,
				  ScanDirection indexscandir,
				  bool isjoininner)
{
	IndexPath  *pathnode = makeNode(IndexPath);
	RelOptInfo *rel = index->rel;
	List	   *indexquals,
			   *allclauses;

	/*
	 * For a join inner scan, there's no point in marking the path with any
	 * pathkeys, since it will only ever be used as the inner path of a
	 * nestloop, and so its ordering does not matter.  For the same reason we
	 * don't really care what order it's scanned in.  (We could expect the
	 * caller to supply the correct values, but it's easier to force it here.)
	 */
	if (isjoininner)
	{
		pathkeys = NIL;
		indexscandir = NoMovementScanDirection;
	}

	pathnode->path.pathtype = T_IndexScan;
	pathnode->path.parent = rel;
	pathnode->path.pathkeys = pathkeys;

	/* Convert clauses to indexquals the executor can handle */
	indexquals = expand_indexqual_conditions(index, clause_groups);

	/* Flatten the clause-groups list to produce indexclauses list */
	allclauses = flatten_clausegroups_list(clause_groups);

	/* Fill in the pathnode */
	pathnode->indexinfo = index;
	pathnode->indexclauses = allclauses;
	pathnode->indexquals = indexquals;

	pathnode->isjoininner = isjoininner;
	pathnode->indexscandir = indexscandir;

	if (isjoininner)
	{
		/*
		 * We must compute the estimated number of output rows for the
		 * indexscan.  This is less than rel->rows because of the additional
		 * selectivity of the join clauses.  Since clause_groups may contain
		 * both restriction and join clauses, we have to do a set union to get
		 * the full set of clauses that must be considered to compute the
		 * correct selectivity.  (Without the union operation, we might have
		 * some restriction clauses appearing twice, which'd mislead
		 * clauselist_selectivity into double-counting their selectivity.
		 * However, since RestrictInfo nodes aren't copied when linking them
		 * into different lists, it should be sufficient to use pointer
		 * comparison to remove duplicates.)
		 *
		 * Always assume the join type is JOIN_INNER; even if some of the join
		 * clauses come from other contexts, that's not our problem.
		 */
		allclauses = list_union_ptr(rel->baserestrictinfo, allclauses);
		pathnode->rows = rel->tuples *
			clauselist_selectivity(root,
								   allclauses,
								   rel->relid,	/* do not use 0! */
								   JOIN_INNER);
		/* Like costsize.c, force estimate to be at least one row */
		pathnode->rows = clamp_row_est(pathnode->rows);
	}
	else
	{
		/*
		 * The number of rows is the same as the parent rel's estimate, since
		 * this isn't a join inner indexscan.
		 */
		pathnode->rows = rel->rows;
	}

	/* cost_index fills in the path's startup_cost and total_cost */
	cost_index(pathnode, root, index, indexquals, isjoininner);

	return pathnode;
}

/*
 * create_bitmap_heap_path
 *	  Creates a path node for a bitmap scan.
 *
 * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
 */
BitmapHeapPath *
create_bitmap_heap_path(PlannerInfo *root,
						RelOptInfo *rel,
						Path *bitmapqual,
						bool isjoininner)
{
	BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);

	pathnode->path.pathtype = T_BitmapHeapScan;
	pathnode->path.parent = rel;
	pathnode->path.pathkeys = NIL;	/* always unordered */

	pathnode->bitmapqual = bitmapqual;
	pathnode->isjoininner = isjoininner;

	if (isjoininner)
	{
		/*
		 * We must compute the estimated number of output rows for the
		 * indexscan.  This is less than rel->rows because of the additional
		 * selectivity of the join clauses.
* We make use of the selectivity
		 * estimated for the bitmap to do this; this isn't really quite right
		 * since there may be restriction conditions not included in the
		 * bitmap ...
		 */
		Cost		indexTotalCost;
		Selectivity indexSelectivity;

		cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
		pathnode->rows = rel->tuples * indexSelectivity;
		/* never estimate more rows than the rel's base estimate */
		if (pathnode->rows > rel->rows)
			pathnode->rows = rel->rows;
		/* Like costsize.c, force estimate to be at least one row */
		pathnode->rows = clamp_row_est(pathnode->rows);
	}
	else
	{
		/*
		 * The number of rows is the same as the parent rel's estimate, since
		 * this isn't a join inner indexscan.
		 */
		pathnode->rows = rel->rows;
	}

	/* fills in the path's startup_cost and total_cost */
	cost_bitmap_heap_scan(&pathnode->path, root, rel, bitmapqual, false);

	return pathnode;
}

/*
 * create_bitmap_and_path
 *	  Creates a path node representing a BitmapAnd.
 */
BitmapAndPath *
create_bitmap_and_path(PlannerInfo *root,
					   RelOptInfo *rel,
					   List *bitmapquals)
{
	BitmapAndPath *pathnode = makeNode(BitmapAndPath);

	pathnode->path.pathtype = T_BitmapAnd;
	pathnode->path.parent = rel;
	pathnode->path.pathkeys = NIL;	/* always unordered */

	pathnode->bitmapquals = bitmapquals;

	/* this sets bitmapselectivity as well as the regular cost fields: */
	cost_bitmap_and_node(pathnode, root);

	return pathnode;
}

/*
 * create_bitmap_or_path
 *	  Creates a path node representing a BitmapOr.
 */
BitmapOrPath *
create_bitmap_or_path(PlannerInfo *root,
					  RelOptInfo *rel,
					  List *bitmapquals)
{
	BitmapOrPath *pathnode = makeNode(BitmapOrPath);

	pathnode->path.pathtype = T_BitmapOr;
	pathnode->path.parent = rel;
	pathnode->path.pathkeys = NIL;	/* always unordered */

	pathnode->bitmapquals = bitmapquals;

	/* this sets bitmapselectivity as well as the regular cost fields: */
	cost_bitmap_or_node(pathnode, root);

	return pathnode;
}

/*
 * create_tidscan_path
 *	  Creates a path corresponding to a tid_direct scan, returning the
 *	  pathnode.
 */
TidPath *
create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tideval)
{
	TidPath    *pathnode = makeNode(TidPath);

	pathnode->path.pathtype = T_TidScan;
	pathnode->path.parent = rel;
	pathnode->path.pathkeys = NIL;

	pathnode->tideval = tideval;

	cost_tidscan(&pathnode->path, root, rel, tideval);

	/*
	 * divide selectivity for each clause to get an equal selectivity as
	 * IndexScan does OK ?
	 */

	return pathnode;
}

/*
 * create_append_path
 *	  Creates a path corresponding to an Append plan, returning the
 *	  pathnode.
 */
AppendPath *
create_append_path(RelOptInfo *rel, List *subpaths)
{
	AppendPath *pathnode = makeNode(AppendPath);
	ListCell   *l;

	pathnode->path.pathtype = T_Append;
	pathnode->path.parent = rel;
	pathnode->path.pathkeys = NIL;	/* result is always considered
									 * unsorted */
	pathnode->subpaths = subpaths;

	/*
	 * Startup cost is that of the first subpath; total cost is the sum of
	 * all subpaths' total costs.
	 */
	pathnode->path.startup_cost = 0;
	pathnode->path.total_cost = 0;
	foreach(l, subpaths)
	{
		Path	   *subpath = (Path *) lfirst(l);

		if (l == list_head(subpaths))	/* first node? */
			pathnode->path.startup_cost = subpath->startup_cost;
		pathnode->path.total_cost += subpath->total_cost;
	}

	return pathnode;
}

/*
 * create_result_path
 *	  Creates a path corresponding to a Result plan, returning the
 *	  pathnode.
 */
ResultPath *
create_result_path(RelOptInfo *rel, Path *subpath, List *constantqual)
{
	ResultPath *pathnode = makeNode(ResultPath);

	pathnode->path.pathtype = T_Result;
	pathnode->path.parent = rel;	/* may be NULL */

	/* Result preserves the ordering of its input, if any */
	if (subpath)
		pathnode->path.pathkeys = subpath->pathkeys;
	else
		pathnode->path.pathkeys = NIL;

	pathnode->subpath = subpath;
	pathnode->constantqual = constantqual;

	/* Ideally should define cost_result(), but I'm too lazy */
	if (subpath)
	{
		pathnode->path.startup_cost = subpath->startup_cost;
		pathnode->path.total_cost = subpath->total_cost;
	}
	else
	{
		pathnode->path.startup_cost = 0;
		pathnode->path.total_cost = cpu_tuple_cost;
	}

	return pathnode;
}

/*
 * create_material_path
 *	  Creates a path corresponding to a Material plan, returning the
 *	  pathnode.
*/
MaterialPath *
create_material_path(RelOptInfo *rel, Path *subpath)
{
	MaterialPath *pathnode = makeNode(MaterialPath);

	pathnode->path.pathtype = T_Material;
	pathnode->path.parent = rel;
	/* materialization preserves the input ordering */
	pathnode->path.pathkeys = subpath->pathkeys;

	pathnode->subpath = subpath;

	cost_material(&pathnode->path,
				  subpath->total_cost,
				  rel->rows,
				  rel->width);

	return pathnode;
}

/*
 * create_unique_path
 *	  Creates a path representing elimination of distinct rows from the
 *	  input data.
 *
 * If used at all, this is likely to be called repeatedly on the same rel;
 * and the input subpath should always be the same (the cheapest_total path
 * for the rel).  So we cache the result.
 */
UniquePath *
create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
{
	UniquePath *pathnode;
	Path		sort_path;		/* dummy for result of cost_sort */
	Path		agg_path;		/* dummy for result of cost_agg */
	MemoryContext oldcontext;
	List	   *sub_targetlist;
	ListCell   *l;
	int			numCols;

	/* Caller made a mistake if subpath isn't cheapest_total */
	Assert(subpath == rel->cheapest_total_path);

	/* If result already cached, return it */
	if (rel->cheapest_unique_path)
		return (UniquePath *) rel->cheapest_unique_path;

	/*
	 * We must ensure path struct is allocated in same context as parent rel;
	 * otherwise GEQO memory management causes trouble.  (Compare
	 * best_inner_indexscan().)
	 */
	oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));

	pathnode = makeNode(UniquePath);

	/* There is no substructure to allocate, so can switch back right away */
	MemoryContextSwitchTo(oldcontext);

	pathnode->path.pathtype = T_Unique;
	pathnode->path.parent = rel;

	/*
	 * Treat the output as always unsorted, since we don't necessarily have
	 * pathkeys to represent it.
	 */
	pathnode->path.pathkeys = NIL;

	pathnode->subpath = subpath;

	/*
	 * Try to identify the targetlist that will actually be unique-ified.  In
	 * current usage, this routine is only used for sub-selects of IN clauses,
	 * so we should be able to find the tlist in in_info_list.
	 */
	sub_targetlist = NIL;
	foreach(l, root->in_info_list)
	{
		InClauseInfo *ininfo = (InClauseInfo *) lfirst(l);

		if (bms_equal(ininfo->righthand, rel->relids))
		{
			sub_targetlist = ininfo->sub_targetlist;
			break;
		}
	}

	/*
	 * If the input is a subquery whose output must be unique already, then we
	 * don't need to do anything.  The test for uniqueness has to consider
	 * exactly which columns we are extracting; for example "SELECT DISTINCT
	 * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
	 * this optimization unless we found our own targetlist above, and it
	 * consists only of simple Vars referencing subquery outputs.  (Possibly
	 * we could do something with expressions in the subquery outputs, too,
	 * but for now keep it simple.)
	 */
	if (sub_targetlist && rel->rtekind == RTE_SUBQUERY)
	{
		RangeTblEntry *rte = rt_fetch(rel->relid, root->parse->rtable);
		List	   *sub_tlist_colnos;

		sub_tlist_colnos = translate_sub_tlist(sub_targetlist, rel->relid);

		if (sub_tlist_colnos &&
			query_is_distinct_for(rte->subquery, sub_tlist_colnos))
		{
			/* No unique-ification needed: pass the subpath through as-is */
			pathnode->umethod = UNIQUE_PATH_NOOP;
			pathnode->rows = rel->rows;
			pathnode->path.startup_cost = subpath->startup_cost;
			pathnode->path.total_cost = subpath->total_cost;
			pathnode->path.pathkeys = subpath->pathkeys;

			rel->cheapest_unique_path = (Path *) pathnode;

			return pathnode;
		}
	}

	/*
	 * If we know the targetlist, try to estimate number of result rows;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -