trans-array.c
          /* Move the evaluation of scalar expressions outside the
             scalarization loop.  */
          if (subscript)
            se.expr = convert(gfc_array_index_type, se.expr);
          se.expr = gfc_evaluate_now (se.expr, &loop->pre);
          gfc_add_block_to_block (&loop->pre, &se.post);
        }
      else
        gfc_add_block_to_block (&loop->post, &se.post);

      ss->data.scalar.expr = se.expr;
      ss->string_length = se.string_length;
      break;

    case GFC_SS_REFERENCE:
      /* Scalar reference.  Evaluate this now.  */
      gfc_init_se (&se, NULL);
      gfc_conv_expr_reference (&se, ss->expr);
      gfc_add_block_to_block (&loop->pre, &se.pre);
      gfc_add_block_to_block (&loop->post, &se.post);

      ss->data.scalar.expr = gfc_evaluate_now (se.expr, &loop->pre);
      ss->string_length = se.string_length;
      break;

    case GFC_SS_SECTION:
      /* Add the expressions for scalar and vector subscripts.  */
      for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
        if (ss->data.info.subscript[n])
          gfc_add_loop_ss_code (loop, ss->data.info.subscript[n], true);

      gfc_set_vector_loop_bounds (loop, &ss->data.info);
      break;

    case GFC_SS_VECTOR:
      /* Get the vector's descriptor and store it in SS.  */
      gfc_init_se (&se, NULL);
      gfc_conv_expr_descriptor (&se, ss->expr, gfc_walk_expr (ss->expr));
      gfc_add_block_to_block (&loop->pre, &se.pre);
      gfc_add_block_to_block (&loop->post, &se.post);
      ss->data.info.descriptor = se.expr;
      break;

    case GFC_SS_INTRINSIC:
      gfc_add_intrinsic_ss_code (loop, ss);
      break;

    case GFC_SS_FUNCTION:
      /* Array function return value.  We call the function and save its
         result in a temporary for use inside the loop.  */
      gfc_init_se (&se, NULL);
      se.loop = loop;
      se.ss = ss;
      gfc_conv_expr (&se, ss->expr);
      gfc_add_block_to_block (&loop->pre, &se.pre);
      gfc_add_block_to_block (&loop->post, &se.post);
      ss->string_length = se.string_length;
      break;

    case GFC_SS_CONSTRUCTOR:
      gfc_trans_array_constructor (loop, ss);
      break;

    case GFC_SS_TEMP:
    case GFC_SS_COMPONENT:
      /* Do nothing.  These are handled elsewhere.  */
      break;

    default:
      gcc_unreachable ();
    }
  }
}


/* Translate expressions for the descriptor and data pointer of a SS.  */
/*GCC ARRAYS*/

static void
gfc_conv_ss_descriptor (stmtblock_t * block, gfc_ss * ss, int base)
{
  gfc_se se;
  tree tmp;

  /* Get the descriptor for the array to be scalarized.  */
  gcc_assert (ss->expr->expr_type == EXPR_VARIABLE);
  gfc_init_se (&se, NULL);
  se.descriptor_only = 1;
  gfc_conv_expr_lhs (&se, ss->expr);
  gfc_add_block_to_block (block, &se.pre);
  ss->data.info.descriptor = se.expr;
  ss->string_length = se.string_length;

  if (base)
    {
      /* Also the data pointer.  */
      tmp = gfc_conv_array_data (se.expr);
      /* If this is a variable or address of a variable we use it directly.
         Otherwise we must evaluate it now to avoid breaking dependency
         analysis by pulling the expressions for elemental array indices
         inside the loop.  */
      if (!(DECL_P (tmp)
            || (TREE_CODE (tmp) == ADDR_EXPR
                && DECL_P (TREE_OPERAND (tmp, 0)))))
        tmp = gfc_evaluate_now (tmp, block);
      ss->data.info.data = tmp;

      tmp = gfc_conv_array_offset (se.expr);
      ss->data.info.offset = gfc_evaluate_now (tmp, block);
    }
}


/* Initialize a gfc_loopinfo structure.  */

void
gfc_init_loopinfo (gfc_loopinfo * loop)
{
  int n;

  memset (loop, 0, sizeof (gfc_loopinfo));
  gfc_init_block (&loop->pre);
  gfc_init_block (&loop->post);

  /* Initially scalarize in order.  */
  for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
    loop->order[n] = n;

  loop->ss = gfc_ss_terminator;
}


/* Copies the loop variable info to a gfc_se structure.  Does not copy
   the SS chain.  */

void
gfc_copy_loopinfo_to_se (gfc_se * se, gfc_loopinfo * loop)
{
  se->loop = loop;
}


/* Return an expression for the data pointer of an array.  */

tree
gfc_conv_array_data (tree descriptor)
{
  tree type;

  type = TREE_TYPE (descriptor);
  if (GFC_ARRAY_TYPE_P (type))
    {
      if (TREE_CODE (type) == POINTER_TYPE)
        return descriptor;
      else
        {
          /* Descriptorless arrays.  */
          return gfc_build_addr_expr (NULL, descriptor);
        }
    }
  else
    return gfc_conv_descriptor_data_get (descriptor);
}


/* Return an expression for the base offset of an array.  */

tree
gfc_conv_array_offset (tree descriptor)
{
  tree type;

  type = TREE_TYPE (descriptor);
  if (GFC_ARRAY_TYPE_P (type))
    return GFC_TYPE_ARRAY_OFFSET (type);
  else
    return gfc_conv_descriptor_offset (descriptor);
}


/* Get an expression for the array stride.  */

tree
gfc_conv_array_stride (tree descriptor, int dim)
{
  tree tmp;
  tree type;

  type = TREE_TYPE (descriptor);

  /* For descriptorless arrays use the array size.  */
  tmp = GFC_TYPE_ARRAY_STRIDE (type, dim);
  if (tmp != NULL_TREE)
    return tmp;

  tmp = gfc_conv_descriptor_stride (descriptor, gfc_rank_cst[dim]);
  return tmp;
}


/* Like gfc_conv_array_stride, but for the lower bound.  */

tree
gfc_conv_array_lbound (tree descriptor, int dim)
{
  tree tmp;
  tree type;

  type = TREE_TYPE (descriptor);

  tmp = GFC_TYPE_ARRAY_LBOUND (type, dim);
  if (tmp != NULL_TREE)
    return tmp;

  tmp = gfc_conv_descriptor_lbound (descriptor, gfc_rank_cst[dim]);
  return tmp;
}


/* Like gfc_conv_array_stride, but for the upper bound.  */

tree
gfc_conv_array_ubound (tree descriptor, int dim)
{
  tree tmp;
  tree type;

  type = TREE_TYPE (descriptor);

  tmp = GFC_TYPE_ARRAY_UBOUND (type, dim);
  if (tmp != NULL_TREE)
    return tmp;

  /* This should only ever happen when passing an assumed shape array
     as an actual parameter.  The value will never be used.  */
  if (GFC_ARRAY_TYPE_P (TREE_TYPE (descriptor)))
    return gfc_index_zero_node;

  tmp = gfc_conv_descriptor_ubound (descriptor, gfc_rank_cst[dim]);
  return tmp;
}


/* Generate code to perform an array index bound check.  */

static tree
gfc_trans_array_bound_check (gfc_se * se, tree descriptor, tree index, int n)
{
  tree cond;
  tree fault;
  tree tmp;

  if (!flag_bounds_check)
    return index;

  index = gfc_evaluate_now (index, &se->pre);

  /* Check lower bound.  */
  tmp = gfc_conv_array_lbound (descriptor, n);
  fault = fold_build2 (LT_EXPR, boolean_type_node, index, tmp);

  /* Check upper bound.  */
  tmp = gfc_conv_array_ubound (descriptor, n);
  cond = fold_build2 (GT_EXPR, boolean_type_node, index, tmp);
  fault = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault, cond);

  gfc_trans_runtime_check (fault, gfc_strconst_fault, &se->pre);

  return index;
}


/* Return the offset for an index.  Performs bound checking for elemental
   dimensions.  Single element references are processed separately.  */

static tree
gfc_conv_array_index_offset (gfc_se * se, gfc_ss_info * info, int dim, int i,
                             gfc_array_ref * ar, tree stride)
{
  tree index;
  tree desc;
  tree data;

  /* Get the index into the array for this dimension.  */
  if (ar)
    {
      gcc_assert (ar->type != AR_ELEMENT);
      switch (ar->dimen_type[dim])
        {
        case DIMEN_ELEMENT:
          gcc_assert (i == -1);
          /* Elemental dimension.  */
          gcc_assert (info->subscript[dim]
                      && info->subscript[dim]->type == GFC_SS_SCALAR);
          /* We've already translated this value outside the loop.  */
          index = info->subscript[dim]->data.scalar.expr;

          index = gfc_trans_array_bound_check (se, info->descriptor,
                                               index, dim);
          break;

        case DIMEN_VECTOR:
          gcc_assert (info && se->loop);
          gcc_assert (info->subscript[dim]
                      && info->subscript[dim]->type == GFC_SS_VECTOR);
          desc = info->subscript[dim]->data.info.descriptor;

          /* Get a zero-based index into the vector.  */
          index = fold_build2 (MINUS_EXPR, gfc_array_index_type,
                               se->loop->loopvar[i], se->loop->from[i]);

          /* Multiply the index by the stride.  */
          index = fold_build2 (MULT_EXPR, gfc_array_index_type,
                               index, gfc_conv_array_stride (desc, 0));

          /* Read the vector to get an index into info->descriptor.  */
          data = gfc_build_indirect_ref (gfc_conv_array_data (desc));
          index = gfc_build_array_ref (data, index);
          index = gfc_evaluate_now (index, &se->pre);

          /* Do any bounds checking on the final info->descriptor index.  */
          index = gfc_trans_array_bound_check (se, info->descriptor,
                                               index, dim);
          break;

        case DIMEN_RANGE:
          /* Scalarized dimension.  */
          gcc_assert (info && se->loop);

          /* Multiply the loop variable by the stride and delta.  */
          index = se->loop->loopvar[i];
          index = fold_build2 (MULT_EXPR, gfc_array_index_type, index,
                               info->stride[i]);
          index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index,
                               info->delta[i]);
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    {
      /* Temporary array or derived type component.  */
      gcc_assert (se->loop);
      index = se->loop->loopvar[se->loop->order[i]];
      if (!integer_zerop (info->delta[i]))
        index = fold_build2 (PLUS_EXPR, gfc_array_index_type,
                             index, info->delta[i]);
    }

  /* Multiply by the stride.  */
  index = fold_build2 (MULT_EXPR, gfc_array_index_type, index, stride);

  return index;
}


/* Build a scalarized reference to an array.  */

static void
gfc_conv_scalarized_array_ref (gfc_se * se, gfc_array_ref * ar)
{
  gfc_ss_info *info;
  tree index;
  tree tmp;
  int n;

  info = &se->ss->data.info;
  if (ar)
    n = se->loop->order[0];
  else
    n = 0;

  index = gfc_conv_array_index_offset (se, info, info->dim[n], n, ar,
                                       info->stride0);
  /* Add the offset for this dimension to the stored offset for all other
     dimensions.  */
  index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, info->offset);

  tmp = gfc_build_indirect_ref (info->data);
  se->expr = gfc_build_array_ref (tmp, index);
}


/* Translate access of temporary array.  */

void
gfc_conv_tmp_array_ref (gfc_se * se)
{
  se->string_length = se->ss->string_length;
  gfc_conv_scalarized_array_ref (se, NULL);
}


/* Build an array reference.  se->expr already holds the array descriptor.
   This should be either a variable, indirect variable reference or component
   reference.  For arrays which do not have a descriptor, se->expr will be
   the data pointer.
   a(i, j, k) = base[offset + i * stride[0] + j * stride[1] + k * stride[2]]  */

void
gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar)
{
  int n;
  tree index;
  tree tmp;
  tree stride;
  tree fault;
  gfc_se indexse;

  /* Handle scalarized references separately.  */
  if (ar->type != AR_ELEMENT)
    {
      gfc_conv_scalarized_array_ref (se, ar);
      gfc_advance_se_ss_chain (se);
      return;
    }

  index = gfc_index_zero_node;

  fault = gfc_index_zero_node;

  /* Calculate the offsets from all the dimensions.  */
  for (n = 0; n < ar->dimen; n++)
    {
      /* Calculate the index for this dimension.  */
      gfc_init_se (&indexse, se);
      gfc_conv_expr_type (&indexse, ar->start[n], gfc_array_index_type);
      gfc_add_block_to_block (&se->pre, &indexse.pre);

      if (flag_bounds_check)
        {
          /* Check array bounds.  */
          tree cond;

          indexse.expr = gfc_evaluate_now (indexse.expr, &se->pre);

          tmp = gfc_conv_array_lbound (se->expr, n);
          cond = fold_build2 (LT_EXPR, boolean_type_node,
                              indexse.expr, tmp);
          fault = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                               fault, cond);

          tmp = gfc_conv_array_ubound (se->expr, n);
          cond = fold_build2 (GT_EXPR, boolean_type_node,
                              indexse.expr, tmp);
          fault = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                               fault, cond);
        }

      /* Multiply the index by the stride.  */
      stride = gfc_conv_array_stride (se->expr, n);
      tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, indexse.expr,
                         stride);

      /* And add it to the total.  */
      index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
    }

  if (flag_bounds_check)
    gfc_trans_runtime_check (fault, gfc_strconst_fault, &se->pre);

  tmp = gfc_conv_array_offset (se->expr);
  if (!integer_zerop (tmp))
    index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);

  /* Access the calculated element.  */
  tmp = gfc_conv_array_data (se->expr);
  tmp = gfc_build_indirect_ref (tmp);
  se->expr = gfc_build_array_ref (tmp, index);
}


/* Generate the code to be executed immediately before entering a
   scalarization loop.  */

static void
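The addressing rule quoted above gfc_conv_array_ref can be made concrete with a small, self-contained sketch. Everything below (the demo_desc layout, demo_index, and the fixed rank-3 example) is an illustrative assumption added for exposition; it is not part of trans-array.c and does not model libgfortran's real array descriptor.

/* Illustrative sketch only -- not gfortran code.  It mirrors the rule
     a(i, j, k) = base[offset + i*stride[0] + j*stride[1] + k*stride[2]]
   with a simplified stand-in descriptor.  */

#include <stdio.h>

typedef struct
{
  double *base;      /* data pointer */
  long offset;       /* folds the lower bounds into one constant */
  long stride[3];    /* per-dimension stride, in elements */
} demo_desc;

/* Flat element index for a rank-3 reference a(i, j, k).  */
static long
demo_index (const demo_desc *d, long i, long j, long k)
{
  return d->offset + i * d->stride[0] + j * d->stride[1] + k * d->stride[2];
}

int
main (void)
{
  double data[2 * 3 * 4];
  /* Column-major 2x3x4 array with lower bounds of 1:
     stride = {1, 2, 6}, offset = -(1*1 + 1*2 + 1*6) = -9,
     so a(1,1,1) maps to data[0] and a(2,3,4) to data[23].  */
  demo_desc d = { data, -9, { 1, 2, 6 } };
  long n;

  for (n = 0; n < 24; n++)
    data[n] = (double) n;

  printf ("a(2,3,4) -> element %ld = %g\n",
          demo_index (&d, 2, 3, 4), d.base[demo_index (&d, 2, 3, 4)]);
  return 0;
}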