trans-stmt.c
static tree
gfc_trans_forall_loop (forall_info *forall_tmp, int nvar, tree body,
                       int mask_flag)
{
  int n;
  tree tmp;
  tree cond;
  stmtblock_t block;
  tree exit_label;
  tree count;
  tree var, start, end, step;
  iter_info *iter;

  iter = forall_tmp->this_loop;
  for (n = 0; n < nvar; n++)
    {
      var = iter->var;
      start = iter->start;
      end = iter->end;
      step = iter->step;

      exit_label = gfc_build_label_decl (NULL_TREE);
      TREE_USED (exit_label) = 1;

      /* The loop counter.  */
      count = gfc_create_var (TREE_TYPE (var), "count");

      /* The body of the loop.  */
      gfc_init_block (&block);

      /* The exit condition.  */
      cond = build2 (LE_EXPR, boolean_type_node, count, integer_zero_node);
      tmp = build1_v (GOTO_EXPR, exit_label);
      tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt ());
      gfc_add_expr_to_block (&block, tmp);

      /* The main loop body.  */
      gfc_add_expr_to_block (&block, body);

      /* Increment the loop variable.  */
      tmp = build2 (PLUS_EXPR, TREE_TYPE (var), var, step);
      gfc_add_modify_expr (&block, var, tmp);

      /* Advance to the next mask element.  Only do this for the
         innermost loop.  */
      if (n == 0 && mask_flag && forall_tmp->mask)
        {
          tree maskindex = forall_tmp->maskindex;
          tmp = build2 (PLUS_EXPR, gfc_array_index_type,
                        maskindex, gfc_index_one_node);
          gfc_add_modify_expr (&block, maskindex, tmp);
        }

      /* Decrement the loop counter.  */
      tmp = build2 (MINUS_EXPR, TREE_TYPE (var), count, gfc_index_one_node);
      gfc_add_modify_expr (&block, count, tmp);

      body = gfc_finish_block (&block);

      /* Loop var initialization.  */
      gfc_init_block (&block);
      gfc_add_modify_expr (&block, var, start);

      /* Initialize maskindex counter.  Only do this before the
         outermost loop.  */
      if (n == nvar - 1 && mask_flag && forall_tmp->mask)
        gfc_add_modify_expr (&block, forall_tmp->maskindex,
                             gfc_index_zero_node);

      /* Initialize the loop counter.  */
      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (var), step, start);
      tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (var), end, tmp);
      tmp = fold_build2 (TRUNC_DIV_EXPR, TREE_TYPE (var), tmp, step);
      gfc_add_modify_expr (&block, count, tmp);

      /* The loop expression.  */
      tmp = build1_v (LOOP_EXPR, body);
      gfc_add_expr_to_block (&block, tmp);

      /* The exit label.  */
      tmp = build1_v (LABEL_EXPR, exit_label);
      gfc_add_expr_to_block (&block, tmp);

      body = gfc_finish_block (&block);
      iter = iter->next;
    }
  return body;
}
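
/* Illustrative sketch (not part of the original file): for a single FORALL
   index, the GENERIC built above corresponds roughly to the C control flow
   below.  The function and variable names here are hypothetical; only the
   loop shape matches what gfc_trans_forall_loop emits.  */

static void
forall_loop_shape_sketch (long start, long end, long step)
{
  long var = start;
  /* Trip count computed up front, as in the code above:
     (end + (step - start)) / step, truncated toward zero.  */
  long count = (end + (step - start)) / step;

  while (count > 0)   /* emitted as a LOOP_EXPR plus a conditional GOTO  */
    {
      /* ... assignment body; the innermost loop also advances the
         linear mask index here ...  */
      var += step;
      count -= 1;
    }
  (void) var;
}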

/* Generate the body and loops according to MASK_FLAG and NEST_FLAG.
   If MASK_FLAG is nonzero, the body is controlled by the masks in the
   forall nest; otherwise it is not.  If NEST_FLAG is nonzero, generate
   loops for the whole nested forall; otherwise generate loops only for
   the current forall level.  */

static tree
gfc_trans_nested_forall_loop (forall_info * nested_forall_info, tree body,
                              int mask_flag, int nest_flag)
{
  tree tmp;
  int nvar;
  forall_info *forall_tmp;
  tree pmask, mask, maskindex;

  forall_tmp = nested_forall_info;
  /* Generate loops for nested forall.  */
  if (nest_flag)
    {
      while (forall_tmp->next_nest != NULL)
        forall_tmp = forall_tmp->next_nest;
      while (forall_tmp != NULL)
        {
          /* Generate body with masks' control.  */
          if (mask_flag)
            {
              pmask = forall_tmp->pmask;
              mask = forall_tmp->mask;
              maskindex = forall_tmp->maskindex;

              if (mask)
                {
                  /* If a mask was specified make the assignment
                     conditional.  */
                  if (pmask)
                    tmp = gfc_build_indirect_ref (mask);
                  else
                    tmp = mask;
                  tmp = gfc_build_array_ref (tmp, maskindex);

                  body = build3_v (COND_EXPR, tmp, body, build_empty_stmt ());
                }
            }
          nvar = forall_tmp->nvar;
          body = gfc_trans_forall_loop (forall_tmp, nvar, body, mask_flag);
          forall_tmp = forall_tmp->outer;
        }
    }
  else
    {
      nvar = forall_tmp->nvar;
      body = gfc_trans_forall_loop (forall_tmp, nvar, body, mask_flag);
    }

  return body;
}


/* Allocate data for holding a temporary array.  Returns either a local
   temporary array or a pointer variable.  */

static tree
gfc_do_allocate (tree bytesize, tree size, tree * pdata, stmtblock_t * pblock,
                 tree elem_type)
{
  tree tmpvar;
  tree type;
  tree tmp;
  tree args;

  if (INTEGER_CST_P (size))
    {
      tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, size,
                         gfc_index_one_node);
    }
  else
    tmp = NULL_TREE;

  type = build_range_type (gfc_array_index_type, gfc_index_zero_node, tmp);
  type = build_array_type (elem_type, type);
  if (gfc_can_put_var_on_stack (bytesize))
    {
      gcc_assert (INTEGER_CST_P (size));
      tmpvar = gfc_create_var (type, "temp");
      *pdata = NULL_TREE;
    }
  else
    {
      tmpvar = gfc_create_var (build_pointer_type (type), "temp");
      *pdata = convert (pvoid_type_node, tmpvar);

      args = gfc_chainon_list (NULL_TREE, bytesize);
      if (gfc_index_integer_kind == 4)
        tmp = gfor_fndecl_internal_malloc;
      else if (gfc_index_integer_kind == 8)
        tmp = gfor_fndecl_internal_malloc64;
      else
        gcc_unreachable ();
      tmp = gfc_build_function_call (tmp, args);
      tmp = convert (TREE_TYPE (tmpvar), tmp);
      gfc_add_modify_expr (pblock, tmpvar, tmp);
    }
  return tmpvar;
}
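
/* Illustrative sketch (not part of the original file): with masks enabled,
   gfc_trans_nested_forall_loop wraps the body in a test of the current mask
   element before building the loops for each nesting level, and the mask
   index is advanced linearly in the innermost loop.  For
   FORALL (i = 1:n1, j = 1:n2, mask) a(i,j) = b(i,j) the effect is roughly
   the loop nest below; the index ordering and all names here are
   hypothetical, chosen for illustration only.  */

static void
nested_forall_mask_sketch (int n1, int n2, const unsigned char *mask,
                           double *a, const double *b)
{
  int i, j;
  int maskindex = 0;

  for (i = 0; i < n1; i++)
    for (j = 0; j < n2; j++)
      {
        if (mask[maskindex])
          a[i * n2 + j] = b[i * n2 + j];
        maskindex++;
      }
}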

/* Generate code to copy the temporary to the actual lhs.  */

static tree
generate_loop_for_temp_to_lhs (gfc_expr *expr, tree tmp1, tree count3,
                               tree count1, tree wheremask)
{
  gfc_ss *lss;
  gfc_se lse, rse;
  stmtblock_t block, body;
  gfc_loopinfo loop1;
  tree tmp, tmp2;
  tree wheremaskexpr;

  /* Walk the lhs.  */
  lss = gfc_walk_expr (expr);
  if (lss == gfc_ss_terminator)
    {
      gfc_start_block (&block);

      gfc_init_se (&lse, NULL);

      /* Translate the expression.  */
      gfc_conv_expr (&lse, expr);

      /* Form the expression for the temporary.  */
      tmp = gfc_build_array_ref (tmp1, count1);

      /* Use the scalar assignment as is.  */
      gfc_add_block_to_block (&block, &lse.pre);
      gfc_add_modify_expr (&block, lse.expr, tmp);
      gfc_add_block_to_block (&block, &lse.post);

      /* Increment count1.  */
      tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (count1), count1,
                         gfc_index_one_node);
      gfc_add_modify_expr (&block, count1, tmp);

      tmp = gfc_finish_block (&block);
    }
  else
    {
      gfc_start_block (&block);

      gfc_init_loopinfo (&loop1);
      gfc_init_se (&rse, NULL);
      gfc_init_se (&lse, NULL);

      /* Associate the lss with the loop.  */
      gfc_add_ss_to_loop (&loop1, lss);

      /* Calculate the bounds of the scalarization.  */
      gfc_conv_ss_startstride (&loop1);

      /* Setup the scalarizing loops.  */
      gfc_conv_loop_setup (&loop1);

      gfc_mark_ss_chain_used (lss, 1);

      /* Start the scalarized loop body.  */
      gfc_start_scalarized_body (&loop1, &body);

      /* Setup the gfc_se structures.  */
      gfc_copy_loopinfo_to_se (&lse, &loop1);
      lse.ss = lss;

      /* Form the expression of the temporary.  */
      if (lss != gfc_ss_terminator)
        rse.expr = gfc_build_array_ref (tmp1, count1);
      /* Translate expr.  */
      gfc_conv_expr (&lse, expr);

      /* Use the scalar assignment.  */
      tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts.type);

      /* Form the mask expression according to the mask tree list.  */
      if (wheremask)
        {
          wheremaskexpr = gfc_build_array_ref (wheremask, count3);
          tmp2 = TREE_CHAIN (wheremask);
          while (tmp2)
            {
              tmp1 = gfc_build_array_ref (tmp2, count3);
              wheremaskexpr = build2 (TRUTH_AND_EXPR, TREE_TYPE (tmp1),
                                      wheremaskexpr, tmp1);
              tmp2 = TREE_CHAIN (tmp2);
            }
          tmp = build3_v (COND_EXPR, wheremaskexpr, tmp, build_empty_stmt ());
        }

      gfc_add_expr_to_block (&body, tmp);

      /* Increment count1.  */
      tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
                         count1, gfc_index_one_node);
      gfc_add_modify_expr (&body, count1, tmp);

      /* Increment count3.  */
      if (count3)
        {
          tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
                             count3, gfc_index_one_node);
          gfc_add_modify_expr (&body, count3, tmp);
        }

      /* Generate the copying loops.  */
      gfc_trans_scalarizing_loops (&loop1, &body);
      gfc_add_block_to_block (&block, &loop1.pre);
      gfc_add_block_to_block (&block, &loop1.post);
      gfc_cleanup_loop (&loop1);

      tmp = gfc_finish_block (&block);
    }
  return tmp;
}
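
/* Illustrative sketch (not part of the original file): the copy-back loop
   built by generate_loop_for_temp_to_lhs, in plain C terms.  COUNT1 walks
   the linear temporary and COUNT3 walks the optional WHERE mask; as in the
   code above, both counters are advanced even for masked-out elements so
   they stay in step with the forall iteration.  All names below are
   hypothetical.  */

static void
temp_to_lhs_sketch (double *lhs, const double *temp,
                    const unsigned char *wheremask, long nelems)
{
  long count1 = 0;   /* index into the temporary  */
  long count3 = 0;   /* index into the WHERE mask  */
  long i;

  for (i = 0; i < nelems; i++)
    {
      if (wheremask == NULL || wheremask[count3])
        lhs[i] = temp[count1];
      count1++;
      count3++;
    }
}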

/* Generate code to copy the rhs to the temporary.  TMP1 is the address of
   the temporary.  LSS and RSS are formed in function
   compute_inner_temp_size (), and should not be freed.  */

static tree
generate_loop_for_rhs_to_temp (gfc_expr *expr2, tree tmp1, tree count3,
                               tree count1, gfc_ss *lss, gfc_ss *rss,
                               tree wheremask)
{
  stmtblock_t block, body1;
  gfc_loopinfo loop;
  gfc_se lse;
  gfc_se rse;
  tree tmp, tmp2;
  tree wheremaskexpr;

  gfc_start_block (&block);

  gfc_init_se (&rse, NULL);
  gfc_init_se (&lse, NULL);

  if (lss == gfc_ss_terminator)
    {
      gfc_init_block (&body1);
      gfc_conv_expr (&rse, expr2);
      lse.expr = gfc_build_array_ref (tmp1, count1);
    }
  else
    {
      /* Initialize the loop.  */
      gfc_init_loopinfo (&loop);

      /* We may need LSS to determine the shape of the expression.  */
      gfc_add_ss_to_loop (&loop, lss);
      gfc_add_ss_to_loop (&loop, rss);

      gfc_conv_ss_startstride (&loop);
      gfc_conv_loop_setup (&loop);

      gfc_mark_ss_chain_used (rss, 1);
      /* Start the loop body.  */
      gfc_start_scalarized_body (&loop, &body1);

      /* Translate the expression.  */
      gfc_copy_loopinfo_to_se (&rse, &loop);
      rse.ss = rss;
      gfc_conv_expr (&rse, expr2);

      /* Form the expression of the temporary.  */
      lse.expr = gfc_build_array_ref (tmp1, count1);
    }

  /* Use the scalar assignment.  */
  tmp = gfc_trans_scalar_assign (&lse, &rse, expr2->ts.type);

  /* Form the mask expression according to the mask tree list.  */
  if (wheremask)
    {
      wheremaskexpr = gfc_build_array_ref (wheremask, count3);
      tmp2 = TREE_CHAIN (wheremask);
      while (tmp2)
        {
          tmp1 = gfc_build_array_ref (tmp2, count3);
          wheremaskexpr = build2 (TRUTH_AND_EXPR, TREE_TYPE (tmp1),
                                  wheremaskexpr, tmp1);
          tmp2 = TREE_CHAIN (tmp2);
        }
      tmp = build3_v (COND_EXPR, wheremaskexpr, tmp, build_empty_stmt ());
    }

  gfc_add_expr_to_block (&body1, tmp);

  if (lss == gfc_ss_terminator)
    {
      gfc_add_block_to_block (&block, &body1);

      /* Increment count1.  */
      tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (count1), count1,
                         gfc_index_one_node);
      gfc_add_modify_expr (&block, count1, tmp);
    }
  else
    {
      /* Increment count1.  */
      tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
                         count1, gfc_index_one_node);
      gfc_add_modify_expr (&body1, count1, tmp);

      /* Increment count3.  */
      if (count3)
        {
          tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
                             count3, gfc_index_one_node);
          gfc_add_modify_expr (&body1, count3, tmp);
        }

      /* Generate the copying loops.  */
      gfc_trans_scalarizing_loops (&loop, &body1);
      gfc_add_block_to_block (&block, &loop.pre);
      gfc_add_block_to_block (&block, &loop.post);
      gfc_cleanup_loop (&loop);

      /* TODO: Reuse lss and rss when copying temp->lhs.  Need to be
         careful, as tree nodes in SS may not be valid in a different
         scope.  */
    }

  tmp = gfc_finish_block (&block);
  return tmp;
}


/* Calculate the size of temporary needed in the assignment inside forall.
   LSS and RSS are filled in this function.  */

static tree
compute_inner_temp_size (gfc_expr *expr1, gfc_expr *expr2,
                         stmtblock_t * pblock, gfc_ss **lss, gfc_ss **rss)
{
  gfc_loopinfo loop;
  tree size;
  int i;
  tree tmp;

  *lss = gfc_walk_expr (expr1);
  *rss = NULL;

  size = gfc_index_one_node;
  if (*lss != gfc_ss_terminator)
    {
      gfc_init_loopinfo (&loop);

      /* Walk the RHS of the expression.  */
      *rss = gfc_walk_expr (expr2);
      if (*rss == gfc_ss_terminator)
        {
          /* The rhs is scalar.  Add a ss for the expression.  */
          *rss = gfc_get_ss ();
          (*rss)->next = gfc_ss_terminator;
          (*rss)->type = GFC_SS_SCALAR;
          (*rss)->expr = expr2;
        }

      /* Associate the SS with the loop.  */
      gfc_add_ss_to_loop (&loop, *lss);
      /* We don't actually need to add the rhs at this point, but it might
         make guessing the loop bounds a bit easier.  */
      gfc_add_ss_to_loop (&loop, *rss);

      /* We only want the shape of the expression, not rest of the junk
         generated by the scalarizer.  */
      loop.array_parameter = 1;

      /* Calculate the bounds of the scalarization.  */
      gfc_conv_ss_startstride (&loop);
      gfc_conv_loop_setup (&loop);