📄 assembler-arm.cc.svn-base
void Assembler::target_at_put(int pos, int target_pos) {
  int imm26 = target_pos - pos - 8;
  Instr instr = instr_at(pos);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if ((instr & CondMask) == nv) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~Imm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & Imm24Mask));
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
      int cond = instr & CondMask;
      const char* b;
      const char* c;
      if (cond == nv) {
        b = "blx";
        c = "";
      } else {
        if ((instr & B24) != 0)
          b = "bl";
        else
          b = "b";

        switch (cond) {
          case eq: c = "eq"; break;
          case ne: c = "ne"; break;
          case hs: c = "hs"; break;
          case lo: c = "lo"; break;
          case mi: c = "mi"; break;
          case pl: c = "pl"; break;
          case vs: c = "vs"; break;
          case vc: c = "vc"; break;
          case hi: c = "hi"; break;
          case ls: c = "ls"; break;
          case ge: c = "ge"; break;
          case lt: c = "lt"; break;
          case gt: c = "gt"; break;
          case le: c = "le"; break;
          case al: c = ""; break;
          default:
            c = "";
            UNREACHABLE();
        }
      }
      PrintF("%s%s\n", b, c);
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // do not eliminate jump instructions before the last bound position
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // append appendix to L's list
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  if (FLAG_eliminate_jumps) {
    // Resolve unbound label.
    if (unbound_label_.is_linked()) {
      // Unbound label exists => link it with L if same binding
      // position, otherwise fix it.
      if (binding_pos_ == pc_offset()) {
        // Link it to L's list.
        link_to(L, &unbound_label_);
      } else {
        // Otherwise bind unbound label.
        ASSERT(binding_pos_ < pc_offset());
        bind_to(&unbound_label_, binding_pos_);
      }
    }
    ASSERT(!unbound_label_.is_linked());

    // Try to eliminate jumps to next instruction.
    Instr instr;
    // Do not remove an already bound jump target.
    while (last_bound_pos_ < pc_offset() &&
           reloc_info_writer.last_pc() <= pc_ - kInstrSize &&
           L->is_linked() && L->pos() == pc_offset() - kInstrSize &&
           (((instr = instr_at(L->pos())) & CondMask) != nv &&  // not blx
            (instr & 15*B24) == 10*B24)) {  // b<cond>, but not bl<cond>
      // Previous instruction is b<cond> jumping immediately after it
      // => eliminate it
      if (FLAG_print_jump_elimination)
        PrintF("@ %d jump to next eliminated\n", L->pos());
      // Remove first entry from label list.
      next(L);
      // Eliminate instruction (set code pointers back).
      pc_ -= kInstrSize;
      // Make sure not to skip relocation information when rewinding.
      ASSERT(reloc_info_writer.last_pc() <= pc_);
    }
    // delay fixup of L => store it as unbound label
    unbound_label_ = *L;
    binding_pos_ = pc_offset();
    L->Unuse();
  }
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}


// Low-level code emission routines depending on the addressing mode
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
      *instr ^= 0x2*B21;
      return true;
    }
  }
  return false;
}


void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // immediate
    uint32_t rotate_imm;
    uint32_t immed_8;
    if ((x.rmode_ != RelocInfo::NONE &&
         x.rmode_ != RelocInfo::EXTERNAL_REFERENCE) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'
      RecordRelocInfo(x.rmode_, x.imm32_);
      ASSERT(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
        ldr(rd, MemOperand(pc, 0), cond);
      } else {
        ldr(ip, MemOperand(pc, 0), cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // immediate shift
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // register shift
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc))
    // block constant pool emission for one instruction after reading pc
    BlockConstPoolBefore(pc_offset() + kInstrSize);
}


void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // immediate offset
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset; the constructors make sure that both shift_imm_
    // and shift_op_ are initialized
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // immediate offset
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        static_cast<Condition>(instr & CondMask));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // register offset
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}


void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // unindexed addressing is not encoded by this function
  ASSERT((instr & ~(CondMask | P | U | N | W | L)) == (B27 | B26));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // post-indexed addressing requires W == 1; different than in addrmod2/3
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}


int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  // if we emit an unconditional jump/call and if the current position is the
  // target of the unbound label, we can change the binding position of the
  // unbound label, thereby eliminating an unnecessary jump
  bool can_eliminate = false;
  if (jump_elimination_allowed && FLAG_eliminate_jumps &&
      unbound_label_.is_linked() && binding_pos_ == pc_offset()) {
    can_eliminate = true;
    if (FLAG_print_jump_elimination) {
      PrintF("eliminated jumps/calls to %d from ", binding_pos_);
      print(&unbound_label_);
    }
  }

  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    if (can_eliminate)
      binding_pos_ = target_pos;
  } else {
    if (can_eliminate)
      link_to(L, &unbound_label_);  // may modify L's link
    if (L->is_linked())
      target_pos = L->pos();  // L's link
    else
      target_pos = kEndOfChain;
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label
  BlockConstPoolBefore(pc_offset() + kInstrSize);

  return target_pos - pc_offset() - 8;
}


// Branch instructions
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & Imm24Mask));

  if (cond == al)
    // dead code is a good location to emit the constant pool
    CheckConstPool(false, false);
}


void Assembler::bl(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
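The offset arithmetic in target_at_put() and branch_offset() above relies on the ARM convention that pc reads as the branch address plus 8, and on branch targets being word aligned so the low two bits are dropped. A small standalone worked example of that round trip (the byte positions are made up; this is a sketch, not V8 code):

// Standalone sketch of the b/bl offset encoding used above: the imm24 field
// holds (target - branch_pos - 8) >> 2, because the CPU sees pc as the
// branch address + 8 and offsets are word aligned.
#include <cassert>
#include <cstdio>

int main() {
  int branch_pos = 16;                      // byte offset of the branch
  int target_pos = 40;                      // byte offset of the target
  int imm26 = target_pos - branch_pos - 8;  // subtract the 8-byte pc bias
  int imm24 = imm26 >> 2;                   // word aligned, so drop two bits
  printf("imm24 field = %d\n", imm24);      // prints 4

  // Decoding reverses the steps: branch position + 8 + (imm24 << 2).
  assert(branch_pos + 8 + (imm24 << 2) == target_pos);
  return 0;
}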
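bind_to(), link_to(), and next() handle forward references by threading a linked list of pending branch positions through the code buffer itself: each unresolved branch temporarily stores the position of the previous unresolved branch (or kEndOfChain), and binding walks that chain, rewriting each slot with the real target via target_at_put(). The following is a deliberately simplified standalone model of that idea; MiniLabel, MiniAssembler, and the plain integer slots are inventions for illustration and ignore the real instruction encoding:

// Simplified model (not the real encoding): an unbound label threads a
// linked list through the emitted branch slots; bind() walks that chain
// and patches every slot with the final target.
#include <cstdio>
#include <vector>

const int kEndOfChain = -1;

struct MiniLabel {
  int pos = kEndOfChain;  // head of the chain of pending branches
  bool bound = false;
};

struct MiniAssembler {
  std::vector<int> code;  // each entry: a link or the final target

  void branch_to(MiniLabel* l) {
    if (l->bound) {
      code.push_back(l->pos);  // target already known
    } else {
      code.push_back(l->pos);  // store the previous link (or kEndOfChain)
      l->pos = static_cast<int>(code.size()) - 1;  // label now points here
    }
  }

  void bind(MiniLabel* l) {
    int target = static_cast<int>(code.size());
    int link = l->pos;
    while (link != kEndOfChain) {  // walk the chain, patching each slot
      int next = code[link];
      code[link] = target;
      link = next;
    }
    l->pos = target;
    l->bound = true;
  }
};

int main() {
  MiniAssembler masm;
  MiniLabel l;
  masm.branch_to(&l);      // forward branch #1
  masm.code.push_back(0);  // some other "instruction"
  masm.branch_to(&l);      // forward branch #2
  masm.bind(&l);           // both branch slots now hold the target, 3
  for (size_t i = 0; i < masm.code.size(); i++)
    printf("slot %zu -> %d\n", i, masm.code[i]);
  return 0;
}

Keeping the chain inside the not-yet-patched branch slots lets the assembler support any number of forward branches to one label without auxiliary allocations, which is the design the functions above implement on real ARM branch instructions.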
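fits_shifter() tests the ARM data-processing immediate rule: a constant is encodable only if it is an 8-bit value rotated right by an even amount (with a complemented fallback for mov/mvn); anything else makes addrmod1() materialize the constant through ip or a pc-relative ldr. A minimal standalone sketch of the core check, with hypothetical helper names (rotl, encode_operand2) that are not part of the assembler:

// Sketch of the operand-2 immediate test: imm32 is encodable if some even
// rotate-left brings it down to 8 bits, mirroring the loop in fits_shifter().
#include <cstdint>
#include <cstdio>

// Rotate left by n bits; guards n == 0 to avoid an undefined shift by 32.
static uint32_t rotl(uint32_t v, unsigned n) {
  return n == 0 ? v : (v << n) | (v >> (32 - n));
}

// Returns true and fills rot/imm8 if imm32 == imm8 rotated right by 2*rot.
static bool encode_operand2(uint32_t imm32, uint32_t* rot, uint32_t* imm8) {
  for (uint32_t r = 0; r < 16; r++) {
    uint32_t candidate = rotl(imm32, 2 * r);  // undo a rotate-right of 2*r
    if (candidate <= 0xff) {
      *rot = r;
      *imm8 = candidate;
      return true;
    }
  }
  return false;
}

int main() {
  uint32_t rot, imm8;
  for (uint32_t v : {0xffu, 0x3fc00u, 0xff000000u, 0x101u}) {
    if (encode_operand2(v, &rot, &imm8))
      printf("%#10x -> imm8=%#04x rot=%u\n", v, imm8, rot);
    else
      printf("%#10x -> not encodable (assembler would go through ip)\n", v);
  }
  return 0;
}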