Skip to content

Commit ed52f1b

Browse files
committed
aarch64: copy SP whenever it's involved in an address lowering with an explicit add
1 parent d721dfa commit ed52f1b

File tree

5 files changed

+122
-14
lines changed

5 files changed

+122
-14
lines changed

cranelift/codegen/src/isa/aarch64/abi.rs

Lines changed: 64 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -295,6 +295,10 @@ fn load_stack_from_fp(fp_offset: i64, into_reg: Writable<Reg>, ty: Type) -> Inst
295295
}
296296

297297
fn store_stack(mem: MemArg, from_reg: Reg, ty: Type) -> Inst {
298+
debug_assert!(match &mem {
299+
MemArg::SPOffset(off) => SImm9::maybe_from_i64(*off).is_some(),
300+
_ => true,
301+
});
298302
match ty {
299303
types::B1
300304
| types::B8
@@ -323,6 +327,49 @@ fn store_stack(mem: MemArg, from_reg: Reg, ty: Type) -> Inst {
323327
}
324328
}
325329

330+
fn store_stack_fp(fp_offset: i64, from_reg: Reg, ty: Type) -> Inst {
331+
store_stack(MemArg::FPOffset(fp_offset), from_reg, ty)
332+
}
333+
334+
fn store_stack_sp(
335+
sp_offset: i64,
336+
from_reg: Reg,
337+
ty: Type,
338+
tmp1: Writable<Reg>,
339+
tmp2: Writable<Reg>,
340+
) -> Vec<Inst> {
341+
if SImm9::maybe_from_i64(sp_offset).is_some() {
342+
vec![store_stack(MemArg::SPOffset(sp_offset), from_reg, ty)]
343+
} else {
344+
// mem_finalize will try to generate an add, but in an addition, x31 is the zero register,
345+
// not sp! So we have to synthesize the full add here.
346+
let mut result = Vec::new();
347+
// tmp1 := sp
348+
result.push(Inst::Mov {
349+
rd: tmp1,
350+
rm: stack_reg(),
351+
});
352+
// tmp2 := offset
353+
for inst in Inst::load_constant(tmp2, sp_offset as u64) {
354+
result.push(inst);
355+
}
356+
// tmp1 := add tmp1, tmp2
357+
result.push(Inst::AluRRR {
358+
alu_op: ALUOp::Add64,
359+
rd: tmp1,
360+
rn: tmp1.to_reg(),
361+
rm: tmp2.to_reg(),
362+
});
363+
// Actual store.
364+
result.push(store_stack(
365+
MemArg::Unscaled(tmp1.to_reg(), SImm9::maybe_from_i64(0).unwrap()),
366+
from_reg,
367+
ty,
368+
));
369+
result
370+
}
371+
}
372+
326373
fn is_callee_save(call_conv: isa::CallConv, r: RealReg) -> bool {
327374
if call_conv.extends_baldrdash() {
328375
match r.get_class() {
@@ -523,8 +570,8 @@ impl ABIBody for AArch64ABIBody {
523570
}
524571
_ => {}
525572
};
526-
ret.push(store_stack(
527-
MemArg::FPOffset(off + self.frame_size()),
573+
ret.push(store_stack_fp(
574+
off + self.frame_size(),
528575
from_reg.to_reg(),
529576
ty,
530577
))
@@ -566,7 +613,7 @@ impl ABIBody for AArch64ABIBody {
566613
// Offset from beginning of stackslot area, which is at FP - stackslots_size.
567614
let stack_off = self.stackslots[slot.as_u32() as usize] as i64;
568615
let fp_off: i64 = -(self.stackslots_size as i64) + stack_off + (offset as i64);
569-
store_stack(MemArg::FPOffset(fp_off), from_reg, ty)
616+
store_stack_fp(fp_off, from_reg, ty)
570617
}
571618

572619
fn stackslot_addr(&self, slot: StackSlot, offset: u32, into_reg: Writable<Reg>) -> Inst {
@@ -596,7 +643,7 @@ impl ABIBody for AArch64ABIBody {
596643
let islot = slot.get() as i64;
597644
let ty_size = self.get_spillslot_size(from_reg.get_class(), ty) * 8;
598645
let fp_off: i64 = -(self.stackslots_size as i64) - (8 * islot) - ty_size as i64;
599-
store_stack(MemArg::FPOffset(fp_off), from_reg, ty)
646+
store_stack_fp(fp_off, from_reg, ty)
600647
}
601648

602649
fn gen_prologue(&mut self) -> Vec<Inst> {
@@ -927,10 +974,20 @@ impl ABICall for AArch64ABICall {
927974
adjust_stack(self.sig.stack_arg_space as u64, /* is_sub = */ false)
928975
}
929976

930-
fn gen_copy_reg_to_arg(&self, idx: usize, from_reg: Reg) -> Inst {
977+
fn gen_copy_reg_to_arg(
978+
&self,
979+
idx: usize,
980+
from_reg: Reg,
981+
tmp1: Writable<Reg>,
982+
tmp2: Writable<Reg>,
983+
) -> Vec<Inst> {
931984
match &self.sig.args[idx] {
932-
&ABIArg::Reg(reg, ty) => Inst::gen_move(Writable::from_reg(reg.to_reg()), from_reg, ty),
933-
&ABIArg::Stack(off, ty) => store_stack(MemArg::SPOffset(off), from_reg, ty),
985+
&ABIArg::Reg(reg, ty) => vec![Inst::gen_move(
986+
Writable::from_reg(reg.to_reg()),
987+
from_reg,
988+
ty,
989+
)],
990+
&ABIArg::Stack(off, ty) => store_stack_sp(off, from_reg, ty, tmp1, tmp2),
934991
}
935992
}
936993

cranelift/codegen/src/isa/aarch64/inst/emit.rs

Lines changed: 30 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,14 @@ pub fn mem_finalize(insn_off: CodeOffset, mem: &MemArg) -> (Vec<Inst>, MemArg) {
3535
let mem = MemArg::Unscaled(basereg, simm9);
3636
(vec![], mem)
3737
} else {
38+
// In an addition, x31 is the zero register, not sp; we have only one temporary
39+
// so we can't do the proper add here.
40+
debug_assert_ne!(
41+
basereg,
42+
stack_reg(),
43+
"should have diverted SP before mem_finalize"
44+
);
45+
3846
let tmp = writable_spilltmp_reg();
3947
let mut const_insts = Inst::load_constant(tmp, off as u64);
4048
let add_inst = Inst::AluRRR {
@@ -363,7 +371,11 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
363371
ALUOp::Lsl32 | ALUOp::Lsl64 => 0b001000,
364372
_ => 0b000000,
365373
};
366-
assert_ne!(writable_stack_reg(), rd);
374+
debug_assert_ne!(writable_stack_reg(), rd);
375+
// The stack pointer is the zero register in this context, so this might be an
376+
// indication that something is wrong.
377+
debug_assert_ne!(stack_reg(), rn);
378+
debug_assert_ne!(stack_reg(), rm);
367379
sink.put4(enc_arith_rrr(top11, bit15_10, rd, rn, rm));
368380
}
369381
&Inst::AluRRRR {
@@ -818,11 +830,25 @@ impl<O: MachSectionOutput> MachInstEmit<O> for Inst {
818830
&Inst::Mov { rd, rm } => {
819831
assert!(rd.to_reg().get_class() == rm.get_class());
820832
assert!(rm.get_class() == RegClass::I64);
833+
821834
// MOV to SP is interpreted as MOV to XZR instead. And our codegen
822835
// should never MOV to XZR.
823-
assert!(machreg_to_gpr(rd.to_reg()) != 31);
824-
// Encoded as ORR rd, rm, zero.
825-
sink.put4(enc_arith_rrr(0b10101010_000, 0b000_000, rd, zero_reg(), rm));
836+
assert!(rd.to_reg() != stack_reg());
837+
838+
if rm == stack_reg() {
839+
// We can't use ORR here, so use an `add rd, sp, #0` instead.
840+
let imm12 = Imm12::maybe_from_u64(0).unwrap();
841+
sink.put4(enc_arith_rr_imm12(
842+
0b100_10001,
843+
imm12.shift_bits(),
844+
imm12.imm_bits(),
845+
rm,
846+
rd,
847+
));
848+
} else {
849+
// Encoded as ORR rd, rm, zero.
850+
sink.put4(enc_arith_rrr(0b10101010_000, 0b000_000, rd, zero_reg(), rm));
851+
}
826852
}
827853
&Inst::Mov32 { rd, rm } => {
828854
// MOV to SP is interpreted as MOV to XZR instead. And our codegen

cranelift/codegen/src/isa/aarch64/inst/mod.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1719,6 +1719,8 @@ impl MachInst for Inst {
17191719

17201720
fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
17211721
match self {
1722+
// TODO a regalloc assertion is triggered if we don't have this, see also #1586.
1723+
&Inst::Mov { rm, .. } if rm == stack_reg() => None,
17221724
&Inst::Mov { rd, rm } => Some((rd, rm)),
17231725
&Inst::FpuMove64 { rd, rn } => Some((rd, rn)),
17241726
_ => None,

cranelift/codegen/src/isa/aarch64/lower.rs

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -595,6 +595,20 @@ fn lower_address<C: LowerCtx<I = Inst>>(
595595
// Add each addend to the address.
596596
for addend in addends {
597597
let reg = input_to_reg(ctx, *addend, NarrowValueMode::ZeroExtend64);
598+
599+
// In an addition, the stack register is the zero register, so divert it to another
600+
// register just before doing the actual add.
601+
let reg = if reg == stack_reg() {
602+
let tmp = ctx.tmp(RegClass::I64, I64);
603+
ctx.emit(Inst::Mov {
604+
rd: tmp,
605+
rm: stack_reg(),
606+
});
607+
tmp.to_reg()
608+
} else {
609+
reg
610+
};
611+
598612
ctx.emit(Inst::AluRRR {
599613
alu_op: ALUOp::Add64,
600614
rd: addr.clone(),
@@ -1920,9 +1934,13 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(ctx: &mut C, insn: IRInst) {
19201934
ctx.emit(inst);
19211935
}
19221936
assert!(inputs.len() == abi.num_args());
1937+
let tmp1 = ctx.tmp(RegClass::I64, I64);
1938+
let tmp2 = ctx.tmp(RegClass::I64, I64);
19231939
for (i, input) in inputs.iter().enumerate() {
19241940
let arg_reg = input_to_reg(ctx, *input, NarrowValueMode::None);
1925-
ctx.emit(abi.gen_copy_reg_to_arg(i, arg_reg));
1941+
for inst in abi.gen_copy_reg_to_arg(i, arg_reg, tmp1, tmp2) {
1942+
ctx.emit(inst);
1943+
}
19261944
}
19271945
for inst in abi.gen_call().into_iter() {
19281946
ctx.emit(inst);

cranelift/codegen/src/machinst/abi.rs

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -132,9 +132,14 @@ pub trait ABICall {
132132
/// Get the number of arguments expected.
133133
fn num_args(&self) -> usize;
134134

135-
/// Save the clobbered registers.
136135
/// Copy an argument value from a source register, prior to the call.
137-
fn gen_copy_reg_to_arg(&self, idx: usize, from_reg: Reg) -> Self::I;
136+
fn gen_copy_reg_to_arg(
137+
&self,
138+
idx: usize,
139+
from_reg: Reg,
140+
tmp1: Writable<Reg>,
141+
tmp2: Writable<Reg>,
142+
) -> Vec<Self::I>;
138143

139144
/// Copy a return value into a destination register, after the call returns.
140145
fn gen_copy_retval_to_reg(&self, idx: usize, into_reg: Writable<Reg>) -> Self::I;

0 commit comments

Comments (0)