diff --git a/decompiler/IR2/AtomicOp.h b/decompiler/IR2/AtomicOp.h
index f31c867a2..458def998 100644
--- a/decompiler/IR2/AtomicOp.h
+++ b/decompiler/IR2/AtomicOp.h
@@ -125,6 +125,11 @@ class AtomicOp {
   const std::vector<Register>& write_regs() { return m_write_regs; }
   const std::vector<Register>& clobber_regs() { return m_clobber_regs; }
   void add_clobber_reg(Register r) { m_clobber_regs.push_back(r); }
+  void clear_register_info() {
+    m_read_regs.clear();
+    m_write_regs.clear();
+    m_clobber_regs.clear();
+  }
 
   virtual ~AtomicOp() = default;
 
diff --git a/decompiler/IR2/AtomicOpBuilder.cpp b/decompiler/IR2/AtomicOpBuilder.cpp
index 79f870867..c4975bac2 100644
--- a/decompiler/IR2/AtomicOpBuilder.cpp
+++ b/decompiler/IR2/AtomicOpBuilder.cpp
@@ -1037,10 +1037,15 @@ std::unique_ptr<AtomicOp> convert_dsubu_3(const Instruction& i0,
   return nullptr;
 }
 
-void add_clobber_if_unritten(AtomicOp& op, Register clobber) {
+void add_clobber_if_unwritten(AtomicOp& op, Register clobber) {
   op.update_register_info();
+  std::vector<Register> clobber_regs = op.clobber_regs();
   if (std::find(op.write_regs().begin(), op.write_regs().end(), clobber) == op.write_regs().end()) {
-    op.add_clobber_reg(clobber);
+    clobber_regs.push_back(clobber);
+  }
+  op.clear_register_info();
+  for (auto& reg : clobber_regs) {
+    op.add_clobber_reg(reg);
   }
 }
 
@@ -1082,7 +1087,7 @@ std::unique_ptr<AtomicOp> convert_slt_3(const Instruction& i0,
       condition.invert();
     }
     result = make_branch(condition, i2, false, dest, idx);
-    add_clobber_if_unritten(*result, temp);
+    add_clobber_if_unwritten(*result, temp);
     return result;
   } else if (i1.kind == InstructionKind::DADDIU &&
              (i2.kind == InstructionKind::MOVZ || i2.kind == InstructionKind::MOVN)) {
@@ -1117,7 +1122,7 @@ std::unique_ptr<AtomicOp> convert_slt_3(const Instruction& i0,
       condition.invert();
     }
     result = std::make_unique<SetVarConditionOp>(make_dst_var(dest, idx), condition, idx);
-    add_clobber_if_unritten(*result, temp);
+    add_clobber_if_unwritten(*result, temp);
     return result;
   }
   return nullptr;
@@ -1147,7 +1152,7 @@ std::unique_ptr<AtomicOp> convert_slti_3(const Instruction& i0,
      condition.invert();
     }
     result = make_branch(condition, i2, false, dest, idx);
-    add_clobber_if_unritten(*result, temp);
+    add_clobber_if_unwritten(*result, temp);
     return result;
   } else if (i1.kind == InstructionKind::DADDIU &&
             (i2.kind == InstructionKind::MOVZ || i2.kind == InstructionKind::MOVN)) {
@@ -1172,7 +1177,7 @@ std::unique_ptr<AtomicOp> convert_slti_3(const Instruction& i0,
      condition.invert();
     }
     result = std::make_unique<SetVarConditionOp>(make_dst_var(dest, idx), condition, idx);
-    add_clobber_if_unritten(*result, temp);
+    add_clobber_if_unwritten(*result, temp);
     return result;
   }
   return nullptr;
diff --git a/test/decompiler/test_AtomicOpBuilder.cpp b/test/decompiler/test_AtomicOpBuilder.cpp
index ee78b6911..e1be81487 100644
--- a/test/decompiler/test_AtomicOpBuilder.cpp
+++ b/test/decompiler/test_AtomicOpBuilder.cpp
@@ -118,6 +118,27 @@ void test_case(std::string assembly_lines,
   }
 }
 
+TEST(DecompilerAtomicOpBuilder, RegUseDuplication) {
+  auto assembly =
+      assembly_from_list({"L100:", "sltiu a3, a0, 12", "beq a3, r0, L100", "or a3, s7, r0"});
+
+  InstructionParser parser;
+  ParsedProgram prg = parser.parse_program(assembly);
+  EXPECT_EQ(prg.print(), assembly);
+  FunctionAtomicOps container;
+  convert_block_to_atomic_ops(0, prg.instructions.begin(), prg.instructions.end(), prg.labels,
+                              &container);
+  ASSERT_EQ(1, container.ops.size());
+  auto& op = container.ops.at(0);
+  for (const auto& reg_group : {op->read_regs(), op->write_regs(), op->clobber_regs()}) {
+    std::unordered_set<Register> unique_regs;
+    for (auto& reg : reg_group) {
+      unique_regs.insert(reg);
+    }
+    EXPECT_EQ(unique_regs.size(), reg_group.size());
+  }
+}
+
 TEST(DecompilerAtomicOpBuilder, Example) {
   test_case(assembly_from_list({"and v0, v1, a3", "and a1, a2, a2"}),
             {"(set! v0 (logand v1 a3))", "(set! a1 (logand a2 a2))"}, {{"v0"}, {"a1"}},