py/asmarm: Give a proper name to the temporary register.

This commit performs a small refactoring on the Arm native emitter,
renaming all but one instance of ASM_ARM_REG_R8 to REG_TEMP.

ASM_ARM_REG_R8 is the temporary register used by the emitter when an
operation must not overwrite the value of a particular register and
extra scratch storage is therefore needed.

Signed-off-by: Alessandro Gatti <a.gatti@frob.it>
This commit is contained in:
Alessandro Gatti
2025-06-08 23:27:31 +02:00
committed by Damien George
parent bbab2e98f5
commit 5b90d6d418

View File

@@ -36,6 +36,8 @@
#include "py/asmarm.h"
#define REG_TEMP ASM_ARM_REG_R8
#define SIGNED_FIT24(x) (((x) & 0xff800000) == 0) || (((x) & 0xff000000) == 0xff000000)
// Insert word into instruction flow
@@ -171,8 +173,8 @@ void asm_arm_entry(asm_arm_t *as, int num_locals) {
if (as->stack_adjust < 0x100) {
emit_al(as, asm_arm_op_sub_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
} else {
asm_arm_mov_reg_i32_optimised(as, ASM_ARM_REG_R8, as->stack_adjust);
emit_al(as, asm_arm_op_sub_reg(ASM_ARM_REG_SP, ASM_ARM_REG_SP, ASM_ARM_REG_R8));
asm_arm_mov_reg_i32_optimised(as, REG_TEMP, as->stack_adjust);
emit_al(as, asm_arm_op_sub_reg(ASM_ARM_REG_SP, ASM_ARM_REG_SP, REG_TEMP));
}
}
}
@@ -182,8 +184,8 @@ void asm_arm_exit(asm_arm_t *as) {
if (as->stack_adjust < 0x100) {
emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
} else {
asm_arm_mov_reg_i32_optimised(as, ASM_ARM_REG_R8, as->stack_adjust);
emit_al(as, asm_arm_op_add_reg(ASM_ARM_REG_SP, ASM_ARM_REG_SP, ASM_ARM_REG_R8));
asm_arm_mov_reg_i32_optimised(as, REG_TEMP, as->stack_adjust);
emit_al(as, asm_arm_op_add_reg(ASM_ARM_REG_SP, ASM_ARM_REG_SP, REG_TEMP));
}
}
@@ -293,10 +295,10 @@ void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
if (local_num >= 0x40) {
// mov r8, #local_num*4
// add rd, sp, r8
asm_arm_mov_reg_i32_optimised(as, ASM_ARM_REG_R8, local_num << 2);
emit_al(as, asm_arm_op_add_reg(rd, ASM_ARM_REG_SP, ASM_ARM_REG_R8));
// mov temp, #local_num*4
// add rd, sp, temp
asm_arm_mov_reg_i32_optimised(as, REG_TEMP, local_num << 2);
emit_al(as, asm_arm_op_add_reg(rd, ASM_ARM_REG_SP, REG_TEMP));
} else {
// add rd, sp, #local_num*4
emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
@@ -338,10 +340,10 @@ void asm_arm_ldr_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offse
// ldr rd, [rn, #off]
emit_al(as, 0x5900000 | (rn << 16) | (rd << 12) | byte_offset);
} else {
// mov r8, #off
// ldr rd, [rn, r8]
asm_arm_mov_reg_i32_optimised(as, ASM_ARM_REG_R8, byte_offset);
emit_al(as, 0x7900000 | (rn << 16) | (rd << 12) | ASM_ARM_REG_R8);
// mov temp, #off
// ldr rd, [rn, temp]
asm_arm_mov_reg_i32_optimised(as, REG_TEMP, byte_offset);
emit_al(as, 0x7900000 | (rn << 16) | (rd << 12) | REG_TEMP);
}
}
@@ -352,8 +354,8 @@ void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
void asm_arm_ldrh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
// ldrh doesn't support scaled register index
emit_al(as, 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn); // mov r8, rn, lsl #1
emit_al(as, 0x19000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8); // ldrh rd, [rm, r8];
emit_al(as, 0x1a00080 | (REG_TEMP << 12) | rn); // mov temp, rn, lsl #1
emit_al(as, 0x19000b0 | (rm << 16) | (rd << 12) | REG_TEMP); // ldrh rd, [rm, temp];
}
void asm_arm_ldrh_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
@@ -361,10 +363,10 @@ void asm_arm_ldrh_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offs
// ldrh rd, [rn, #off]
emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12) | ((byte_offset & 0xf0) << 4) | (byte_offset & 0xf));
} else {
// mov r8, #off
// ldrh rd, [rn, r8]
asm_arm_mov_reg_i32_optimised(as, ASM_ARM_REG_R8, byte_offset);
emit_al(as, 0x19000b0 | (rn << 16) | (rd << 12) | ASM_ARM_REG_R8);
// mov temp, #off
// ldrh rd, [rn, temp]
asm_arm_mov_reg_i32_optimised(as, REG_TEMP, byte_offset);
emit_al(as, 0x19000b0 | (rn << 16) | (rd << 12) | REG_TEMP);
}
}
@@ -388,10 +390,10 @@ void asm_arm_str_reg_reg_offset(asm_arm_t *as, uint rd, uint rm, uint byte_offse
// str rd, [rm, #off]
emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
} else {
// mov r8, #off
// str rd, [rm, r8]
asm_arm_mov_reg_i32_optimised(as, ASM_ARM_REG_R8, byte_offset);
emit_al(as, 0x7800000 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8);
// mov temp, #off
// str rd, [rm, temp]
asm_arm_mov_reg_i32_optimised(as, REG_TEMP, byte_offset);
emit_al(as, 0x7800000 | (rm << 16) | (rd << 12) | REG_TEMP);
}
}
@@ -412,8 +414,8 @@ void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
// strh doesn't support scaled register index
emit_al(as, 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn); // mov r8, rn, lsl #1
emit_al(as, 0x18000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8); // strh rd, [rm, r8]
emit_al(as, 0x1a00080 | (REG_TEMP << 12) | rn); // mov temp, rn, lsl #1
emit_al(as, 0x18000b0 | (rm << 16) | (rd << 12) | REG_TEMP); // strh rd, [rm, temp]
}
void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {