Skip to content

Commit 1b0cdc0

Browse files
agatti authored and dpgeorge committed
py/asmthumb: Clean up integer-indexed load/store emitters.
This commit cleans up the single entry point integer-indexed load/store emitters that have been built by merging the single operand type load/store functions in 1f5ba69. To follow the same convention found in RV32 and Xtensa emitters, the function operand size is not named after the left shift amount to apply to the initial offset to get its true byte offset, but by a generic "operand size". Also, those functions were updated to use the new MP_FIT_UNSIGNED macros to perform bit width checks when figuring out which opcode encoding is the best one to use. Signed-off-by: Alessandro Gatti <a.gatti@frob.it>
1 parent 6fd7422 commit 1b0cdc0

File tree

2 files changed

+21
-19
lines changed

2 files changed

+21
-19
lines changed

py/asmthumb.c

Lines changed: 19 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -37,10 +37,8 @@
3737
#include "py/asmthumb.h"
3838
#include "py/misc.h"
3939

40-
#define UNSIGNED_FIT5(x) ((uint32_t)(x) < 32)
4140
#define UNSIGNED_FIT7(x) ((uint32_t)(x) < 128)
4241
#define UNSIGNED_FIT8(x) (((x) & 0xffffff00) == 0)
43-
#define UNSIGNED_FIT12(x) (((x) & 0xfffff000) == 0)
4442
#define UNSIGNED_FIT16(x) (((x) & 0xffff0000) == 0)
4543
#define SIGNED_FIT8(x) (((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80)
4644
#define SIGNED_FIT9(x) (((x) & 0xffffff00) == 0) || (((x) & 0xffffff00) == 0xffffff00)
@@ -454,7 +452,7 @@ static void asm_thumb_add_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint re
454452
}
455453
}
456454

457-
#define OP_LDR_STR_W_HI(shift, reg) ((0xf880 | (shift) << 5) | (reg))
455+
#define OP_LDR_STR_W_HI(operation_size, reg) ((0xf880 | (operation_size) << 5) | (reg))
458456
#define OP_LDR_STR_W_LO(reg, imm12) (((reg) << 12) | (imm12))
459457

460458
#define OP_LDR 0x01
@@ -467,31 +465,35 @@ static const uint8_t OP_LDR_STR_TABLE[3] = {
467465
0x0E, 0x10, 0x0C
468466
};
469467

470-
void asm_thumb_load_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint reg_base, uint offset, uint shift) {
471-
if (UNSIGNED_FIT5(offset) && (reg_dest < ASM_THUMB_REG_R8) && (reg_base < ASM_THUMB_REG_R8)) {
468+
void asm_thumb_load_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint reg_base, uint offset, uint operation_size) {
469+
assert(operation_size <= 2 && "Operation size out of range.");
470+
471+
if (MP_FIT_UNSIGNED(5, offset) && (reg_dest < ASM_THUMB_REG_R8) && (reg_base < ASM_THUMB_REG_R8)) {
472472
// Can use T1 encoding
473-
asm_thumb_op16(as, ((OP_LDR_STR_TABLE[shift] | OP_LDR) << 11) | (offset << 6) | (reg_base << 3) | reg_dest);
474-
} else if (asm_thumb_allow_armv7m(as) && UNSIGNED_FIT12(offset << shift)) {
473+
asm_thumb_op16(as, ((OP_LDR_STR_TABLE[operation_size] | OP_LDR) << 11) | (offset << 6) | (reg_base << 3) | reg_dest);
474+
} else if (asm_thumb_allow_armv7m(as) && MP_FIT_UNSIGNED(12, offset << operation_size)) {
475475
// Can use T3 encoding
476-
asm_thumb_op32(as, (OP_LDR_STR_W_HI(shift, reg_base) | OP_LDR_W), OP_LDR_STR_W_LO(reg_dest, (offset << shift)));
476+
asm_thumb_op32(as, (OP_LDR_STR_W_HI(operation_size, reg_base) | OP_LDR_W), OP_LDR_STR_W_LO(reg_dest, (offset << operation_size)));
477477
} else {
478478
// Must use the generic sequence
479-
asm_thumb_add_reg_reg_offset(as, reg_dest, reg_base, offset - 31, shift);
480-
asm_thumb_op16(as, ((OP_LDR_STR_TABLE[shift] | OP_LDR) << 11) | (31 << 6) | (reg_dest << 3) | (reg_dest));
479+
asm_thumb_add_reg_reg_offset(as, reg_dest, reg_base, offset - 31, operation_size);
480+
asm_thumb_op16(as, ((OP_LDR_STR_TABLE[operation_size] | OP_LDR) << 11) | (31 << 6) | (reg_dest << 3) | (reg_dest));
481481
}
482482
}
483483

484-
void asm_thumb_store_reg_reg_offset(asm_thumb_t *as, uint reg_src, uint reg_base, uint offset, uint shift) {
485-
if (UNSIGNED_FIT5(offset) && (reg_src < ASM_THUMB_REG_R8) && (reg_base < ASM_THUMB_REG_R8)) {
484+
void asm_thumb_store_reg_reg_offset(asm_thumb_t *as, uint reg_src, uint reg_base, uint offset, uint operation_size) {
485+
assert(operation_size <= 2 && "Operation size out of range.");
486+
487+
if (MP_FIT_UNSIGNED(5, offset) && (reg_src < ASM_THUMB_REG_R8) && (reg_base < ASM_THUMB_REG_R8)) {
486488
// Can use T1 encoding
487-
asm_thumb_op16(as, ((OP_LDR_STR_TABLE[shift] | OP_STR) << 11) | (offset << 6) | (reg_base << 3) | reg_src);
488-
} else if (asm_thumb_allow_armv7m(as) && UNSIGNED_FIT12(offset << shift)) {
489+
asm_thumb_op16(as, ((OP_LDR_STR_TABLE[operation_size] | OP_STR) << 11) | (offset << 6) | (reg_base << 3) | reg_src);
490+
} else if (asm_thumb_allow_armv7m(as) && MP_FIT_UNSIGNED(12, offset << operation_size)) {
489491
// Can use T3 encoding
490-
asm_thumb_op32(as, (OP_LDR_STR_W_HI(shift, reg_base) | OP_STR_W), OP_LDR_STR_W_LO(reg_src, (offset << shift)));
492+
asm_thumb_op32(as, (OP_LDR_STR_W_HI(operation_size, reg_base) | OP_STR_W), OP_LDR_STR_W_LO(reg_src, (offset << operation_size)));
491493
} else {
492494
// Must use the generic sequence
493-
asm_thumb_add_reg_reg_offset(as, reg_base, reg_base, offset - 31, shift);
494-
asm_thumb_op16(as, ((OP_LDR_STR_TABLE[shift] | OP_STR) << 11) | (31 << 6) | (reg_base << 3) | reg_src);
495+
asm_thumb_add_reg_reg_offset(as, reg_base, reg_base, offset - 31, operation_size);
496+
asm_thumb_op16(as, ((OP_LDR_STR_TABLE[operation_size] | OP_STR) << 11) | (31 << 6) | (reg_base << 3) | reg_src);
495497
}
496498
}
497499

py/asmthumb.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -365,9 +365,9 @@ void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num)
365365
void asm_thumb_mov_reg_pcrel(asm_thumb_t *as, uint rlo_dest, uint label);
366366

367367
// Generate optimised load dest, [src, #offset] sequence
368-
void asm_thumb_load_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint reg_base, uint offset, uint shift);
368+
void asm_thumb_load_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint reg_base, uint offset, uint operation_size);
369369
// Generate optimised store src, [dest, #offset] sequence
370-
void asm_thumb_store_reg_reg_offset(asm_thumb_t *as, uint reg_src, uint reg_base, uint offset, uint shift);
370+
void asm_thumb_store_reg_reg_offset(asm_thumb_t *as, uint reg_src, uint reg_base, uint offset, uint operation_size);
371371

372372
void asm_thumb_b_label(asm_thumb_t *as, uint label); // convenience: picks narrow or wide branch
373373
void asm_thumb_bcc_label(asm_thumb_t *as, int cc, uint label); // convenience: picks narrow or wide branch

0 commit comments

Comments (0)
pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy