37
37
#include <assert.h>

#include "py/asmthumb.h"
#include "py/misc.h"
39
39
40
// Predicates testing whether a value fits in an n-bit unsigned field
// (UNSIGNED_FIT*) or an n-bit sign-extended field (SIGNED_FIT*).
// The SIGNED_FIT* forms check that the bits above the sign bit are all
// zero or all one (two's-complement sign extension of an n-bit value).
// Outer parentheses on SIGNED_FIT8/9 make them safe to embed in larger
// expressions (macro hygiene).
#define UNSIGNED_FIT5(x) ((uint32_t)(x) < 32)
#define UNSIGNED_FIT7(x) ((uint32_t)(x) < 128)
#define UNSIGNED_FIT8(x) (((x) & 0xffffff00) == 0)
#define UNSIGNED_FIT12(x) (((x) & 0xfffff000) == 0)
#define UNSIGNED_FIT16(x) (((x) & 0xffff0000) == 0)
#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
#define SIGNED_FIT9(x) ((((x) & 0xffffff00) == 0) || (((x) & 0xffffff00) == 0xffffff00))
@@ -454,7 +452,7 @@ static void asm_thumb_add_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint re
454
452
}
455
453
}
456
454
457
// Build the two halfwords of a wide (32-bit, T3) LDR/STR encoding.
// operation_size (0/1/2) selects the access size via bits 6:5 of the
// high halfword; reg is the base register.
#define OP_LDR_STR_W_HI(operation_size, reg) ((0xf880 | (operation_size) << 5) | (reg))
// Low halfword: destination/source register plus 12-bit byte offset.
#define OP_LDR_STR_W_LO(reg, imm12) (((reg) << 12) | (imm12))

#define OP_LDR 0x01
// Per-size opcode prefixes for the 16-bit (T1) load/store encodings,
// indexed by operation_size (0, 1, 2); the emitters OR in OP_LDR/OP_STR
// and shift left by 11 to form the final instruction halfword.
// NOTE(review): values presumably correspond to byte/halfword/word
// accesses — confirm against the Thumb encoding tables.
static const uint8_t OP_LDR_STR_TABLE[3] = {
    0x0E, 0x10, 0x0C
};
470
- void asm_thumb_load_reg_reg_offset (asm_thumb_t * as , uint reg_dest , uint reg_base , uint offset , uint shift ) {
471
- if (UNSIGNED_FIT5 (offset ) && (reg_dest < ASM_THUMB_REG_R8 ) && (reg_base < ASM_THUMB_REG_R8 )) {
468
+ void asm_thumb_load_reg_reg_offset (asm_thumb_t * as , uint reg_dest , uint reg_base , uint offset , uint operation_size ) {
469
+ assert (operation_size <= 2 && "Operation size out of range." );
470
+
471
+ if (MP_FIT_UNSIGNED (5 , offset ) && (reg_dest < ASM_THUMB_REG_R8 ) && (reg_base < ASM_THUMB_REG_R8 )) {
472
472
// Can use T1 encoding
473
- asm_thumb_op16 (as , ((OP_LDR_STR_TABLE [shift ] | OP_LDR ) << 11 ) | (offset << 6 ) | (reg_base << 3 ) | reg_dest );
474
- } else if (asm_thumb_allow_armv7m (as ) && UNSIGNED_FIT12 ( offset << shift )) {
473
+ asm_thumb_op16 (as , ((OP_LDR_STR_TABLE [operation_size ] | OP_LDR ) << 11 ) | (offset << 6 ) | (reg_base << 3 ) | reg_dest );
474
+ } else if (asm_thumb_allow_armv7m (as ) && MP_FIT_UNSIGNED ( 12 , offset << operation_size )) {
475
475
// Can use T3 encoding
476
- asm_thumb_op32 (as , (OP_LDR_STR_W_HI (shift , reg_base ) | OP_LDR_W ), OP_LDR_STR_W_LO (reg_dest , (offset << shift )));
476
+ asm_thumb_op32 (as , (OP_LDR_STR_W_HI (operation_size , reg_base ) | OP_LDR_W ), OP_LDR_STR_W_LO (reg_dest , (offset << operation_size )));
477
477
} else {
478
478
// Must use the generic sequence
479
- asm_thumb_add_reg_reg_offset (as , reg_dest , reg_base , offset - 31 , shift );
480
- asm_thumb_op16 (as , ((OP_LDR_STR_TABLE [shift ] | OP_LDR ) << 11 ) | (31 << 6 ) | (reg_dest << 3 ) | (reg_dest ));
479
+ asm_thumb_add_reg_reg_offset (as , reg_dest , reg_base , offset - 31 , operation_size );
480
+ asm_thumb_op16 (as , ((OP_LDR_STR_TABLE [operation_size ] | OP_LDR ) << 11 ) | (31 << 6 ) | (reg_dest << 3 ) | (reg_dest ));
481
481
}
482
482
}
483
483
484
- void asm_thumb_store_reg_reg_offset (asm_thumb_t * as , uint reg_src , uint reg_base , uint offset , uint shift ) {
485
- if (UNSIGNED_FIT5 (offset ) && (reg_src < ASM_THUMB_REG_R8 ) && (reg_base < ASM_THUMB_REG_R8 )) {
484
+ void asm_thumb_store_reg_reg_offset (asm_thumb_t * as , uint reg_src , uint reg_base , uint offset , uint operation_size ) {
485
+ assert (operation_size <= 2 && "Operation size out of range." );
486
+
487
+ if (MP_FIT_UNSIGNED (5 , offset ) && (reg_src < ASM_THUMB_REG_R8 ) && (reg_base < ASM_THUMB_REG_R8 )) {
486
488
// Can use T1 encoding
487
- asm_thumb_op16 (as , ((OP_LDR_STR_TABLE [shift ] | OP_STR ) << 11 ) | (offset << 6 ) | (reg_base << 3 ) | reg_src );
488
- } else if (asm_thumb_allow_armv7m (as ) && UNSIGNED_FIT12 ( offset << shift )) {
489
+ asm_thumb_op16 (as , ((OP_LDR_STR_TABLE [operation_size ] | OP_STR ) << 11 ) | (offset << 6 ) | (reg_base << 3 ) | reg_src );
490
+ } else if (asm_thumb_allow_armv7m (as ) && MP_FIT_UNSIGNED ( 12 , offset << operation_size )) {
489
491
// Can use T3 encoding
490
- asm_thumb_op32 (as , (OP_LDR_STR_W_HI (shift , reg_base ) | OP_STR_W ), OP_LDR_STR_W_LO (reg_src , (offset << shift )));
492
+ asm_thumb_op32 (as , (OP_LDR_STR_W_HI (operation_size , reg_base ) | OP_STR_W ), OP_LDR_STR_W_LO (reg_src , (offset << operation_size )));
491
493
} else {
492
494
// Must use the generic sequence
493
- asm_thumb_add_reg_reg_offset (as , reg_base , reg_base , offset - 31 , shift );
494
- asm_thumb_op16 (as , ((OP_LDR_STR_TABLE [shift ] | OP_STR ) << 11 ) | (31 << 6 ) | (reg_base << 3 ) | reg_src );
495
+ asm_thumb_add_reg_reg_offset (as , reg_base , reg_base , offset - 31 , operation_size );
496
+ asm_thumb_op16 (as , ((OP_LDR_STR_TABLE [operation_size ] | OP_STR ) << 11 ) | (31 << 6 ) | (reg_base << 3 ) | reg_src );
495
497
}
496
498
}
497
499
0 commit comments