@@ -418,7 +418,7 @@ uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_
418
418
// Called by the generated code when a branch stub is executed
419
419
// Triggers compilation of branches and code patching
420
420
static uint8_t *
421
- branch_stub_hit (uint32_t branch_idx , uint32_t target_idx , rb_execution_context_t * ec )
421
+ branch_stub_hit (const uint32_t branch_idx , const uint32_t target_idx , rb_execution_context_t * ec )
422
422
{
423
423
uint8_t * dst_addr ;
424
424
@@ -449,18 +449,6 @@ branch_stub_hit(uint32_t branch_idx, uint32_t target_idx, rb_execution_context_t
449
449
// may be out of sync in JITted code
450
450
ec -> cfp -> pc = iseq_pc_at_idx (target .iseq , target .idx );
451
451
452
- // If either of the target blocks will be placed next
453
- if (cb -> write_pos == branch -> end_pos )
454
- {
455
- //fprintf(stderr, "target idx %d will be placed next\n", target_idx);
456
- branch -> shape = (uint8_t )target_idx ;
457
-
458
- // Rewrite the branch with the new, potentially more compact shape
459
- cb_set_pos (cb , branch -> start_pos );
460
- branch -> gen_fn (cb , branch -> dst_addrs [0 ], branch -> dst_addrs [1 ], branch -> shape );
461
- RUBY_ASSERT (cb -> write_pos <= branch -> end_pos );
462
- }
463
-
464
452
// Try to find an existing compiled version of this block
465
453
block_t * p_block = find_block_version (target , target_ctx );
466
454
@@ -487,13 +475,26 @@ branch_stub_hit(uint32_t branch_idx, uint32_t target_idx, rb_execution_context_t
487
475
dst_addr = cb_get_ptr (cb , p_block -> start_pos );
488
476
branch -> dst_addrs [target_idx ] = dst_addr ;
489
477
478
+ // Adjust branch shape based on block placement relative to the branch
479
+ if (branch -> end_pos == p_block -> start_pos ) {
480
+ branch -> shape = (branch_shape_t )target_idx ;
481
+ }
482
+
490
483
// Rewrite the branch with the new jump target address
491
484
RUBY_ASSERT (branch -> dst_addrs [0 ] != NULL );
492
485
uint32_t cur_pos = cb -> write_pos ;
493
486
cb_set_pos (cb , branch -> start_pos );
494
487
branch -> gen_fn (cb , branch -> dst_addrs [0 ], branch -> dst_addrs [1 ], branch -> shape );
495
- RUBY_ASSERT (cb -> write_pos <= branch -> end_pos );
496
- branch -> end_pos = cb -> write_pos ;
488
+ RUBY_ASSERT (cb -> write_pos <= branch -> end_pos && "can't enlarge a branch" );
489
+
490
+ // If the branch got smaller
491
+ if (cb -> write_pos < branch -> end_pos ) {
492
+ // fill the difference with nops
493
+ uint32_t shrinkage = branch -> end_pos - cb -> write_pos ;
494
+ nop (cb , shrinkage );
495
+ }
496
+
497
+ // Done patching the branch. Restore write position.
497
498
cb_set_pos (cb , cur_pos );
498
499
499
500
// Restore interpreter sp, since the code hitting the stub expects the original.
0 commit comments