Commit 1713ff0

GH-93444: remove redundant fields from basicblock: b_nofallthrough, b_exit, b_return (GH-93445)
1 parent f32e6b4 commit 1713ff0

File tree

2 files changed: +75 -78 lines changed

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+Removed redundant fields from the compiler's basicblock struct: ``b_nofallthrough``, ``b_exit``, ``b_return``. They can be easily calculated from the opcode of the last instruction of the block.
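
The pattern the commit adopts, stated in isolation: instead of keeping the b_nofallthrough, b_exit and b_return bitfields in sync at every site that emits or rewrites an instruction, each property is recomputed from the opcode of the block's last instruction whenever it is queried. Below is a minimal, self-contained sketch of that approach; the types and opcode names are simplified stand-ins for illustration, not the actual definitions from Python/compile.c.

#include <stdbool.h>

/* Simplified stand-ins for the compiler's instruction/block types. */
enum opcode {
    OP_NOP,
    OP_RETURN_VALUE,
    OP_RAISE_VARARGS,
    OP_RERAISE,
    OP_JUMP,            /* unconditional jump */
    OP_JUMP_IF_FALSE    /* conditional jump: fallthrough still possible */
};

struct cfg_instr { enum opcode i_opcode; };

struct cfg_block {
    struct cfg_instr *b_instr;   /* instruction array */
    int b_iused;                 /* number of instructions in use */
};

static const struct cfg_instr *
block_last_instr(const struct cfg_block *b)
{
    return b->b_iused > 0 ? &b->b_instr[b->b_iused - 1] : NULL;
}

/* Replaces the stored b_return flag: block ends with a return. */
static bool
block_returns(const struct cfg_block *b)
{
    const struct cfg_instr *last = block_last_instr(b);
    return last && last->i_opcode == OP_RETURN_VALUE;
}

/* Replaces the stored b_exit flag: block ends by leaving the scope. */
static bool
block_exits_scope(const struct cfg_block *b)
{
    const struct cfg_instr *last = block_last_instr(b);
    return last && (last->i_opcode == OP_RETURN_VALUE ||
                    last->i_opcode == OP_RAISE_VARARGS ||
                    last->i_opcode == OP_RERAISE);
}

/* Replaces the stored b_nofallthrough flag: a scope exit or an
 * unconditional jump means control never falls into the next block. */
static bool
block_no_fallthrough(const struct cfg_block *b)
{
    const struct cfg_instr *last = block_last_instr(b);
    return last && (block_exits_scope(b) || last->i_opcode == OP_JUMP);
}

The diff below applies the same idea with inline helpers (basicblock_returns, basicblock_exits_scope, basicblock_nofallthrough) and the BB_HAS_FALLTHROUGH / BB_NO_FALLTHROUGH convenience macros, so the flags no longer have to be updated by every optimization pass.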

Python/compile.c

Lines changed: 74 additions & 78 deletions
@@ -101,6 +101,10 @@
     (opcode) == POP_JUMP_IF_FALSE || \
     (opcode) == POP_JUMP_IF_TRUE)

+#define IS_JUMP_OPCODE(opcode) \
+        (IS_VIRTUAL_JUMP_OPCODE(opcode) || \
+         is_bit_set_in_table(_PyOpcode_Jump, opcode))
+
 /* opcodes which are not emitted in codegen stage, only by the assembler */
 #define IS_ASSEMBLER_OPCODE(opcode) \
     ((opcode) == JUMP_FORWARD || \
@@ -124,6 +128,17 @@
     (opcode) == POP_JUMP_BACKWARD_IF_TRUE || \
     (opcode) == POP_JUMP_BACKWARD_IF_FALSE)

+#define IS_UNCONDITIONAL_JUMP_OPCODE(opcode) \
+        ((opcode) == JUMP || \
+         (opcode) == JUMP_NO_INTERRUPT || \
+         (opcode) == JUMP_FORWARD || \
+         (opcode) == JUMP_BACKWARD || \
+         (opcode) == JUMP_BACKWARD_NO_INTERRUPT)
+
+#define IS_SCOPE_EXIT_OPCODE(opcode) \
+        ((opcode) == RETURN_VALUE || \
+         (opcode) == RAISE_VARARGS || \
+         (opcode) == RERAISE)

 #define IS_TOP_LEVEL_AWAIT(c) ( \
     (c->c_flags->cf_flags & PyCF_ALLOW_TOP_LEVEL_AWAIT) \
@@ -142,11 +157,6 @@ struct instr {
     int i_end_col_offset;
 };

-typedef struct excepthandler {
-    struct instr *setup;
-    int offset;
-} ExceptHandler;
-
 typedef struct exceptstack {
     struct basicblock_ *handlers[CO_MAXBLOCKS+1];
     int depth;
@@ -187,8 +197,7 @@ is_block_push(struct instr *instr)
 static inline int
 is_jump(struct instr *i)
 {
-    return IS_VIRTUAL_JUMP_OPCODE(i->i_opcode) ||
-           is_bit_set_in_table(_PyOpcode_Jump, i->i_opcode);
+    return IS_JUMP_OPCODE(i->i_opcode);
 }

 static int
@@ -254,16 +263,10 @@ typedef struct basicblock_ {
     int b_startdepth;
     /* instruction offset for block, computed by assemble_jump_offsets() */
     int b_offset;
-    /* Basic block has no fall through (it ends with a return, raise or jump) */
-    unsigned b_nofallthrough : 1;
     /* Basic block is an exception handler that preserves lasti */
     unsigned b_preserve_lasti : 1;
     /* Used by compiler passes to mark whether they have visited a basic block. */
     unsigned b_visited : 1;
-    /* Basic block exits scope (it ends with a return or raise) */
-    unsigned b_exit : 1;
-    /* b_return is true if a RETURN_VALUE opcode is inserted. */
-    unsigned b_return : 1;
     /* b_cold is true if this block is not perf critical (like an exception handler) */
     unsigned b_cold : 1;
     /* b_warm is used by the cold-detection algorithm to mark blocks which are definitely not cold */
@@ -279,6 +282,29 @@ basicblock_last_instr(basicblock *b) {
     return NULL;
 }

+static inline int
+basicblock_returns(basicblock *b) {
+    struct instr *last = basicblock_last_instr(b);
+    return last && last->i_opcode == RETURN_VALUE;
+}
+
+static inline int
+basicblock_exits_scope(basicblock *b) {
+    struct instr *last = basicblock_last_instr(b);
+    return last && IS_SCOPE_EXIT_OPCODE(last->i_opcode);
+}
+
+static inline int
+basicblock_nofallthrough(basicblock *b) {
+    struct instr *last = basicblock_last_instr(b);
+    return (last &&
+            (IS_SCOPE_EXIT_OPCODE(last->i_opcode) ||
+             IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)));
+}
+
+#define BB_NO_FALLTHROUGH(B) (basicblock_nofallthrough(B))
+#define BB_HAS_FALLTHROUGH(B) (!basicblock_nofallthrough(B))
+
 /* fblockinfo tracks the current frame block.

 A frame block is used to handle loops, try/except, and try/finally.
@@ -852,7 +878,7 @@ compiler_copy_block(struct compiler *c, basicblock *block)
     /* Cannot copy a block if it has a fallthrough, since
      * a block can only have one fallthrough predecessor.
      */
-    assert(block->b_nofallthrough);
+    assert(BB_NO_FALLTHROUGH(block));
     basicblock *result = compiler_new_block(c);
     if (result == NULL) {
         return NULL;
@@ -864,8 +890,6 @@ compiler_copy_block(struct compiler *c, basicblock *block)
         }
         result->b_instr[n] = block->b_instr[i];
     }
-    result->b_exit = block->b_exit;
-    result->b_nofallthrough = 1;
     return result;
 }

@@ -1223,11 +1247,7 @@ static int
 is_end_of_basic_block(struct instr *instr)
 {
     int opcode = instr->i_opcode;
-
-    return is_jump(instr) ||
-        opcode == RETURN_VALUE ||
-        opcode == RAISE_VARARGS ||
-        opcode == RERAISE;
+    return is_jump(instr) || IS_SCOPE_EXIT_OPCODE(opcode);
 }

 static int
@@ -1263,9 +1283,6 @@ basicblock_addop_line(basicblock *b, int opcode, int line,
     struct instr *i = &b->b_instr[off];
     i->i_opcode = opcode;
     i->i_oparg = 0;
-    if (opcode == RETURN_VALUE) {
-        b->b_return = 1;
-    }
     i->i_lineno = line;
     i->i_end_lineno = end_line;
     i->i_col_offset = col_offset;
@@ -7144,11 +7161,8 @@ stackdepth(struct compiler *c, basicblock *entry)
             }
             depth = new_depth;
             assert(!IS_ASSEMBLER_OPCODE(instr->i_opcode));
-            if (instr->i_opcode == JUMP_NO_INTERRUPT ||
-                instr->i_opcode == JUMP ||
-                instr->i_opcode == RETURN_VALUE ||
-                instr->i_opcode == RAISE_VARARGS ||
-                instr->i_opcode == RERAISE)
+            if (IS_UNCONDITIONAL_JUMP_OPCODE(instr->i_opcode) ||
+                IS_SCOPE_EXIT_OPCODE(instr->i_opcode))
             {
                 /* remaining code is dead */
                 next = NULL;
@@ -7159,7 +7173,7 @@ stackdepth(struct compiler *c, basicblock *entry)
             }
         }
         if (next != NULL) {
-            assert(b->b_nofallthrough == 0);
+            assert(BB_HAS_FALLTHROUGH(b));
             stackdepth_push(&sp, next, depth);
         }
     }
@@ -7314,7 +7328,7 @@ label_exception_targets(basicblock *entry) {
                 instr->i_except = handler;
                 assert(i == b->b_iused -1);
                 if (!instr->i_target->b_visited) {
-                    if (b->b_nofallthrough == 0) {
+                    if (BB_HAS_FALLTHROUGH(b)) {
                         ExceptStack *copy = copy_except_stack(except_stack);
                         if (copy == NULL) {
                             goto error;
@@ -7334,7 +7348,7 @@ label_exception_targets(basicblock *entry) {
                 instr->i_except = handler;
             }
         }
-        if (b->b_nofallthrough == 0 && !b->b_next->b_visited) {
+        if (BB_HAS_FALLTHROUGH(b) && !b->b_next->b_visited) {
             assert(except_stack != NULL);
             b->b_next->b_exceptstack = except_stack;
             todo[0] = b->b_next;
@@ -7373,7 +7387,7 @@ mark_warm(basicblock *entry) {
         assert(!b->b_except_predecessors);
         b->b_warm = 1;
         basicblock *next = b->b_next;
-        if (next && !b->b_nofallthrough && !next->b_visited) {
+        if (next && BB_HAS_FALLTHROUGH(b) && !next->b_visited) {
             *sp++ = next;
             next->b_visited = 1;
         }
@@ -7417,7 +7431,7 @@ mark_cold(basicblock *entry) {
         basicblock *b = *(--sp);
         b->b_cold = 1;
         basicblock *next = b->b_next;
-        if (next && !b->b_nofallthrough) {
+        if (next && BB_HAS_FALLTHROUGH(b)) {
             if (!next->b_warm && !next->b_visited) {
                 *sp++ = next;
                 next->b_visited = 1;
@@ -7452,15 +7466,14 @@ push_cold_blocks_to_end(struct compiler *c, basicblock *entry, int code_flags) {
     /* If we have a cold block with fallthrough to a warm block, add */
     /* an explicit jump instead of fallthrough */
     for (basicblock *b = entry; b != NULL; b = b->b_next) {
-        if (b->b_cold && !b->b_nofallthrough && b->b_next && b->b_next->b_warm) {
+        if (b->b_cold && BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_next->b_warm) {
             basicblock *explicit_jump = compiler_new_block(c);
             if (explicit_jump == NULL) {
                 return -1;
             }
             basicblock_add_jump(explicit_jump, JUMP, -1, 0, 0, 0, b->b_next);

             explicit_jump->b_cold = 1;
-            explicit_jump->b_nofallthrough = 1;
             explicit_jump->b_next = b->b_next;
             b->b_next = explicit_jump;
         }
@@ -7953,7 +7966,7 @@ scan_block_for_local(int target, basicblock *b, bool unsafe_to_start,
     if (unsafe) {
         // unsafe at end of this block,
         // so unsafe at start of next blocks
-        if (b->b_next && !b->b_nofallthrough) {
+        if (b->b_next && BB_HAS_FALLTHROUGH(b)) {
             MAYBE_PUSH(b->b_next);
         }
         if (b->b_iused > 0) {
@@ -8281,9 +8294,10 @@ dump_instr(struct instr *i)
 static void
 dump_basicblock(const basicblock *b)
 {
-    const char *b_return = b->b_return ? "return " : "";
+    const char *b_return = basicblock_returns(b) ? "return " : "";
     fprintf(stderr, "[%d %d %d %p] used: %d, depth: %d, offset: %d %s\n",
-        b->b_cold, b->b_warm, b->b_nofallthrough, b, b->b_iused, b->b_startdepth, b->b_offset, b_return);
+        b->b_cold, b->b_warm, BB_NO_FALLTHROUGH(b), b, b->b_iused,
+        b->b_startdepth, b->b_offset, b_return);
     if (b->b_instr) {
         int i;
         for (i = 0; i < b->b_iused; i++) {
@@ -8545,7 +8559,6 @@ remove_redundant_jumps(basicblock *entry) {
             b_last_instr->i_opcode == JUMP_NO_INTERRUPT) {
             if (b_last_instr->i_target == b->b_next) {
                 assert(b->b_next->b_iused);
-                b->b_nofallthrough = 0;
                 b_last_instr->i_opcode = NOP;
                 removed++;
             }
@@ -8572,7 +8585,7 @@ assemble(struct compiler *c, int addNone)
     }

     /* Make sure every block that falls off the end returns None. */
-    if (!c->u->u_curblock->b_return) {
+    if (!basicblock_returns(c->u->u_curblock)) {
         UNSET_LOC(c);
         if (addNone)
             ADDOP_LOAD_CONST(c, Py_None);
@@ -9064,7 +9077,6 @@ optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts)
                         jump_if_true = nextop == POP_JUMP_IF_TRUE;
                         if (is_true == jump_if_true) {
                             bb->b_instr[i+1].i_opcode = JUMP;
-                            bb->b_nofallthrough = 1;
                         }
                         else {
                             bb->b_instr[i+1].i_opcode = NOP;
@@ -9084,7 +9096,6 @@ optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts)
                         jump_if_true = nextop == JUMP_IF_TRUE_OR_POP;
                         if (is_true == jump_if_true) {
                             bb->b_instr[i+1].i_opcode = JUMP;
-                            bb->b_nofallthrough = 1;
                         }
                         else {
                             inst->i_opcode = NOP;
@@ -9273,7 +9284,7 @@ extend_block(basicblock *bb) {
         last->i_opcode != JUMP_BACKWARD) {
         return 0;
     }
-    if (last->i_target->b_exit && last->i_target->b_iused <= MAX_COPY_SIZE) {
+    if (basicblock_exits_scope(last->i_target) && last->i_target->b_iused <= MAX_COPY_SIZE) {
         basicblock *to_copy = last->i_target;
         last->i_opcode = NOP;
         for (int i = 0; i < to_copy->b_iused; i++) {
@@ -9283,7 +9294,6 @@ extend_block(basicblock *bb) {
             }
             bb->b_instr[index] = to_copy->b_instr[i];
         }
-        bb->b_exit = 1;
     }
     return 0;
 }
@@ -9341,34 +9351,21 @@ normalize_basic_block(basicblock *bb) {
     /* Mark blocks as exit and/or nofallthrough.
      Raise SystemError if CFG is malformed. */
     for (int i = 0; i < bb->b_iused; i++) {
-        assert(!IS_ASSEMBLER_OPCODE(bb->b_instr[i].i_opcode));
-        switch(bb->b_instr[i].i_opcode) {
-            case RETURN_VALUE:
-            case RAISE_VARARGS:
-            case RERAISE:
-                bb->b_exit = 1;
-                bb->b_nofallthrough = 1;
-                break;
-            case JUMP:
-            case JUMP_NO_INTERRUPT:
-                bb->b_nofallthrough = 1;
-                /* fall through */
-            case POP_JUMP_IF_NOT_NONE:
-            case POP_JUMP_IF_NONE:
-            case POP_JUMP_IF_FALSE:
-            case POP_JUMP_IF_TRUE:
-            case JUMP_IF_FALSE_OR_POP:
-            case JUMP_IF_TRUE_OR_POP:
-            case FOR_ITER:
-                if (i != bb->b_iused-1) {
-                    PyErr_SetString(PyExc_SystemError, "malformed control flow graph.");
-                    return -1;
-                }
-                /* Skip over empty basic blocks. */
-                while (bb->b_instr[i].i_target->b_iused == 0) {
-                    bb->b_instr[i].i_target = bb->b_instr[i].i_target->b_next;
-                }
-
+        int opcode = bb->b_instr[i].i_opcode;
+        assert(!IS_ASSEMBLER_OPCODE(opcode));
+        int is_jump = IS_JUMP_OPCODE(opcode);
+        int is_exit = IS_SCOPE_EXIT_OPCODE(opcode);
+        if (is_exit || is_jump) {
+            if (i != bb->b_iused-1) {
+                PyErr_SetString(PyExc_SystemError, "malformed control flow graph.");
+                return -1;
+            }
+        }
+        if (is_jump) {
+            /* Skip over empty basic blocks. */
+            while (bb->b_instr[i].i_target->b_iused == 0) {
+                bb->b_instr[i].i_target = bb->b_instr[i].i_target->b_next;
+            }
         }
     }
     return 0;
@@ -9386,7 +9383,7 @@ mark_reachable(struct assembler *a) {
     while (sp > stack) {
         basicblock *b = *(--sp);
         b->b_visited = 1;
-        if (b->b_next && !b->b_nofallthrough) {
+        if (b->b_next && BB_HAS_FALLTHROUGH(b)) {
             if (!b->b_next->b_visited) {
                 assert(b->b_next->b_predecessors == 0);
                 *sp++ = b->b_next;
@@ -9475,7 +9472,7 @@ propagate_line_numbers(struct assembler *a) {
                 COPY_INSTR_LOC(b->b_instr[i], prev_instr);
             }
         }
-        if (!b->b_nofallthrough && b->b_next->b_predecessors == 1) {
+        if (BB_HAS_FALLTHROUGH(b) && b->b_next->b_predecessors == 1) {
             assert(b->b_next->b_iused);
             if (b->b_next->b_instr[0].i_lineno < 0) {
                 COPY_INSTR_LOC(prev_instr, b->b_next->b_instr[0]);
@@ -9523,7 +9520,6 @@ optimize_cfg(struct compiler *c, struct assembler *a, PyObject *consts)
     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
         if (b->b_predecessors == 0) {
             b->b_iused = 0;
-            b->b_nofallthrough = 0;
         }
     }
     eliminate_empty_basic_blocks(a->a_entry);
@@ -9563,7 +9559,7 @@ trim_unused_consts(struct assembler *a, PyObject *consts)

 static inline int
 is_exit_without_lineno(basicblock *b) {
-    if (!b->b_exit) {
+    if (!basicblock_exits_scope(b)) {
         return 0;
     }
     for (int i = 0; i < b->b_iused; i++) {
@@ -9614,7 +9610,7 @@ duplicate_exits_without_lineno(struct compiler *c)
     /* Any remaining reachable exit blocks without line number can only be reached by
      * fall through, and thus can only have a single predecessor */
     for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
-        if (!b->b_nofallthrough && b->b_next && b->b_iused > 0) {
+        if (BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_iused > 0) {
             if (is_exit_without_lineno(b->b_next)) {
                 assert(b->b_next->b_iused > 0);
                 COPY_INSTR_LOC(b->b_instr[b->b_iused-1], b->b_next->b_instr[0]);
