From e78ef223ef50067f77d2f7892536ef25ebbf75c5 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 15 Jul 2025 15:59:07 +0200 Subject: [PATCH 1/9] integrate frameshift patch --- include/afl-fuzz.h | 72 ++++++++++++++++++++- include/envs.h | 2 +- src/afl-fuzz-one.c | 148 +++++++++++++++++++++++++++++++++++++++---- src/afl-fuzz-queue.c | 3 + src/afl-fuzz-run.c | 7 ++ src/afl-fuzz-state.c | 14 ++++ 6 files changed, 231 insertions(+), 15 deletions(-) diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index a0ee58bf73..a7ce03c1ac 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -180,6 +180,58 @@ struct havoc_profile { }; +/* Frameshift */ + +// A single frameshift relation field. +typedef struct fs_relation { + + // Dynamic values: + u64 pos; + u64 val; + u64 anchor; + u64 insert; + + // Backup to perform revert. + u64 _old_pos; + u64 _old_val; + u64 _old_anchor; + u64 _old_insert; + + // Fixed values: + u8 size; + u8 le; + u8 enabled; + +} fs_relation_t; + +typedef struct fs_idx_vec { + + u32 *idx; + u32 count; + u32 capacity; + +} fs_idx_vec_t; + +// Per-input metadata. +typedef struct fs_meta { + + fs_relation_t *relations; + u32 rel_count; + u32 rel_capacity; + + u8 *blocked_points_map; /* bitmap of blocked points */ + +} fs_meta_t; + +struct frameshift_stats { + + u32 searched; + u32 found; + u64 search_tests; + u64 total_time_ms; + +}; + struct skipdet_entry { u8 continue_inf, done_eff; @@ -257,6 +309,11 @@ struct queue_entry { struct tainted *taint; /* Taint information from CmpLog */ struct skipdet_entry *skipdet_e; + u8 fs_status; /* Frameshift status */ + /* 0: unexplored */ + /* 1: explored */ + fs_meta_t *fs_meta; /* Frameshift metadata */ + }; struct extra_data { @@ -463,7 +520,7 @@ typedef struct afl_env_vars { afl_post_process_keep_original, afl_crashing_seeds_as_new_crash, afl_final_sync, afl_ignore_seed_problems, afl_disable_redundant, afl_sha1_filenames, afl_no_sync, afl_no_fastresume, afl_forksrv_uid_set, - afl_forksrv_gid_set; + afl_forksrv_gid_set, afl_frameshift_enabled; u16 afl_forksrv_nb_supl_gids; @@ -876,6 +933,10 @@ typedef struct afl_state { /* Global Profile Data for deterministic/havoc-splice stage */ struct havoc_profile *havoc_prof; + struct frameshift_stats fs_stats; + u32 *frameshift_index_buffer; /* Buffer for frameshift index */ + fs_meta_t *fs_curr_meta; /* Metadata for the current input (full copy) */ + struct skipdet_global *skipdet_g; s64 last_scored_idx; /* Index of the last queue entry re-scored */ @@ -1343,6 +1404,15 @@ u8 is_det_timeout(u64, u8); void plot_profile_data(afl_state_t *, struct queue_entry *); +/* Frameshift functions */ +void frameshift_stage(afl_state_t *); +void fs_sanitize(fs_meta_t *, u8 *buf); +void fs_save(fs_meta_t *meta); +void fs_restore(fs_meta_t *meta); +int fs_track_insert(fs_meta_t *meta, u64 idx, u64 data_size, u8 ignore_invalid); +void fs_track_delete(fs_meta_t *meta, u64 idx, u64 data_size); +void fs_clone_meta(afl_state_t *afl); + /**** Inline routines ****/ /* Generate a random number (from 0 to limit - 1). 
This may diff --git a/include/envs.h b/include/envs.h index 1a745bde53..7a974f2387 100644 --- a/include/envs.h +++ b/include/envs.h @@ -122,7 +122,7 @@ static char *afl_environment_variables[] = { "AFL_PRINT_FILENAMES", "AFL_PIZZA_MODE", "AFL_NO_FASTRESUME", "AFL_SAN_ABSTRACTION", "AFL_LLVM_ONLY_FSRV", "AFL_GCC_ONLY_FRSV", "AFL_SAN_RECOVER", "AFL_PRELOAD_DISCRIMINATE_FORKSERVER_PARENT", - "AFL_FORKSRV_UID", "AFL_FORKSRV_GID", NULL}; + "AFL_FORKSRV_UID", "AFL_FORKSRV_GID", "AFL_FRAMESHIFT_ENABLED", NULL}; extern char *afl_environment_variables[]; diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index ea44be5a5e..da177d373e 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -410,19 +410,46 @@ u8 fuzz_one_original(afl_state_t *afl) { u_simplestring_time_diff(time_tmp, afl->prev_run_time + get_cur_time(), afl->start_time); - ACTF( - "Fuzzing test case #%u (%u total, %s%llu crashes saved%s, state: %s, " - "mode=%s, " - "perf_score=%0.0f, weight=%0.0f, favorite=%u, was_fuzzed=%u, " - "exec_us=%llu, hits=%u, map=%u, ascii=%u, run_time=%s)...", - afl->current_entry, afl->queued_items, - afl->saved_crashes != 0 ? cRED : "", afl->saved_crashes, cRST, - get_fuzzing_state(afl), afl->fuzz_mode ? "exploit" : "explore", - afl->queue_cur->perf_score, afl->queue_cur->weight, - afl->queue_cur->favored, afl->queue_cur->was_fuzzed, - afl->queue_cur->exec_us, - likely(afl->n_fuzz) ? afl->n_fuzz[afl->queue_cur->n_fuzz_entry] : 0, - afl->queue_cur->bitmap_size, afl->queue_cur->is_ascii, time_tmp); + + if (afl->afl_env.afl_frameshift_enabled) { + + u8 search_time[64]; + u_simplestring_time_diff(search_time, afl->fs_stats.total_time_ms + 1, 1); + + ACTF( + "Fuzzing test case #%u (%u total, %llu crashes saved, state: %s, " + "mode=%s, " + "perf_score=%0.0f, weight=%0.0f, favorite=%u, was_fuzzed=%u, " + "exec_us=%llu, hits=%u, map=%u, ascii=%u, run_time=%s) FS (t=%s, " + "st=%llu, found=%u/%u)...", + afl->current_entry, afl->queued_items, afl->saved_crashes, + get_fuzzing_state(afl), afl->fuzz_mode ? "exploit" : "explore", + afl->queue_cur->perf_score, afl->queue_cur->weight, + afl->queue_cur->favored, afl->queue_cur->was_fuzzed, + afl->queue_cur->exec_us, + likely(afl->n_fuzz) ? afl->n_fuzz[afl->queue_cur->n_fuzz_entry] : 0, + afl->queue_cur->bitmap_size, afl->queue_cur->is_ascii, time_tmp, + search_time, afl->fs_stats.search_tests, afl->fs_stats.found, + afl->fs_stats.searched); + + } else { + + ACTF( + "Fuzzing test case #%u (%u total, %s%llu crashes saved%s, state: %s, " + "mode=%s, " + "perf_score=%0.0f, weight=%0.0f, favorite=%u, was_fuzzed=%u, " + "exec_us=%llu, hits=%u, map=%u, ascii=%u, run_time=%s)...", + afl->current_entry, afl->queued_items, + afl->saved_crashes != 0 ? cRED : "", afl->saved_crashes, cRST, + get_fuzzing_state(afl), afl->fuzz_mode ? "exploit" : "explore", + afl->queue_cur->perf_score, afl->queue_cur->weight, + afl->queue_cur->favored, afl->queue_cur->was_fuzzed, + afl->queue_cur->exec_us, + likely(afl->n_fuzz) ? 
afl->n_fuzz[afl->queue_cur->n_fuzz_entry] : 0, + afl->queue_cur->bitmap_size, afl->queue_cur->is_ascii, time_tmp); + + } + fflush(stdout); } @@ -507,6 +534,20 @@ u8 fuzz_one_original(afl_state_t *afl) { memcpy(out_buf, in_buf, len); + /************** + * FRAMESHIFT * + **************/ + + if (unlikely(afl->afl_env.afl_frameshift_enabled && + afl->queue_cur->fs_status == 0)) { + + frameshift_stage(afl); + + } + + // Frameshift: reload the original input meta + if (afl->afl_env.afl_frameshift_enabled) { fs_clone_meta(afl); } + /********************* * PERFORMANCE SCORE * *********************/ @@ -2159,6 +2200,9 @@ u8 fuzz_one_original(afl_state_t *afl) { afl->queue_cur->fname, afl->queue_cur->is_ascii, use_stacking); #endif + // Frameshift: save the current input meta + if (afl->afl_env.afl_frameshift_enabled) { fs_save(afl->fs_curr_meta); } + for (i = 0; i < use_stacking; ++i) { if (afl->custom_mutators_count) { @@ -2542,6 +2586,13 @@ u8 fuzz_one_original(afl_state_t *afl) { afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, clone_to, clone_len, 1); + + } + } else if (unlikely(temp_len < 8)) { break; @@ -2593,6 +2644,13 @@ u8 fuzz_one_original(afl_state_t *afl) { afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, clone_to, clone_len, 1); + + } + } else if (unlikely(temp_len < 8)) { break; @@ -2770,6 +2828,13 @@ u8 fuzz_one_original(afl_state_t *afl) { temp_len -= del_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_delete(afl->fs_curr_meta, del_from, del_len); + + } + break; } @@ -2827,6 +2892,13 @@ u8 fuzz_one_original(afl_state_t *afl) { temp_len -= del_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_delete(afl->fs_curr_meta, del_from, del_len); + + } + break; } @@ -2866,6 +2938,13 @@ u8 fuzz_one_original(afl_state_t *afl) { afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, clone_to, clone_len, 1); + + } + break; } @@ -3007,6 +3086,13 @@ u8 fuzz_one_original(afl_state_t *afl) { afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += (new_len - old_len); + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, off, new_len, 1); + + } + } // fprintf(stderr, "AFTER : %s\n", out_buf); @@ -3100,6 +3186,13 @@ u8 fuzz_one_original(afl_state_t *afl) { memcpy(out_buf + insert_at, ptr, extra_len); temp_len += extra_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, insert_at, extra_len, 1); + + } + break; } @@ -3158,6 +3251,13 @@ u8 fuzz_one_original(afl_state_t *afl) { memcpy(out_buf + insert_at, ptr, extra_len); temp_len += extra_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, insert_at, extra_len, 1); + + } + break; } @@ -3271,6 +3371,13 @@ u8 fuzz_one_original(afl_state_t *afl) { afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, clone_to, clone_len, 1); 
+ + } + break; } @@ -3291,6 +3398,9 @@ u8 fuzz_one_original(afl_state_t *afl) { temp_len = len; memcpy(out_buf, in_buf, len); + // Frameshift: restore the original input meta + if (afl->afl_env.afl_frameshift_enabled) { fs_restore(afl->fs_curr_meta); } + /* If we're finding new stuff, let's run for a bit longer, limits permitting. */ @@ -3350,6 +3460,9 @@ u8 fuzz_one_original(afl_state_t *afl) { u8 *new_buf; s32 f_diff, l_diff; + // Frameshift: reload the original input meta + if (afl->afl_env.afl_frameshift_enabled) { fs_clone_meta(afl); } + /* First of all, if we've modified in_buf for havoc, let's clean that up... */ @@ -3400,6 +3513,15 @@ u8 fuzz_one_original(afl_state_t *afl) { if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_delete(afl->fs_curr_meta, split_at, + afl->queue_cur->len - split_at); + fs_track_insert(afl->fs_curr_meta, split_at, target->len - split_at, 1); + + } + goto custom_mutator_stage; } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 2b8dbd7110..0e57fad375 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -761,6 +761,9 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { q->skipdet_e = (struct skipdet_entry *)ck_alloc(sizeof(struct skipdet_entry)); + q->fs_meta = NULL; + q->fs_status = 0; + } /* Destroy the entire queue. */ diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 48a56053c6..fea992a9fc 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -1384,6 +1384,13 @@ u8 __attribute__((hot)) common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u8 fault; + if (afl->afl_env.afl_frameshift_enabled && afl->fs_curr_meta) { + + // Apply relation updates before running. + fs_sanitize(afl->fs_curr_meta, out_buf); + + } + if (unlikely(len = write_to_testcase(afl, (void **)&out_buf, len, 0)) == 0) { return 0; diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 993bb5dbbc..fd65ab0ff7 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -142,6 +142,13 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->havoc_prof = (struct havoc_profile *)ck_alloc(sizeof(struct havoc_profile)); + afl->frameshift_index_buffer = NULL; + afl->fs_curr_meta = NULL; + afl->fs_stats.found = 0; + afl->fs_stats.searched = 0; + afl->fs_stats.search_tests = 0; + afl->fs_stats.total_time_ms = 0; + init_mopt_globals(afl); list_append(&afl_states, afl); @@ -766,6 +773,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) { } + } else if (!strncmp(env, "AFL_FRAMESHIFT_ENABLED", + + afl_environment_variable_len)) { + + afl->afl_env.afl_frameshift_enabled = + get_afl_env(afl_environment_variables[i]) ? 1 : 0; + } } else { From 25633eb9b8bfccf59e8f29f1870d17b89d057bc6 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 16 Jul 2025 15:29:40 +0200 Subject: [PATCH 2/9] add missing file --- src/afl-fuzz-frameshift.c | 718 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 718 insertions(+) create mode 100644 src/afl-fuzz-frameshift.c diff --git a/src/afl-fuzz-frameshift.c b/src/afl-fuzz-frameshift.c new file mode 100644 index 0000000000..110de3ba5c --- /dev/null +++ b/src/afl-fuzz-frameshift.c @@ -0,0 +1,718 @@ + +#include "afl-fuzz.h" + +#define FRAMESHIFT_DEBUG 1 + +#define FRAMESHIFT_INITIAL_CAPACITY 128 + +#define FRAMESHIFT_MAX_ITERS 10 +#define FRAMESHIFT_LOSS_PCT 5 // 5% loss +#define FRAMESHIFT_RECOVER_PCT 20 // 20% recovery + +// Update the relation based on the given insertion. 
+// +// Returns 0 on success, 1 on error. +int rel_on_insert(fs_relation_t *rel, u64 idx, u64 size) { + + // Error if insert is inside the field. + if (idx > rel->pos && idx < rel->pos + rel->size) { return 1; } + + // Check if we should update the value of the field. + if (idx >= rel->anchor && idx <= rel->insert) { + + u64 pre = rel->val; + rel->val += size; + + if (rel->size < 8) { rel->val &= (1ULL << (rel->size * 8)) - 1; } + + // Check if we overflowed the field. + if (rel->val < pre) { return 1; } + + } + + // Move the field. + if (idx <= rel->pos) { rel->pos += size; } + + // Move the anchor point. + // Anchor point of 0 is locked. + if (idx < rel->anchor) { rel->anchor += size; } + + // Move the insert point. + if (idx <= rel->insert) { rel->insert += size; } + + return 0; + +} + +// Update the relation based on the given removal. +// +// Returns 0 on success, 1 on error. +int rel_on_remove(fs_relation_t *rel, u64 idx, u64 size) { + + // Error if remove overlaps the field. + if (idx < rel->pos + rel->size && idx + size > rel->pos) { return 1; } + + u64 pre_pos = (idx < rel->pos) ? MIN(rel->pos - idx, size) : 0; + u64 pre_anchor = (idx < rel->anchor) ? MIN(rel->anchor - idx, size) : 0; + u64 pre_insert = (idx < rel->insert) ? MIN(rel->insert - idx, size) : 0; + + // Compute overlap. + u64 overlap_min = MIN(MAX(idx, rel->anchor), rel->insert); + u64 overlap_max = MAX(MIN(idx + size, rel->anchor), rel->insert); + u64 overlap = overlap_max - overlap_min; + + // Adjust the field value. + if (overlap > rel->val) { + + return 1; + + } else { + + rel->val -= overlap; + + } + + // Adjust the field position. + rel->pos -= pre_pos; + rel->anchor -= pre_anchor; + rel->insert -= pre_insert; + + return 0; + +} + +// Apply the relation to the given buffer. +void rel_apply(u8 *buf, fs_relation_t *rel) { + + u32 i; + u64 val = rel->val; + u8 size = rel->size; + + if (rel->le) { + + for (i = 0; i < size; i++) { + + buf[rel->pos + i] = (u8)(val >> (i * 8)); + + } + + } else { + + for (i = 0; i < size; i++) { + + buf[rel->pos + size - 1 - i] = (u8)(val >> (i * 8)); + + } + + } + +} + +void rel_save(fs_relation_t *rel) { + + rel->_old_pos = rel->pos; + rel->_old_val = rel->val; + rel->_old_anchor = rel->anchor; + rel->_old_insert = rel->insert; + +} + +void rel_restore(fs_relation_t *rel) { + + rel->pos = rel->_old_pos; + rel->val = rel->_old_val; + rel->anchor = rel->_old_anchor; + rel->insert = rel->_old_insert; + + // Re-enable all + rel->enabled = 1; + +} + +void fs_add_relation(fs_meta_t *meta, fs_relation_t *rel) { + + if (meta->rel_count == meta->rel_capacity) { + + meta->rel_capacity *= 2; + meta->relations = + realloc(meta->relations, sizeof(fs_relation_t) * meta->rel_capacity); + + } + + memcpy(&meta->relations[meta->rel_count], rel, sizeof(fs_relation_t)); + meta->rel_count++; + + // Update blocked points map. + for (u32 i = 0; i < rel->size; i++) { + + meta->blocked_points_map[rel->pos + i] = 1; + + } + +} + +void fs_save(fs_meta_t *meta) { + + // printf("Saving metadata\n"); + for (u32 i = 0; i < meta->rel_count; i++) { + + fs_relation_t *rel = &meta->relations[i]; + rel_save(rel); + + } + +} + +void fs_restore(fs_meta_t *meta) { + + // printf("Restoring metadata\n"); + for (u32 i = 0; i < meta->rel_count; i++) { + + fs_relation_t *rel = &meta->relations[i]; + rel_restore(rel); + + } + +} + +// Insert data into the buffer at the given index. +// Update any relations that are affected by the insertion. +// If ignore_invalid is set, invalid insertions are ignored. 
+// Returns 0 on success, 1 on error. +int fs_track_insert(fs_meta_t *meta, u64 idx, u64 data_size, + u8 ignore_invalid) { + + // printf("Inserting %llu at %llu\n", data_size, idx); + for (u32 i = 0; i < meta->rel_count; i++) { + + if (meta->relations[i].enabled) { + + u8 res = rel_on_insert(&meta->relations[i], idx, data_size); + if (res) { + + if (ignore_invalid) { + + // Invalid insertion, disable relation and keep going. + meta->relations[i].enabled = 0; + + } else { + + // Invalid insertion, return error. + return 1; + + } + + } + + } + + } + + return 0; + +} + +void fs_track_delete(fs_meta_t *meta, u64 idx, u64 data_size) { + + // printf("Deleting %llu at %llu\n", data_size, idx); + for (u32 i = 0; i < meta->rel_count; i++) { + + if (meta->relations[i].enabled) { + + u8 res = rel_on_remove(&meta->relations[i], idx, data_size); + if (res) { + + // Invalid deletion, disable relation and keep going. + meta->relations[i].enabled = 0; + + } + + } + + } + +} + +void fs_sanitize(fs_meta_t *meta, u8 *buf) { + + // Apply the relations in reverse order. + for (u32 i = meta->rel_count - 1; i != (u32)-1; i--) { + + if (!meta->relations[i].enabled) { continue; } + + rel_apply(buf, &meta->relations[i]); + + } + +} + +void fs_clone_meta(afl_state_t *afl) { + + // printf("Cloning metadata\n"); + fs_meta_t *meta = afl->queue_cur->fs_meta; + fs_meta_t *fs_curr_meta = afl->fs_curr_meta; + if (unlikely(!fs_curr_meta)) { + + // Initial allocation. + fs_curr_meta = malloc(sizeof(fs_meta_t)); + fs_curr_meta->rel_count = 0; + fs_curr_meta->rel_capacity = FRAMESHIFT_INITIAL_CAPACITY; + fs_curr_meta->relations = + malloc(sizeof(fs_relation_t) * fs_curr_meta->rel_capacity); + afl->fs_curr_meta = fs_curr_meta; + + } + + // Copy relation data over. + if (fs_curr_meta->rel_capacity < meta->rel_count) { + + // Increase capacity if needed. + fs_curr_meta->relations = realloc(fs_curr_meta->relations, + sizeof(fs_relation_t) * meta->rel_count); + fs_curr_meta->rel_capacity = meta->rel_count; + + } + + memcpy(fs_curr_meta->relations, meta->relations, + sizeof(fs_relation_t) * meta->rel_count); + fs_curr_meta->rel_count = meta->rel_count; + + // Blocked points will be read only after this, so we can shallow copy. 
+ fs_curr_meta->blocked_points_map = meta->blocked_points_map; + +} + +fs_meta_t *fs_new_meta(u32 size) { + + fs_meta_t *meta = malloc(sizeof(fs_meta_t)); + meta->rel_count = 0; + meta->rel_capacity = FRAMESHIFT_INITIAL_CAPACITY; + meta->relations = malloc(sizeof(fs_relation_t) * meta->rel_capacity); + + meta->blocked_points_map = malloc(size); + memset(meta->blocked_points_map, 0, size); + + return meta; + +} + +void lightweight_run(afl_state_t *afl, u8 *out_buf, u32 len) { + + afl->fs_stats.search_tests++; + + write_to_testcase(afl, (void **)&out_buf, len, 0); + + u8 fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout); + + afl->queued_discovered += save_if_interesting(afl, out_buf, len, fault); + +} + +void print_buffer(u8 *buf, u32 len) { + + for (u32 i = 0; i < len; i++) { + + printf("%02x ", buf[i]); + + } + + printf("\n"); + +} + +typedef struct field_tmpl { + + u8 size; + u8 le; + +} field_tmpl_t; + +const field_tmpl_t FRAMESHIFT_SEARCH_ORDER[] = { + + {8, 1}, // u64 - little + {8, 0}, // u64 - big + {4, 1}, // u32 - little + {4, 0}, // u32 - big + {2, 1}, // u16 - little + {2, 0}, // u16 - big + {1, 1}, // u8 - little + +}; + +u64 decode_value(u8 *buf, u8 size, u8 le) { + + u64 val = 0; + if (le) { + + for (u8 i = 0; i < size; i++) { + + val |= ((u64)buf[i]) << (i * 8); + + } + + } else { + + for (u8 i = 0; i < size; i++) { + + val |= ((u64)buf[size - 1 - i]) << (i * 8); + + } + + } + + return val; + +} + +int is_blocked(fs_meta_t *meta, u32 pos, u8 size) { + + for (u32 i = 0; i < size; i++) { + + if (meta->blocked_points_map[pos + i]) { return 1; } + + } + + return 0; + +} + +void check_anchor(afl_state_t *afl, u32 anchor, u32 len, u32 curr_size, + u32 field_pos, u8 *buf, fs_meta_t *meta, u8 *trace_bits, + u32 *loss_buffer, u32 loss_count, u8 *scratch, + u32 shift_amount, fs_relation_t *potential_rel, + double *curr_recover) { + + // Check if the anchor is valid. + if (anchor > len) { return; } + + u32 insertion = anchor + curr_size; + if (insertion > len) { return; } + + // Construct testcase with valid insertion. + memcpy(scratch, buf, insertion); + memset(scratch + insertion, 0x41, shift_amount); + memcpy(scratch + insertion + shift_amount, buf + insertion, len - insertion); + + // Handle on_insert for the prospective relation manually. + if (insertion < potential_rel->pos) { + + // Temporarily shift the relation to apply on the scratch buffer. + potential_rel->pos += shift_amount; + + } + + rel_apply(scratch, potential_rel); + potential_rel->pos = field_pos; + + fs_save(meta); + u8 res = fs_track_insert(meta, insertion, shift_amount, 0); + fs_sanitize(meta, scratch); + fs_restore(meta); + if (res) { + + // Invalid insertion, return. + return; + + } + + // Measure recovery. + lightweight_run(afl, scratch, len + shift_amount); + + u64 recover_count = 0; + for (u32 j = 0; j < loss_count; j++) { + + u32 idx = loss_buffer[j]; + if (trace_bits[idx] > 0) { recover_count++; } + + } + + double recover_pct = (double)recover_count / loss_count; + + // printf(" -> Anchor: %u, Insertion: %u, Recovery: %.2f%%\n", anchor, + // insertion, recover_pct * 100); + + // Update the best relation if we have a better recovery. + if (recover_pct > *curr_recover) { + + potential_rel->anchor = anchor; + potential_rel->insert = insertion; + *curr_recover = recover_pct; + + } + +} + +void frameshift_stage(afl_state_t *afl) { + + printf("Frameshift stage\n"); + + u64 time_start = get_cur_time(); + + if (unlikely(!afl->frameshift_index_buffer)) { + + // Allocate the frameshift index buffer. 
+ afl->frameshift_index_buffer = malloc(afl->fsrv.map_size * sizeof(u32)); + + } + + u32 *index_buf = afl->frameshift_index_buffer; + u32 index_count = 0; + + u8 *buf = afl->queue_cur->testcase_buf; + u32 len = afl->queue_cur->len; + + u8 *scratch = malloc(len + 0x100); // We will at most shift by 0xff + + // Print out +#ifdef FRAMESHIFT_DEBUG + printf("[FS] Input buffer: "); + print_buffer(buf, len); +#endif + + // Update queue state + afl->queue_cur->fs_status = 1; + + // Initialize relation metadata + fs_meta_t *meta = fs_new_meta(len); + afl->queue_cur->fs_meta = meta; + + // Compute base coverage for this testcase. + u8 *trace_bits = afl->fsrv.trace_bits; + u32 map_size = afl->fsrv.map_size; + + // Compute coverage of this testcase. + lightweight_run(afl, buf, len); + for (u32 i = 0; i < map_size; i++) { + + if (trace_bits[i] > 0) { index_buf[index_count++] = i; } + + } + + // Compute base coverage for an invalid testcase. + // Keep only indices that are found in the current testcase and not the base. + lightweight_run(afl, "a", 1); + u32 write_idx = 0; + for (u32 i = 0; i < index_count; i++) { + + u32 idx = index_buf[i]; + if (trace_bits[idx] == 0) { index_buf[write_idx++] = idx; } + + } + + index_count = write_idx; + + u32 loss_buffer[index_count]; + memset(loss_buffer, 0, sizeof(loss_buffer)); + u32 loss_count = 0; + + u32 loss_threshold = ((index_count * FRAMESHIFT_LOSS_PCT) / 100) + 1; + + // printf("[FS] Index count: %u\n", index_count); + u32 inflection_points_count = 0; + u32 inflection_points_capacity = 128; + u32 *inflection_points = calloc(inflection_points_capacity, sizeof(u32)); + + // Outer loop, run at most max_iterations times. + for (u32 i = 0; i < FRAMESHIFT_MAX_ITERS; i++) { + + u8 found = 0; + + // Iterate over field position. + for (u32 field_pos = 0; field_pos < len - 1; field_pos++) { + + // Iterate over field type. + for (u8 k = 0; k < sizeof(FRAMESHIFT_SEARCH_ORDER) / sizeof(field_tmpl_t); + k++) { + + field_tmpl_t *tmpl = &FRAMESHIFT_SEARCH_ORDER[k]; + u8 size = tmpl->size; + u8 le = tmpl->le; + + if (field_pos + size > len) { continue; } + + u64 curr_size = decode_value(buf + field_pos, size, le); + + // Does this look like a size/offset field? + if (curr_size == 0 || curr_size > len) { continue; } + + // Pick a shift amount that will test this field size. + u64 shift_amount = 0xff; // overflow the field boundary + if (size == 1) { + + u64 max_shift = 0xff - curr_size; + if (max_shift == 0) { continue; } + shift_amount = MIN(0x20, max_shift); + + } + + // Check if the field is blocked. + if (is_blocked(meta, field_pos, size)) { + + // printf("[FS] Field is blocked\n"); + continue; + + } + + fs_relation_t potential_rel = {.pos = field_pos, + .val = curr_size, + .anchor = -1, // unset + .insert = -1, // unset + .size = size, + .le = le, + .enabled = 1}; + + // Corrupt the field and measure lost features. + potential_rel.val += shift_amount; + rel_apply(buf, &potential_rel); + + loss_count = 0; + + lightweight_run(afl, buf, len); + for (u32 j = 0; j < index_count; j++) { + + u32 idx = index_buf[j]; + if (trace_bits[idx] == 0) { loss_buffer[loss_count++] = idx; } + + } + + // Undo the change to the buffer. 
+ potential_rel.val -= shift_amount; + rel_apply(buf, &potential_rel); + potential_rel.val += shift_amount; + + if (loss_count < loss_threshold) { continue; } + + // printf("[FS] Testing relation: pos=%u size=%u le=%u shift=%u value=%u + // (loss: %d)\n", field_pos, size, le, shift_amount, curr_size, + // loss_count); + + // Next, we iterate over inflection points to find the best anchor. + double curr_recover = FRAMESHIFT_RECOVER_PCT / 100.0; + + if (size == 1) { + + check_anchor(afl, field_pos + size, len, curr_size, field_pos, buf, + meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + + } else if (size == 2) { + + check_anchor(afl, 0, len, curr_size, field_pos, buf, meta, trace_bits, + loss_buffer, loss_count, scratch, shift_amount, + &potential_rel, &curr_recover); + check_anchor(afl, field_pos, len, curr_size, field_pos, buf, meta, + trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size, len, curr_size, field_pos, buf, + meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + + } else { + + check_anchor(afl, field_pos + size + 7, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 6, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 5, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 4, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 3, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 2, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 1, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, 0, len, curr_size, field_pos, buf, meta, trace_bits, + loss_buffer, loss_count, scratch, shift_amount, + &potential_rel, &curr_recover); + check_anchor(afl, field_pos, len, curr_size, field_pos, buf, meta, + trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size, len, curr_size, field_pos, buf, + meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + + if (potential_rel.anchor == -1) { + + // Check other inflection points. + for (u32 j = 0; j < inflection_points_count; j++) { + + u32 anchor = inflection_points[j]; + check_anchor(afl, anchor, len, curr_size, field_pos, buf, meta, + trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + + } + + } + + } + + // Check if we have a valid relation. + if (potential_rel.anchor == -1) { + + // No valid relation found, continue. 
+ continue; + + } + + printf( + "[FS] Found relation: pos=%u size=%u le=%u shift=%u value=%u " + "anchor=%u insert=%u (loss: %d recover: %.2f%%)\n", + field_pos, size, le, shift_amount, curr_size, potential_rel.anchor, + potential_rel.insert, loss_count, curr_recover); + + potential_rel.val = curr_size; + fs_add_relation(meta, &potential_rel); + + // Update the inflection points. + // Only size 4 and 8 are used for inflection points. + if (potential_rel.size == 4 || potential_rel.size == 8) { + + // Need space for 3 more points. + if (inflection_points_count + 3 >= inflection_points_capacity) { + + inflection_points_capacity *= 2; + inflection_points = realloc( + inflection_points, inflection_points_capacity * sizeof(u32)); + + } + + inflection_points[inflection_points_count++] = potential_rel.pos; + inflection_points[inflection_points_count++] = potential_rel.anchor; + inflection_points[inflection_points_count++] = potential_rel.insert; + + } + + found = 1; + + } + + } + + if (!found) { + + // Didn't find relations this iteration, stop searching. + break; + + } + + } + + u64 time_end = get_cur_time(); + + afl->fs_stats.total_time_ms += time_end - time_start; + + afl->fs_stats.searched += 1; + if (meta->rel_count > 0) { afl->fs_stats.found += 1; } + +} + From d2a476433129e7d2a7c356fc59e979445051264b Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 16 Jul 2025 19:01:27 +0200 Subject: [PATCH 3/9] remove debug output --- src/afl-fuzz-frameshift.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/afl-fuzz-frameshift.c b/src/afl-fuzz-frameshift.c index 110de3ba5c..295a9496e4 100644 --- a/src/afl-fuzz-frameshift.c +++ b/src/afl-fuzz-frameshift.c @@ -1,7 +1,7 @@ #include "afl-fuzz.h" -#define FRAMESHIFT_DEBUG 1 +// #define FRAMESHIFT_DEBUG 1 #define FRAMESHIFT_INITIAL_CAPACITY 128 @@ -444,7 +444,7 @@ void check_anchor(afl_state_t *afl, u32 anchor, u32 len, u32 curr_size, void frameshift_stage(afl_state_t *afl) { - printf("Frameshift stage\n"); + // printf("Frameshift stage\n"); u64 time_start = get_cur_time(); @@ -664,11 +664,13 @@ void frameshift_stage(afl_state_t *afl) { } - printf( - "[FS] Found relation: pos=%u size=%u le=%u shift=%u value=%u " - "anchor=%u insert=%u (loss: %d recover: %.2f%%)\n", - field_pos, size, le, shift_amount, curr_size, potential_rel.anchor, - potential_rel.insert, loss_count, curr_recover); + /* + printf( + "[FS] Found relation: pos=%u size=%u le=%u shift=%u value=%u + " "anchor=%u insert=%u (loss: %d recover: %.2f%%)\n", field_pos, + size, le, shift_amount, curr_size, potential_rel.anchor, + potential_rel.insert, loss_count, curr_recover); + */ potential_rel.val = curr_size; fs_add_relation(meta, &potential_rel); From d4a48394f59a05e6138955d8688925d115df2ec3 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 16 Jul 2025 21:34:37 +0200 Subject: [PATCH 4/9] debug --- src/afl-fuzz-frameshift.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/src/afl-fuzz-frameshift.c b/src/afl-fuzz-frameshift.c index 295a9496e4..cfbf76d16f 100644 --- a/src/afl-fuzz-frameshift.c +++ b/src/afl-fuzz-frameshift.c @@ -153,25 +153,27 @@ void fs_add_relation(fs_meta_t *meta, fs_relation_t *rel) { void fs_save(fs_meta_t *meta) { - // printf("Saving metadata\n"); + fprintf(stderr, "Saving metadata\n"); for (u32 i = 0; i < meta->rel_count; i++) { fs_relation_t *rel = &meta->relations[i]; rel_save(rel); } + fprintf(stderr, "done\n"); } void fs_restore(fs_meta_t *meta) { - // printf("Restoring 
metadata\n"); + fprintf(stderr, "Restoring metadata\n"); for (u32 i = 0; i < meta->rel_count; i++) { fs_relation_t *rel = &meta->relations[i]; rel_restore(rel); } + fprintf(stderr, "done\n"); } @@ -182,7 +184,7 @@ void fs_restore(fs_meta_t *meta) { int fs_track_insert(fs_meta_t *meta, u64 idx, u64 data_size, u8 ignore_invalid) { - // printf("Inserting %llu at %llu\n", data_size, idx); + fprintf(stderr, "Inserting %llu at %llu\n", data_size, idx); for (u32 i = 0; i < meta->rel_count; i++) { if (meta->relations[i].enabled) { @@ -208,13 +210,14 @@ int fs_track_insert(fs_meta_t *meta, u64 idx, u64 data_size, } + fprintf(stderr, "done\n"); return 0; } void fs_track_delete(fs_meta_t *meta, u64 idx, u64 data_size) { - // printf("Deleting %llu at %llu\n", data_size, idx); + fprintf(stderr, "Deleting %llu at %llu\n", data_size, idx); for (u32 i = 0; i < meta->rel_count; i++) { if (meta->relations[i].enabled) { @@ -230,11 +233,13 @@ void fs_track_delete(fs_meta_t *meta, u64 idx, u64 data_size) { } } + fprintf(stderr, "done\n"); } void fs_sanitize(fs_meta_t *meta, u8 *buf) { + fprintf(stderr, "fs_sanitize\n"); // Apply the relations in reverse order. for (u32 i = meta->rel_count - 1; i != (u32)-1; i--) { @@ -243,12 +248,13 @@ void fs_sanitize(fs_meta_t *meta, u8 *buf) { rel_apply(buf, &meta->relations[i]); } + fprintf(stderr, "done\n"); } void fs_clone_meta(afl_state_t *afl) { - // printf("Cloning metadata\n"); + fprintf(stderr, "Cloning metadata\n"); fs_meta_t *meta = afl->queue_cur->fs_meta; fs_meta_t *fs_curr_meta = afl->fs_curr_meta; if (unlikely(!fs_curr_meta)) { @@ -264,6 +270,7 @@ void fs_clone_meta(afl_state_t *afl) { } // Copy relation data over. +fprintf(stderr, "x\n"); if (fs_curr_meta->rel_capacity < meta->rel_count) { // Increase capacity if needed. @@ -279,6 +286,7 @@ void fs_clone_meta(afl_state_t *afl) { // Blocked points will be read only after this, so we can shallow copy. fs_curr_meta->blocked_points_map = meta->blocked_points_map; + fprintf(stderr, "done\n"); } @@ -388,6 +396,8 @@ void check_anchor(afl_state_t *afl, u32 anchor, u32 len, u32 curr_size, u32 insertion = anchor + curr_size; if (insertion > len) { return; } + fprintf(stderr, "check_anchor\n"); + // Construct testcase with valid insertion. memcpy(scratch, buf, insertion); memset(scratch + insertion, 0x41, shift_amount); @@ -428,8 +438,8 @@ void check_anchor(afl_state_t *afl, u32 anchor, u32 len, u32 curr_size, double recover_pct = (double)recover_count / loss_count; - // printf(" -> Anchor: %u, Insertion: %u, Recovery: %.2f%%\n", anchor, - // insertion, recover_pct * 100); + fprintf(stderr, " -> Anchor: %u, Insertion: %u, Recovery: %.2f%%\n", anchor, + insertion, recover_pct * 100); // Update the best relation if we have a better recovery. 
if (recover_pct > *curr_recover) { @@ -439,12 +449,13 @@ void check_anchor(afl_state_t *afl, u32 anchor, u32 len, u32 curr_size, *curr_recover = recover_pct; } + fprintf(stderr, "done\n"); } void frameshift_stage(afl_state_t *afl) { - // printf("Frameshift stage\n"); + fprintf(stderr, "Frameshift stage\n"); u64 time_start = get_cur_time(); @@ -716,5 +727,7 @@ void frameshift_stage(afl_state_t *afl) { afl->fs_stats.searched += 1; if (meta->rel_count > 0) { afl->fs_stats.found += 1; } + fprintf(stderr, "done\n"); + } From 040c4cc51dc84c0c9ef2f32675e6e695b1da85e9 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 16 Jul 2025 21:50:02 +0200 Subject: [PATCH 5/9] debug --- src/afl-fuzz-frameshift.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/afl-fuzz-frameshift.c b/src/afl-fuzz-frameshift.c index cfbf76d16f..e37af9cd68 100644 --- a/src/afl-fuzz-frameshift.c +++ b/src/afl-fuzz-frameshift.c @@ -292,7 +292,9 @@ fprintf(stderr, "x\n"); fs_meta_t *fs_new_meta(u32 size) { + fprintf(stderr, "fs_new_meta\n"); fs_meta_t *meta = malloc(sizeof(fs_meta_t)); + fprintf(stderr, "%p", meta); meta->rel_count = 0; meta->rel_capacity = FRAMESHIFT_INITIAL_CAPACITY; meta->relations = malloc(sizeof(fs_relation_t) * meta->rel_capacity); @@ -300,6 +302,7 @@ fs_meta_t *fs_new_meta(u32 size) { meta->blocked_points_map = malloc(size); memset(meta->blocked_points_map, 0, size); + fprintf(stderr, "done\n"); return meta; } @@ -466,6 +469,7 @@ void frameshift_stage(afl_state_t *afl) { } + fprintf(stderr, "b %p", afl->frameshift_index_buffer); u32 *index_buf = afl->frameshift_index_buffer; u32 index_count = 0; @@ -473,6 +477,7 @@ void frameshift_stage(afl_state_t *afl) { u32 len = afl->queue_cur->len; u8 *scratch = malloc(len + 0x100); // We will at most shift by 0xff +fprintf(stderr, "s %p\n", scratch); // Print out #ifdef FRAMESHIFT_DEBUG @@ -492,7 +497,9 @@ void frameshift_stage(afl_state_t *afl) { u32 map_size = afl->fsrv.map_size; // Compute coverage of this testcase. +fprintf(stderr, "l0\n"); lightweight_run(afl, buf, len); +fprintf(stderr, "l1\n"); for (u32 i = 0; i < map_size; i++) { if (trace_bits[i] > 0) { index_buf[index_count++] = i; } @@ -502,6 +509,7 @@ void frameshift_stage(afl_state_t *afl) { // Compute base coverage for an invalid testcase. // Keep only indices that are found in the current testcase and not the base. lightweight_run(afl, "a", 1); +fprintf(stderr, "l2\n"); u32 write_idx = 0; for (u32 i = 0; i < index_count; i++) { @@ -523,6 +531,7 @@ void frameshift_stage(afl_state_t *afl) { u32 inflection_points_capacity = 128; u32 *inflection_points = calloc(inflection_points_capacity, sizeof(u32)); +fprintf(stderr, "i %p\n", inflection_points); // Outer loop, run at most max_iterations times. 
for (u32 i = 0; i < FRAMESHIFT_MAX_ITERS; i++) { @@ -719,6 +728,7 @@ void frameshift_stage(afl_state_t *afl) { } } + fprintf(stderr, "fooobar\n"); u64 time_end = get_cur_time(); From e80dae02250ba4858cb8151dd386d93c1cc321f6 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 16 Jul 2025 22:07:06 +0200 Subject: [PATCH 6/9] debug --- src/afl-fuzz-frameshift.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/afl-fuzz-frameshift.c b/src/afl-fuzz-frameshift.c index e37af9cd68..0a6c0bc734 100644 --- a/src/afl-fuzz-frameshift.c +++ b/src/afl-fuzz-frameshift.c @@ -1,7 +1,7 @@ #include "afl-fuzz.h" -// #define FRAMESHIFT_DEBUG 1 +#define FRAMESHIFT_DEBUG 1 #define FRAMESHIFT_INITIAL_CAPACITY 128 @@ -294,7 +294,7 @@ fs_meta_t *fs_new_meta(u32 size) { fprintf(stderr, "fs_new_meta\n"); fs_meta_t *meta = malloc(sizeof(fs_meta_t)); - fprintf(stderr, "%p", meta); + fprintf(stderr, "%p\n", meta); meta->rel_count = 0; meta->rel_capacity = FRAMESHIFT_INITIAL_CAPACITY; meta->relations = malloc(sizeof(fs_relation_t) * meta->rel_capacity); @@ -469,7 +469,7 @@ void frameshift_stage(afl_state_t *afl) { } - fprintf(stderr, "b %p", afl->frameshift_index_buffer); + fprintf(stderr, "b %p\n", afl->frameshift_index_buffer); u32 *index_buf = afl->frameshift_index_buffer; u32 index_count = 0; @@ -481,7 +481,7 @@ fprintf(stderr, "s %p\n", scratch); // Print out #ifdef FRAMESHIFT_DEBUG - printf("[FS] Input buffer: "); + fprintf(stderr, "[FS] Input buffer: "); print_buffer(buf, len); #endif @@ -497,7 +497,7 @@ fprintf(stderr, "s %p\n", scratch); u32 map_size = afl->fsrv.map_size; // Compute coverage of this testcase. -fprintf(stderr, "l0\n"); +fprintf(stderr, "l0 %p %u\n", buf, len); lightweight_run(afl, buf, len); fprintf(stderr, "l1\n"); for (u32 i = 0; i < map_size; i++) { From 49466589ede08ad5d61215fd07e6b97dcadcb364 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 16 Jul 2025 22:26:19 +0200 Subject: [PATCH 7/9] debug --- src/afl-fuzz-frameshift.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/afl-fuzz-frameshift.c b/src/afl-fuzz-frameshift.c index 0a6c0bc734..1f2e47a3eb 100644 --- a/src/afl-fuzz-frameshift.c +++ b/src/afl-fuzz-frameshift.c @@ -477,7 +477,7 @@ void frameshift_stage(afl_state_t *afl) { u32 len = afl->queue_cur->len; u8 *scratch = malloc(len + 0x100); // We will at most shift by 0xff -fprintf(stderr, "s %p\n", scratch); +fprintf(stderr, "s %p - %p %u\n", scratch, buf, len); // Print out #ifdef FRAMESHIFT_DEBUG From f1d4acadfc6d9315138c14c3d52a715c42a3e6a0 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 16 Jul 2025 22:29:20 +0200 Subject: [PATCH 8/9] fix --- src/afl-fuzz-frameshift.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/afl-fuzz-frameshift.c b/src/afl-fuzz-frameshift.c index 1f2e47a3eb..2973441137 100644 --- a/src/afl-fuzz-frameshift.c +++ b/src/afl-fuzz-frameshift.c @@ -473,7 +473,7 @@ void frameshift_stage(afl_state_t *afl) { u32 *index_buf = afl->frameshift_index_buffer; u32 index_count = 0; - u8 *buf = afl->queue_cur->testcase_buf; + u8 *buf = queue_testcase_get(afl, afl->queue_cur); u32 len = afl->queue_cur->len; u8 *scratch = malloc(len + 0x100); // We will at most shift by 0xff From 2a45bb55913ef28f6325b9509846a8484337e21a Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 17 Jul 2025 11:58:18 +0200 Subject: [PATCH 9/9] remove debug and warnings --- src/afl-fuzz-frameshift.c | 109 ++++++++++++++++++++++++++++++-------- 1 file changed, 88 insertions(+), 21 deletions(-) diff --git 
a/src/afl-fuzz-frameshift.c b/src/afl-fuzz-frameshift.c index 2973441137..c1c7c2f341 100644 --- a/src/afl-fuzz-frameshift.c +++ b/src/afl-fuzz-frameshift.c @@ -1,7 +1,7 @@ #include "afl-fuzz.h" -#define FRAMESHIFT_DEBUG 1 +// #define FRAMESHIFT_DEBUG 1 #define FRAMESHIFT_INITIAL_CAPACITY 128 @@ -153,27 +153,37 @@ void fs_add_relation(fs_meta_t *meta, fs_relation_t *rel) { void fs_save(fs_meta_t *meta) { +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "Saving metadata\n"); +#endif for (u32 i = 0; i < meta->rel_count; i++) { fs_relation_t *rel = &meta->relations[i]; rel_save(rel); } + +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "done\n"); +#endif } void fs_restore(fs_meta_t *meta) { +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "Restoring metadata\n"); +#endif for (u32 i = 0; i < meta->rel_count; i++) { fs_relation_t *rel = &meta->relations[i]; rel_restore(rel); } + +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "done\n"); +#endif } @@ -184,7 +194,9 @@ void fs_restore(fs_meta_t *meta) { int fs_track_insert(fs_meta_t *meta, u64 idx, u64 data_size, u8 ignore_invalid) { +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "Inserting %llu at %llu\n", data_size, idx); +#endif for (u32 i = 0; i < meta->rel_count; i++) { if (meta->relations[i].enabled) { @@ -210,14 +222,18 @@ int fs_track_insert(fs_meta_t *meta, u64 idx, u64 data_size, } +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "done\n"); +#endif return 0; } void fs_track_delete(fs_meta_t *meta, u64 idx, u64 data_size) { +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "Deleting %llu at %llu\n", data_size, idx); +#endif for (u32 i = 0; i < meta->rel_count; i++) { if (meta->relations[i].enabled) { @@ -233,13 +249,18 @@ void fs_track_delete(fs_meta_t *meta, u64 idx, u64 data_size) { } } + +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "done\n"); +#endif } void fs_sanitize(fs_meta_t *meta, u8 *buf) { +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "fs_sanitize\n"); +#endif // Apply the relations in reverse order. for (u32 i = meta->rel_count - 1; i != (u32)-1; i--) { @@ -248,13 +269,18 @@ void fs_sanitize(fs_meta_t *meta, u8 *buf) { rel_apply(buf, &meta->relations[i]); } + +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "done\n"); +#endif } void fs_clone_meta(afl_state_t *afl) { +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "Cloning metadata\n"); +#endif fs_meta_t *meta = afl->queue_cur->fs_meta; fs_meta_t *fs_curr_meta = afl->fs_curr_meta; if (unlikely(!fs_curr_meta)) { @@ -270,7 +296,9 @@ void fs_clone_meta(afl_state_t *afl) { } // Copy relation data over. -fprintf(stderr, "x\n"); +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "x\n"); +#endif if (fs_curr_meta->rel_capacity < meta->rel_count) { // Increase capacity if needed. @@ -286,15 +314,21 @@ fprintf(stderr, "x\n"); // Blocked points will be read only after this, so we can shallow copy. 
fs_curr_meta->blocked_points_map = meta->blocked_points_map; +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "done\n"); +#endif } fs_meta_t *fs_new_meta(u32 size) { +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "fs_new_meta\n"); +#endif fs_meta_t *meta = malloc(sizeof(fs_meta_t)); +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "%p\n", meta); +#endif meta->rel_count = 0; meta->rel_capacity = FRAMESHIFT_INITIAL_CAPACITY; meta->relations = malloc(sizeof(fs_relation_t) * meta->rel_capacity); @@ -302,7 +336,9 @@ fs_meta_t *fs_new_meta(u32 size) { meta->blocked_points_map = malloc(size); memset(meta->blocked_points_map, 0, size); +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "done\n"); +#endif return meta; } @@ -399,7 +435,9 @@ void check_anchor(afl_state_t *afl, u32 anchor, u32 len, u32 curr_size, u32 insertion = anchor + curr_size; if (insertion > len) { return; } +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "check_anchor\n"); +#endif // Construct testcase with valid insertion. memcpy(scratch, buf, insertion); @@ -441,8 +479,10 @@ void check_anchor(afl_state_t *afl, u32 anchor, u32 len, u32 curr_size, double recover_pct = (double)recover_count / loss_count; +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, " -> Anchor: %u, Insertion: %u, Recovery: %.2f%%\n", anchor, - insertion, recover_pct * 100); + insertion, recover_pct * 100); +#endif // Update the best relation if we have a better recovery. if (recover_pct > *curr_recover) { @@ -452,13 +492,18 @@ void check_anchor(afl_state_t *afl, u32 anchor, u32 len, u32 curr_size, *curr_recover = recover_pct; } + +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "done\n"); +#endif } void frameshift_stage(afl_state_t *afl) { +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "Frameshift stage\n"); +#endif u64 time_start = get_cur_time(); @@ -469,7 +514,9 @@ void frameshift_stage(afl_state_t *afl) { } +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "b %p\n", afl->frameshift_index_buffer); +#endif u32 *index_buf = afl->frameshift_index_buffer; u32 index_count = 0; @@ -477,7 +524,9 @@ void frameshift_stage(afl_state_t *afl) { u32 len = afl->queue_cur->len; u8 *scratch = malloc(len + 0x100); // We will at most shift by 0xff -fprintf(stderr, "s %p - %p %u\n", scratch, buf, len); +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "s %p - %p %u\n", scratch, buf, len); +#endif // Print out #ifdef FRAMESHIFT_DEBUG @@ -497,9 +546,13 @@ fprintf(stderr, "s %p - %p %u\n", scratch, buf, len); u32 map_size = afl->fsrv.map_size; // Compute coverage of this testcase. -fprintf(stderr, "l0 %p %u\n", buf, len); +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "l0 %p %u\n", buf, len); +#endif lightweight_run(afl, buf, len); -fprintf(stderr, "l1\n"); +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "l1\n"); +#endif for (u32 i = 0; i < map_size; i++) { if (trace_bits[i] > 0) { index_buf[index_count++] = i; } @@ -509,7 +562,9 @@ fprintf(stderr, "l1\n"); // Compute base coverage for an invalid testcase. // Keep only indices that are found in the current testcase and not the base. 
lightweight_run(afl, "a", 1); -fprintf(stderr, "l2\n"); +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "l2\n"); +#endif u32 write_idx = 0; for (u32 i = 0; i < index_count; i++) { @@ -526,12 +581,16 @@ fprintf(stderr, "l2\n"); u32 loss_threshold = ((index_count * FRAMESHIFT_LOSS_PCT) / 100) + 1; - // printf("[FS] Index count: %u\n", index_count); +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "[FS] Index count: %u\n", index_count); +#endif u32 inflection_points_count = 0; u32 inflection_points_capacity = 128; u32 *inflection_points = calloc(inflection_points_capacity, sizeof(u32)); -fprintf(stderr, "i %p\n", inflection_points); +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "i %p\n", inflection_points); +#endif // Outer loop, run at most max_iterations times. for (u32 i = 0; i < FRAMESHIFT_MAX_ITERS; i++) { @@ -544,7 +603,7 @@ fprintf(stderr, "i %p\n", inflection_points); for (u8 k = 0; k < sizeof(FRAMESHIFT_SEARCH_ORDER) / sizeof(field_tmpl_t); k++) { - field_tmpl_t *tmpl = &FRAMESHIFT_SEARCH_ORDER[k]; + field_tmpl_t *tmpl = (field_tmpl_t *)&FRAMESHIFT_SEARCH_ORDER[k]; u8 size = tmpl->size; u8 le = tmpl->le; @@ -575,8 +634,8 @@ fprintf(stderr, "i %p\n", inflection_points); fs_relation_t potential_rel = {.pos = field_pos, .val = curr_size, - .anchor = -1, // unset - .insert = -1, // unset + .anchor = (u64)-1, // unset + .insert = (u64)-1, // unset .size = size, .le = le, .enabled = 1}; @@ -660,7 +719,7 @@ fprintf(stderr, "i %p\n", inflection_points); meta, trace_bits, loss_buffer, loss_count, scratch, shift_amount, &potential_rel, &curr_recover); - if (potential_rel.anchor == -1) { + if (potential_rel.anchor == (u64)-1) { // Check other inflection points. for (u32 j = 0; j < inflection_points_count; j++) { @@ -677,20 +736,23 @@ fprintf(stderr, "i %p\n", inflection_points); } // Check if we have a valid relation. - if (potential_rel.anchor == -1) { + if (potential_rel.anchor == (u64)-1) { // No valid relation found, continue. continue; } - /* - printf( - "[FS] Found relation: pos=%u size=%u le=%u shift=%u value=%u - " "anchor=%u insert=%u (loss: %d recover: %.2f%%)\n", field_pos, - size, le, shift_amount, curr_size, potential_rel.anchor, - potential_rel.insert, loss_count, curr_recover); - */ +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, + "[FS] Found relation: pos=%u size=%u le=%u shift=%u value=%u " + "anchor=%u insert=%u (loss: %d recover: %.2f%%)\n", + field_pos, size, le, shift_amount, curr_size, potential_rel.anchor, + potential_rel.insert, loss_count, curr_recover); +#endif potential_rel.val = curr_size; fs_add_relation(meta, &potential_rel); @@ -728,7 +790,10 @@ fprintf(stderr, "i %p\n", inflection_points); } } + +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "fooobar\n"); +#endif u64 time_end = get_cur_time(); @@ -737,7 +802,9 @@ fprintf(stderr, "i %p\n", inflection_points); afl->fs_stats.searched += 1; if (meta->rel_count > 0) { afl->fs_stats.found += 1; } +#ifdef FRAMESHIFT_DEBUG fprintf(stderr, "done\n"); +#endif }
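
The core mechanism introduced by this patch series is the "relation": a detected size/length field at pos whose value tracks the data between its anchor and insert offsets. During havoc, every insertion or deletion is reported via fs_track_insert()/fs_track_delete(), which update each enabled relation through rel_on_insert()/rel_on_remove(), and fs_sanitize() re-encodes the tracked field values into out_buf in common_fuzz_stuff() before each execution, so length fields stay consistent with the mutated data. The following is a minimal, standalone sketch of that idea (not part of the patch): it uses simplified stand-ins for the patch's fs_relation_t, rel_on_insert() and rel_apply(), and a hypothetical input with a 2-byte little-endian length field, just to show how the tracked value is re-encoded after an insertion.

/* sketch.c - simplified illustration of the frameshift relation idea.
   Assumptions: a 2-byte LE length field at offset 0 describing the payload
   at offsets 2..5; error handling, masking and the big-endian path of the
   patch are omitted. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  uint64_t pos, val, anchor, insert;  /* field offset, value, covered range */
  uint8_t  size;                      /* field width in bytes (LE assumed)  */
} rel_t;

/* Simplified counterpart of the patch's rel_on_insert(): grow the value if
   the insertion lands inside the covered range, and shift the offsets. */
static void rel_on_insert(rel_t *r, uint64_t idx, uint64_t n) {
  if (idx >= r->anchor && idx <= r->insert) r->val += n;
  if (idx <= r->pos) r->pos += n;
  if (idx < r->anchor) r->anchor += n;
  if (idx <= r->insert) r->insert += n;
}

/* Simplified counterpart of the patch's rel_apply() for little-endian
   fields: re-encode the tracked value into the buffer. */
static void rel_apply(uint8_t *buf, const rel_t *r) {
  for (uint8_t i = 0; i < r->size; i++)
    buf[r->pos + i] = (uint8_t)(r->val >> (i * 8));
}

int main(void) {
  /* Hypothetical input: length 4 (LE), payload "ABCD", trailer "XY". */
  uint8_t buf[16] = {0x04, 0x00, 'A', 'B', 'C', 'D', 'X', 'Y'};
  rel_t r = {.pos = 0, .val = 4, .anchor = 2, .insert = 6, .size = 2};

  /* Havoc-style mutation: insert three 'Z' bytes at offset 4. */
  memmove(buf + 7, buf + 4, 4);
  memset(buf + 4, 'Z', 3);

  rel_on_insert(&r, 4, 3);  /* track the insertion          */
  rel_apply(buf, &r);       /* length field becomes 7 (LE)  */

  for (int i = 0; i < 11; i++) printf("%02x ", buf[i]);
  printf("\n");             /* 07 00 41 42 5a 5a 5a 43 44 58 59 */
  return 0;
}

frameshift_stage() discovers such relations automatically: it corrupts a candidate field, records which coverage map entries are lost, then inserts filler bytes at candidate anchor points and keeps the relation whose re-encoded field recovers the largest share of that lost coverage, gated by the FRAMESHIFT_LOSS_PCT and FRAMESHIFT_RECOVER_PCT thresholds above.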