diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index a0ee58bf73..a7ce03c1ac 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -180,6 +180,58 @@ struct havoc_profile { }; +/* Frameshift */ + +// A single frameshift relation field. +typedef struct fs_relation { + + // Dynamic values: + u64 pos; + u64 val; + u64 anchor; + u64 insert; + + // Backup to perform revert. + u64 _old_pos; + u64 _old_val; + u64 _old_anchor; + u64 _old_insert; + + // Fixed values: + u8 size; + u8 le; + u8 enabled; + +} fs_relation_t; + +typedef struct fs_idx_vec { + + u32 *idx; + u32 count; + u32 capacity; + +} fs_idx_vec_t; + +// Per-input metadata. +typedef struct fs_meta { + + fs_relation_t *relations; + u32 rel_count; + u32 rel_capacity; + + u8 *blocked_points_map; /* bitmap of blocked points */ + +} fs_meta_t; + +struct frameshift_stats { + + u32 searched; + u32 found; + u64 search_tests; + u64 total_time_ms; + +}; + struct skipdet_entry { u8 continue_inf, done_eff; @@ -257,6 +309,11 @@ struct queue_entry { struct tainted *taint; /* Taint information from CmpLog */ struct skipdet_entry *skipdet_e; + u8 fs_status; /* Frameshift status */ + /* 0: unexplored */ + /* 1: explored */ + fs_meta_t *fs_meta; /* Frameshift metadata */ + }; struct extra_data { @@ -463,7 +520,7 @@ typedef struct afl_env_vars { afl_post_process_keep_original, afl_crashing_seeds_as_new_crash, afl_final_sync, afl_ignore_seed_problems, afl_disable_redundant, afl_sha1_filenames, afl_no_sync, afl_no_fastresume, afl_forksrv_uid_set, - afl_forksrv_gid_set; + afl_forksrv_gid_set, afl_frameshift_enabled; u16 afl_forksrv_nb_supl_gids; @@ -876,6 +933,10 @@ typedef struct afl_state { /* Global Profile Data for deterministic/havoc-splice stage */ struct havoc_profile *havoc_prof; + struct frameshift_stats fs_stats; + u32 *frameshift_index_buffer; /* Buffer for frameshift index */ + fs_meta_t *fs_curr_meta; /* Metadata for the current input (full copy) */ + struct skipdet_global *skipdet_g; s64 last_scored_idx; /* 
Index of the last queue entry re-scored */ @@ -1343,6 +1404,15 @@ u8 is_det_timeout(u64, u8); void plot_profile_data(afl_state_t *, struct queue_entry *); +/* Frameshift functions */ +void frameshift_stage(afl_state_t *); +void fs_sanitize(fs_meta_t *, u8 *buf); +void fs_save(fs_meta_t *meta); +void fs_restore(fs_meta_t *meta); +int fs_track_insert(fs_meta_t *meta, u64 idx, u64 data_size, u8 ignore_invalid); +void fs_track_delete(fs_meta_t *meta, u64 idx, u64 data_size); +void fs_clone_meta(afl_state_t *afl); + /**** Inline routines ****/ /* Generate a random number (from 0 to limit - 1). This may diff --git a/include/envs.h b/include/envs.h index 1a745bde53..7a974f2387 100644 --- a/include/envs.h +++ b/include/envs.h @@ -122,7 +122,7 @@ static char *afl_environment_variables[] = { "AFL_PRINT_FILENAMES", "AFL_PIZZA_MODE", "AFL_NO_FASTRESUME", "AFL_SAN_ABSTRACTION", "AFL_LLVM_ONLY_FSRV", "AFL_GCC_ONLY_FRSV", "AFL_SAN_RECOVER", "AFL_PRELOAD_DISCRIMINATE_FORKSERVER_PARENT", - "AFL_FORKSRV_UID", "AFL_FORKSRV_GID", NULL}; + "AFL_FORKSRV_UID", "AFL_FORKSRV_GID", "AFL_FRAMESHIFT_ENABLED", NULL}; extern char *afl_environment_variables[]; diff --git a/src/afl-fuzz-frameshift.c b/src/afl-fuzz-frameshift.c new file mode 100644 index 0000000000..c1c7c2f341 --- /dev/null +++ b/src/afl-fuzz-frameshift.c @@ -0,0 +1,810 @@ + +#include "afl-fuzz.h" + +// #define FRAMESHIFT_DEBUG 1 + +#define FRAMESHIFT_INITIAL_CAPACITY 128 + +#define FRAMESHIFT_MAX_ITERS 10 +#define FRAMESHIFT_LOSS_PCT 5 // 5% loss +#define FRAMESHIFT_RECOVER_PCT 20 // 20% recovery + +// Update the relation based on the given insertion. +// +// Returns 0 on success, 1 on error. +int rel_on_insert(fs_relation_t *rel, u64 idx, u64 size) { + + // Error if insert is inside the field. + if (idx > rel->pos && idx < rel->pos + rel->size) { return 1; } + + // Check if we should update the value of the field. 
+ if (idx >= rel->anchor && idx <= rel->insert) { + + u64 pre = rel->val; + rel->val += size; + + if (rel->size < 8) { rel->val &= (1ULL << (rel->size * 8)) - 1; } + + // Check if we overflowed the field. + if (rel->val < pre) { return 1; } + + } + + // Move the field. + if (idx <= rel->pos) { rel->pos += size; } + + // Move the anchor point. + // Anchor point of 0 is locked. + if (idx < rel->anchor) { rel->anchor += size; } + + // Move the insert point. + if (idx <= rel->insert) { rel->insert += size; } + + return 0; + +} + +// Update the relation based on the given removal. +// +// Returns 0 on success, 1 on error. +int rel_on_remove(fs_relation_t *rel, u64 idx, u64 size) { + + // Error if remove overlaps the field. + if (idx < rel->pos + rel->size && idx + size > rel->pos) { return 1; } + + u64 pre_pos = (idx < rel->pos) ? MIN(rel->pos - idx, size) : 0; + u64 pre_anchor = (idx < rel->anchor) ? MIN(rel->anchor - idx, size) : 0; + u64 pre_insert = (idx < rel->insert) ? MIN(rel->insert - idx, size) : 0; + + // Compute overlap. + u64 overlap_min = MIN(MAX(idx, rel->anchor), rel->insert); + u64 overlap_max = MAX(MIN(idx + size, rel->anchor), rel->insert); + u64 overlap = overlap_max - overlap_min; + + // Adjust the field value. + if (overlap > rel->val) { + + return 1; + + } else { + + rel->val -= overlap; + + } + + // Adjust the field position. + rel->pos -= pre_pos; + rel->anchor -= pre_anchor; + rel->insert -= pre_insert; + + return 0; + +} + +// Apply the relation to the given buffer. 
+void rel_apply(u8 *buf, fs_relation_t *rel) { + + u32 i; + u64 val = rel->val; + u8 size = rel->size; + + if (rel->le) { + + for (i = 0; i < size; i++) { + + buf[rel->pos + i] = (u8)(val >> (i * 8)); + + } + + } else { + + for (i = 0; i < size; i++) { + + buf[rel->pos + size - 1 - i] = (u8)(val >> (i * 8)); + + } + + } + +} + +void rel_save(fs_relation_t *rel) { + + rel->_old_pos = rel->pos; + rel->_old_val = rel->val; + rel->_old_anchor = rel->anchor; + rel->_old_insert = rel->insert; + +} + +void rel_restore(fs_relation_t *rel) { + + rel->pos = rel->_old_pos; + rel->val = rel->_old_val; + rel->anchor = rel->_old_anchor; + rel->insert = rel->_old_insert; + + // Re-enable all + rel->enabled = 1; + +} + +void fs_add_relation(fs_meta_t *meta, fs_relation_t *rel) { + + if (meta->rel_count == meta->rel_capacity) { + + meta->rel_capacity *= 2; + meta->relations = + realloc(meta->relations, sizeof(fs_relation_t) * meta->rel_capacity); + + } + + memcpy(&meta->relations[meta->rel_count], rel, sizeof(fs_relation_t)); + meta->rel_count++; + + // Update blocked points map. + for (u32 i = 0; i < rel->size; i++) { + + meta->blocked_points_map[rel->pos + i] = 1; + + } + +} + +void fs_save(fs_meta_t *meta) { + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "Saving metadata\n"); +#endif + for (u32 i = 0; i < meta->rel_count; i++) { + + fs_relation_t *rel = &meta->relations[i]; + rel_save(rel); + + } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "done\n"); +#endif + +} + +void fs_restore(fs_meta_t *meta) { + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "Restoring metadata\n"); +#endif + for (u32 i = 0; i < meta->rel_count; i++) { + + fs_relation_t *rel = &meta->relations[i]; + rel_restore(rel); + + } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "done\n"); +#endif + +} + +// Insert data into the buffer at the given index. +// Update any relations that are affected by the insertion. +// If ignore_invalid is set, invalid insertions are ignored. +// Returns 0 on success, 1 on error. 
+int fs_track_insert(fs_meta_t *meta, u64 idx, u64 data_size, + u8 ignore_invalid) { + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "Inserting %llu at %llu\n", data_size, idx); +#endif + for (u32 i = 0; i < meta->rel_count; i++) { + + if (meta->relations[i].enabled) { + + u8 res = rel_on_insert(&meta->relations[i], idx, data_size); + if (res) { + + if (ignore_invalid) { + + // Invalid insertion, disable relation and keep going. + meta->relations[i].enabled = 0; + + } else { + + // Invalid insertion, return error. + return 1; + + } + + } + + } + + } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "done\n"); +#endif + return 0; + +} + +void fs_track_delete(fs_meta_t *meta, u64 idx, u64 data_size) { + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "Deleting %llu at %llu\n", data_size, idx); +#endif + for (u32 i = 0; i < meta->rel_count; i++) { + + if (meta->relations[i].enabled) { + + u8 res = rel_on_remove(&meta->relations[i], idx, data_size); + if (res) { + + // Invalid deletion, disable relation and keep going. + meta->relations[i].enabled = 0; + + } + + } + + } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "done\n"); +#endif + +} + +void fs_sanitize(fs_meta_t *meta, u8 *buf) { + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "fs_sanitize\n"); +#endif + // Apply the relations in reverse order. + for (u32 i = meta->rel_count - 1; i != (u32)-1; i--) { + + if (!meta->relations[i].enabled) { continue; } + + rel_apply(buf, &meta->relations[i]); + + } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "done\n"); +#endif + +} + +void fs_clone_meta(afl_state_t *afl) { + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "Cloning metadata\n"); +#endif + fs_meta_t *meta = afl->queue_cur->fs_meta; + fs_meta_t *fs_curr_meta = afl->fs_curr_meta; + if (unlikely(!fs_curr_meta)) { + + // Initial allocation. 
+ fs_curr_meta = malloc(sizeof(fs_meta_t)); + fs_curr_meta->rel_count = 0; + fs_curr_meta->rel_capacity = FRAMESHIFT_INITIAL_CAPACITY; + fs_curr_meta->relations = + malloc(sizeof(fs_relation_t) * fs_curr_meta->rel_capacity); + afl->fs_curr_meta = fs_curr_meta; + + } + + // Copy relation data over. +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "x\n"); +#endif + if (fs_curr_meta->rel_capacity < meta->rel_count) { + + // Increase capacity if needed. + fs_curr_meta->relations = realloc(fs_curr_meta->relations, + sizeof(fs_relation_t) * meta->rel_count); + fs_curr_meta->rel_capacity = meta->rel_count; + + } + + memcpy(fs_curr_meta->relations, meta->relations, + sizeof(fs_relation_t) * meta->rel_count); + fs_curr_meta->rel_count = meta->rel_count; + + // Blocked points will be read only after this, so we can shallow copy. + fs_curr_meta->blocked_points_map = meta->blocked_points_map; +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "done\n"); +#endif + +} + +fs_meta_t *fs_new_meta(u32 size) { + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "fs_new_meta\n"); +#endif + fs_meta_t *meta = malloc(sizeof(fs_meta_t)); +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "%p\n", meta); +#endif + meta->rel_count = 0; + meta->rel_capacity = FRAMESHIFT_INITIAL_CAPACITY; + meta->relations = malloc(sizeof(fs_relation_t) * meta->rel_capacity); + + meta->blocked_points_map = malloc(size); + memset(meta->blocked_points_map, 0, size); + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "done\n"); +#endif + return meta; + +} + +void lightweight_run(afl_state_t *afl, u8 *out_buf, u32 len) { + + afl->fs_stats.search_tests++; + + write_to_testcase(afl, (void **)&out_buf, len, 0); + + u8 fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout); + + afl->queued_discovered += save_if_interesting(afl, out_buf, len, fault); + +} + +void print_buffer(u8 *buf, u32 len) { + + for (u32 i = 0; i < len; i++) { + + printf("%02x ", buf[i]); + + } + + printf("\n"); + +} + +typedef struct field_tmpl { + + u8 size; + u8 le; + +} 
field_tmpl_t; + +const field_tmpl_t FRAMESHIFT_SEARCH_ORDER[] = { + + {8, 1}, // u64 - little + {8, 0}, // u64 - big + {4, 1}, // u32 - little + {4, 0}, // u32 - big + {2, 1}, // u16 - little + {2, 0}, // u16 - big + {1, 1}, // u8 - little + +}; + +u64 decode_value(u8 *buf, u8 size, u8 le) { + + u64 val = 0; + if (le) { + + for (u8 i = 0; i < size; i++) { + + val |= ((u64)buf[i]) << (i * 8); + + } + + } else { + + for (u8 i = 0; i < size; i++) { + + val |= ((u64)buf[size - 1 - i]) << (i * 8); + + } + + } + + return val; + +} + +int is_blocked(fs_meta_t *meta, u32 pos, u8 size) { + + for (u32 i = 0; i < size; i++) { + + if (meta->blocked_points_map[pos + i]) { return 1; } + + } + + return 0; + +} + +void check_anchor(afl_state_t *afl, u32 anchor, u32 len, u32 curr_size, + u32 field_pos, u8 *buf, fs_meta_t *meta, u8 *trace_bits, + u32 *loss_buffer, u32 loss_count, u8 *scratch, + u32 shift_amount, fs_relation_t *potential_rel, + double *curr_recover) { + + // Check if the anchor is valid. + if (anchor > len) { return; } + + u32 insertion = anchor + curr_size; + if (insertion > len) { return; } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "check_anchor\n"); +#endif + + // Construct testcase with valid insertion. + memcpy(scratch, buf, insertion); + memset(scratch + insertion, 0x41, shift_amount); + memcpy(scratch + insertion + shift_amount, buf + insertion, len - insertion); + + // Handle on_insert for the prospective relation manually. + if (insertion < potential_rel->pos) { + + // Temporarily shift the relation to apply on the scratch buffer. + potential_rel->pos += shift_amount; + + } + + rel_apply(scratch, potential_rel); + potential_rel->pos = field_pos; + + fs_save(meta); + u8 res = fs_track_insert(meta, insertion, shift_amount, 0); + fs_sanitize(meta, scratch); + fs_restore(meta); + if (res) { + + // Invalid insertion, return. + return; + + } + + // Measure recovery. 
+ lightweight_run(afl, scratch, len + shift_amount); + + u64 recover_count = 0; + for (u32 j = 0; j < loss_count; j++) { + + u32 idx = loss_buffer[j]; + if (trace_bits[idx] > 0) { recover_count++; } + + } + + double recover_pct = (double)recover_count / loss_count; + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, " -> Anchor: %u, Insertion: %u, Recovery: %.2f%%\n", anchor, + insertion, recover_pct * 100); +#endif + + // Update the best relation if we have a better recovery. + if (recover_pct > *curr_recover) { + + potential_rel->anchor = anchor; + potential_rel->insert = insertion; + *curr_recover = recover_pct; + + } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "done\n"); +#endif + +} + +void frameshift_stage(afl_state_t *afl) { + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "Frameshift stage\n"); +#endif + + u64 time_start = get_cur_time(); + + if (unlikely(!afl->frameshift_index_buffer)) { + + // Allocate the frameshift index buffer. + afl->frameshift_index_buffer = malloc(afl->fsrv.map_size * sizeof(u32)); + + } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "b %p\n", afl->frameshift_index_buffer); +#endif + u32 *index_buf = afl->frameshift_index_buffer; + u32 index_count = 0; + + u8 *buf = queue_testcase_get(afl, afl->queue_cur); + u32 len = afl->queue_cur->len; + + u8 *scratch = malloc(len + 0x100); // We will at most shift by 0xff +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "s %p - %p %u\n", scratch, buf, len); +#endif + + // Print out +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "[FS] Input buffer: "); + print_buffer(buf, len); +#endif + + // Update queue state + afl->queue_cur->fs_status = 1; + + // Initialize relation metadata + fs_meta_t *meta = fs_new_meta(len); + afl->queue_cur->fs_meta = meta; + + // Compute base coverage for this testcase. + u8 *trace_bits = afl->fsrv.trace_bits; + u32 map_size = afl->fsrv.map_size; + + // Compute coverage of this testcase. 
+#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "l0 %p %u\n", buf, len); +#endif + lightweight_run(afl, buf, len); +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "l1\n"); +#endif + for (u32 i = 0; i < map_size; i++) { + + if (trace_bits[i] > 0) { index_buf[index_count++] = i; } + + } + + // Compute base coverage for an invalid testcase. + // Keep only indices that are found in the current testcase and not the base. + lightweight_run(afl, "a", 1); +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "l2\n"); +#endif + u32 write_idx = 0; + for (u32 i = 0; i < index_count; i++) { + + u32 idx = index_buf[i]; + if (trace_bits[idx] == 0) { index_buf[write_idx++] = idx; } + + } + + index_count = write_idx; + + u32 loss_buffer[index_count]; + memset(loss_buffer, 0, sizeof(loss_buffer)); + u32 loss_count = 0; + + u32 loss_threshold = ((index_count * FRAMESHIFT_LOSS_PCT) / 100) + 1; + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "[FS] Index count: %u\n", index_count); +#endif + u32 inflection_points_count = 0; + u32 inflection_points_capacity = 128; + u32 *inflection_points = calloc(inflection_points_capacity, sizeof(u32)); + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "i %p\n", inflection_points); +#endif + // Outer loop, run at most max_iterations times. + for (u32 i = 0; i < FRAMESHIFT_MAX_ITERS; i++) { + + u8 found = 0; + + // Iterate over field position. + for (u32 field_pos = 0; field_pos < len - 1; field_pos++) { + + // Iterate over field type. + for (u8 k = 0; k < sizeof(FRAMESHIFT_SEARCH_ORDER) / sizeof(field_tmpl_t); + k++) { + + field_tmpl_t *tmpl = (field_tmpl_t *)&FRAMESHIFT_SEARCH_ORDER[k]; + u8 size = tmpl->size; + u8 le = tmpl->le; + + if (field_pos + size > len) { continue; } + + u64 curr_size = decode_value(buf + field_pos, size, le); + + // Does this look like a size/offset field? + if (curr_size == 0 || curr_size > len) { continue; } + + // Pick a shift amount that will test this field size. 
+ u64 shift_amount = 0xff; // overflow the field boundary + if (size == 1) { + + u64 max_shift = 0xff - curr_size; + if (max_shift == 0) { continue; } + shift_amount = MIN(0x20, max_shift); + + } + + // Check if the field is blocked. + if (is_blocked(meta, field_pos, size)) { + + // printf("[FS] Field is blocked\n"); + continue; + + } + + fs_relation_t potential_rel = {.pos = field_pos, + .val = curr_size, + .anchor = (u64)-1, // unset + .insert = (u64)-1, // unset + .size = size, + .le = le, + .enabled = 1}; + + // Corrupt the field and measure lost features. + potential_rel.val += shift_amount; + rel_apply(buf, &potential_rel); + + loss_count = 0; + + lightweight_run(afl, buf, len); + for (u32 j = 0; j < index_count; j++) { + + u32 idx = index_buf[j]; + if (trace_bits[idx] == 0) { loss_buffer[loss_count++] = idx; } + + } + + // Undo the change to the buffer. + potential_rel.val -= shift_amount; + rel_apply(buf, &potential_rel); + potential_rel.val += shift_amount; + + if (loss_count < loss_threshold) { continue; } + + // printf("[FS] Testing relation: pos=%u size=%u le=%u shift=%u value=%u + // (loss: %d)\n", field_pos, size, le, shift_amount, curr_size, + // loss_count); + + // Next, we iterate over inflection points to find the best anchor. 
+ double curr_recover = FRAMESHIFT_RECOVER_PCT / 100.0; + + if (size == 1) { + + check_anchor(afl, field_pos + size, len, curr_size, field_pos, buf, + meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + + } else if (size == 2) { + + check_anchor(afl, 0, len, curr_size, field_pos, buf, meta, trace_bits, + loss_buffer, loss_count, scratch, shift_amount, + &potential_rel, &curr_recover); + check_anchor(afl, field_pos, len, curr_size, field_pos, buf, meta, + trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size, len, curr_size, field_pos, buf, + meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + + } else { + + check_anchor(afl, field_pos + size + 7, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 6, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 5, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 4, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 3, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 2, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size + 1, len, curr_size, field_pos, + buf, meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, 
&potential_rel, &curr_recover); + check_anchor(afl, 0, len, curr_size, field_pos, buf, meta, trace_bits, + loss_buffer, loss_count, scratch, shift_amount, + &potential_rel, &curr_recover); + check_anchor(afl, field_pos, len, curr_size, field_pos, buf, meta, + trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + check_anchor(afl, field_pos + size, len, curr_size, field_pos, buf, + meta, trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + + if (potential_rel.anchor == (u64)-1) { + + // Check other inflection points. + for (u32 j = 0; j < inflection_points_count; j++) { + + u32 anchor = inflection_points[j]; + check_anchor(afl, anchor, len, curr_size, field_pos, buf, meta, + trace_bits, loss_buffer, loss_count, scratch, + shift_amount, &potential_rel, &curr_recover); + + } + + } + + } + + // Check if we have a valid relation. + if (potential_rel.anchor == (u64)-1) { + + // No valid relation found, continue. + continue; + + } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, + "[FS] Found relation: pos=%u size=%u le=%u shift=%u value=%u + " " anchor = % u insert = % u(loss + : % d recover + : % .2f % %)\n ", field_pos, + size, + le, shift_amount, curr_size, potential_rel.anchor, + potential_rel.insert, loss_count, curr_recover); +#endif + + potential_rel.val = curr_size; + fs_add_relation(meta, &potential_rel); + + // Update the inflection points. + // Only size 4 and 8 are used for inflection points. + if (potential_rel.size == 4 || potential_rel.size == 8) { + + // Need space for 3 more points. 
+ if (inflection_points_count + 3 >= inflection_points_capacity) { + + inflection_points_capacity *= 2; + inflection_points = realloc( + inflection_points, inflection_points_capacity * sizeof(u32)); + + } + + inflection_points[inflection_points_count++] = potential_rel.pos; + inflection_points[inflection_points_count++] = potential_rel.anchor; + inflection_points[inflection_points_count++] = potential_rel.insert; + + } + + found = 1; + + } + + } + + if (!found) { + + // Didn't find relations this iteration, stop searching. + break; + + } + + } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "fooobar\n"); +#endif + + u64 time_end = get_cur_time(); + + afl->fs_stats.total_time_ms += time_end - time_start; + + afl->fs_stats.searched += 1; + if (meta->rel_count > 0) { afl->fs_stats.found += 1; } + +#ifdef FRAMESHIFT_DEBUG + fprintf(stderr, "done\n"); +#endif + +} + diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index ea44be5a5e..da177d373e 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -410,19 +410,46 @@ u8 fuzz_one_original(afl_state_t *afl) { u_simplestring_time_diff(time_tmp, afl->prev_run_time + get_cur_time(), afl->start_time); - ACTF( - "Fuzzing test case #%u (%u total, %s%llu crashes saved%s, state: %s, " - "mode=%s, " - "perf_score=%0.0f, weight=%0.0f, favorite=%u, was_fuzzed=%u, " - "exec_us=%llu, hits=%u, map=%u, ascii=%u, run_time=%s)...", - afl->current_entry, afl->queued_items, - afl->saved_crashes != 0 ? cRED : "", afl->saved_crashes, cRST, - get_fuzzing_state(afl), afl->fuzz_mode ? "exploit" : "explore", - afl->queue_cur->perf_score, afl->queue_cur->weight, - afl->queue_cur->favored, afl->queue_cur->was_fuzzed, - afl->queue_cur->exec_us, - likely(afl->n_fuzz) ? 
afl->n_fuzz[afl->queue_cur->n_fuzz_entry] : 0, - afl->queue_cur->bitmap_size, afl->queue_cur->is_ascii, time_tmp); + + if (afl->afl_env.afl_frameshift_enabled) { + + u8 search_time[64]; + u_simplestring_time_diff(search_time, afl->fs_stats.total_time_ms + 1, 1); + + ACTF( + "Fuzzing test case #%u (%u total, %llu crashes saved, state: %s, " + "mode=%s, " + "perf_score=%0.0f, weight=%0.0f, favorite=%u, was_fuzzed=%u, " + "exec_us=%llu, hits=%u, map=%u, ascii=%u, run_time=%s) FS (t=%s, " + "st=%llu, found=%u/%u)...", + afl->current_entry, afl->queued_items, afl->saved_crashes, + get_fuzzing_state(afl), afl->fuzz_mode ? "exploit" : "explore", + afl->queue_cur->perf_score, afl->queue_cur->weight, + afl->queue_cur->favored, afl->queue_cur->was_fuzzed, + afl->queue_cur->exec_us, + likely(afl->n_fuzz) ? afl->n_fuzz[afl->queue_cur->n_fuzz_entry] : 0, + afl->queue_cur->bitmap_size, afl->queue_cur->is_ascii, time_tmp, + search_time, afl->fs_stats.search_tests, afl->fs_stats.found, + afl->fs_stats.searched); + + } else { + + ACTF( + "Fuzzing test case #%u (%u total, %s%llu crashes saved%s, state: %s, " + "mode=%s, " + "perf_score=%0.0f, weight=%0.0f, favorite=%u, was_fuzzed=%u, " + "exec_us=%llu, hits=%u, map=%u, ascii=%u, run_time=%s)...", + afl->current_entry, afl->queued_items, + afl->saved_crashes != 0 ? cRED : "", afl->saved_crashes, cRST, + get_fuzzing_state(afl), afl->fuzz_mode ? "exploit" : "explore", + afl->queue_cur->perf_score, afl->queue_cur->weight, + afl->queue_cur->favored, afl->queue_cur->was_fuzzed, + afl->queue_cur->exec_us, + likely(afl->n_fuzz) ? 
afl->n_fuzz[afl->queue_cur->n_fuzz_entry] : 0, + afl->queue_cur->bitmap_size, afl->queue_cur->is_ascii, time_tmp); + + } + fflush(stdout); } @@ -507,6 +534,20 @@ u8 fuzz_one_original(afl_state_t *afl) { memcpy(out_buf, in_buf, len); + /************** + * FRAMESHIFT * + **************/ + + if (unlikely(afl->afl_env.afl_frameshift_enabled && + afl->queue_cur->fs_status == 0)) { + + frameshift_stage(afl); + + } + + // Frameshift: reload the original input meta + if (afl->afl_env.afl_frameshift_enabled) { fs_clone_meta(afl); } + /********************* * PERFORMANCE SCORE * *********************/ @@ -2159,6 +2200,9 @@ u8 fuzz_one_original(afl_state_t *afl) { afl->queue_cur->fname, afl->queue_cur->is_ascii, use_stacking); #endif + // Frameshift: save the current input meta + if (afl->afl_env.afl_frameshift_enabled) { fs_save(afl->fs_curr_meta); } + for (i = 0; i < use_stacking; ++i) { if (afl->custom_mutators_count) { @@ -2542,6 +2586,13 @@ u8 fuzz_one_original(afl_state_t *afl) { afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, clone_to, clone_len, 1); + + } + } else if (unlikely(temp_len < 8)) { break; @@ -2593,6 +2644,13 @@ u8 fuzz_one_original(afl_state_t *afl) { afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, clone_to, clone_len, 1); + + } + } else if (unlikely(temp_len < 8)) { break; @@ -2770,6 +2828,13 @@ u8 fuzz_one_original(afl_state_t *afl) { temp_len -= del_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_delete(afl->fs_curr_meta, del_from, del_len); + + } + break; } @@ -2827,6 +2892,13 @@ u8 fuzz_one_original(afl_state_t *afl) { temp_len -= del_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + 
fs_track_delete(afl->fs_curr_meta, del_from, del_len); + + } + break; } @@ -2866,6 +2938,13 @@ u8 fuzz_one_original(afl_state_t *afl) { afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, clone_to, clone_len, 1); + + } + break; } @@ -3007,6 +3086,13 @@ u8 fuzz_one_original(afl_state_t *afl) { afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += (new_len - old_len); + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, off, new_len, 1); + + } + } // fprintf(stderr, "AFTER : %s\n", out_buf); @@ -3100,6 +3186,13 @@ u8 fuzz_one_original(afl_state_t *afl) { memcpy(out_buf + insert_at, ptr, extra_len); temp_len += extra_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, insert_at, extra_len, 1); + + } + break; } @@ -3158,6 +3251,13 @@ u8 fuzz_one_original(afl_state_t *afl) { memcpy(out_buf + insert_at, ptr, extra_len); temp_len += extra_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, insert_at, extra_len, 1); + + } + break; } @@ -3271,6 +3371,13 @@ u8 fuzz_one_original(afl_state_t *afl) { afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_insert(afl->fs_curr_meta, clone_to, clone_len, 1); + + } + break; } @@ -3291,6 +3398,9 @@ u8 fuzz_one_original(afl_state_t *afl) { temp_len = len; memcpy(out_buf, in_buf, len); + // Frameshift: restore the original input meta + if (afl->afl_env.afl_frameshift_enabled) { fs_restore(afl->fs_curr_meta); } + /* If we're finding new stuff, let's run for a bit longer, limits permitting. 
*/ @@ -3350,6 +3460,9 @@ u8 fuzz_one_original(afl_state_t *afl) { u8 *new_buf; s32 f_diff, l_diff; + // Frameshift: reload the original input meta + if (afl->afl_env.afl_frameshift_enabled) { fs_clone_meta(afl); } + /* First of all, if we've modified in_buf for havoc, let's clean that up... */ @@ -3400,6 +3513,15 @@ u8 fuzz_one_original(afl_state_t *afl) { if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); + // Frameshift tracking + if (afl->afl_env.afl_frameshift_enabled) { + + fs_track_delete(afl->fs_curr_meta, split_at, + afl->queue_cur->len - split_at); + fs_track_insert(afl->fs_curr_meta, split_at, target->len - split_at, 1); + + } + goto custom_mutator_stage; } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 2b8dbd7110..0e57fad375 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -761,6 +761,9 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { q->skipdet_e = (struct skipdet_entry *)ck_alloc(sizeof(struct skipdet_entry)); + q->fs_meta = NULL; + q->fs_status = 0; + } /* Destroy the entire queue. */ diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 48a56053c6..fea992a9fc 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -1384,6 +1384,13 @@ u8 __attribute__((hot)) common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u8 fault; + if (afl->afl_env.afl_frameshift_enabled && afl->fs_curr_meta) { + + // Apply relation updates before running. 
+ fs_sanitize(afl->fs_curr_meta, out_buf); + + } + if (unlikely(len = write_to_testcase(afl, (void **)&out_buf, len, 0)) == 0) { return 0; diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 993bb5dbbc..fd65ab0ff7 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -142,6 +142,13 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->havoc_prof = (struct havoc_profile *)ck_alloc(sizeof(struct havoc_profile)); + afl->frameshift_index_buffer = NULL; + afl->fs_curr_meta = NULL; + afl->fs_stats.found = 0; + afl->fs_stats.searched = 0; + afl->fs_stats.search_tests = 0; + afl->fs_stats.total_time_ms = 0; + init_mopt_globals(afl); list_append(&afl_states, afl); @@ -766,6 +773,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) { } + } else if (!strncmp(env, "AFL_FRAMESHIFT_ENABLED", + + afl_environment_variable_len)) { + + afl->afl_env.afl_frameshift_enabled = + get_afl_env(afl_environment_variables[i]) ? 1 : 0; + } } else {