diff --git a/learning.py b/learning.py
index 5f1ba596e..f5bc5d835 100644
--- a/learning.py
+++ b/learning.py
@@ -984,8 +984,8 @@ def flatten(seqs): return sum(seqs, [])
 
 
 def err_ratio(predict, dataset, examples=None, verbose=0):
-    """Return the proportion of the examples that are NOT correctly predicted."""
-    """verbose - 0: No output; 1: Output wrong; 2 (or greater): Output correct"""
+    """Return the proportion of the examples that are NOT correctly predicted.
+    verbose - 0: No output; 1: Output wrong; 2 (or greater): Output correct"""
     if examples is None:
         examples = dataset.examples
     if len(examples) == 0:
diff --git a/tests/test_csp.py b/tests/test_csp.py
index 5a10f5ce5..f303af6f9 100644
--- a/tests/test_csp.py
+++ b/tests/test_csp.py
@@ -1,5 +1,10 @@
 import pytest
+from utils import failure_test
 from csp import *
+import random
+
+
+random.seed("aima-python")
 
 
 def test_csp_assign():
@@ -331,10 +336,12 @@ def test_backtracking_search():
 
 
 def test_min_conflicts():
-    random.seed("aima-python")
     assert min_conflicts(australia)
-    assert min_conflicts(usa)
     assert min_conflicts(france)
+
+    tests = [(usa, None)] * 3
+    assert failure_test(min_conflicts, tests) > 1/3
+
     australia_impossible = MapColoringCSP(list('RG'), 'SA: WA NT Q NSW V; NT: WA Q; NSW: Q V; T: ')
     assert min_conflicts(australia_impossible, 1000) is None
 
@@ -351,7 +358,7 @@ def test_parse_neighbours():
 def test_topological_sort():
     root = 'NT'
     Sort, Parents = topological_sort(australia,root)
-    
+
     assert Sort == ['NT','SA','Q','NSW','V','WA']
     assert Parents['NT'] == None
     assert Parents['SA'] == 'NT'
diff --git a/tests/test_learning.py b/tests/test_learning.py
index aff8903a4..8a21d6462 100644
--- a/tests/test_learning.py
+++ b/tests/test_learning.py
@@ -168,9 +168,13 @@ def test_decision_tree_learner():
 def test_random_forest():
     iris = DataSet(name="iris")
     rF = RandomForest(iris)
-    assert rF([5, 3, 1, 0.1]) == "setosa"
-    assert rF([6, 5, 3, 1]) == "versicolor"
-    assert rF([7.5, 4, 6, 2]) == "virginica"
+    tests = [([5.0, 3.0, 1.0, 0.1], "setosa"),
+             ([5.1, 3.3, 1.1, 0.1], "setosa"),
+             ([6.0, 5.0, 3.0, 1.0], "versicolor"),
+             ([6.1, 2.2, 3.5, 1.0], "versicolor"),
+             ([7.5, 4.1, 6.2, 2.3], "virginica"),
+             ([7.3, 3.7, 6.1, 2.5], "virginica")]
+    assert grade_learner(rF, tests) >= 1/3
 
 
 def test_neural_network_learner():
diff --git a/utils.py b/utils.py
index d2720abe1..e5dbfd5cd 100644
--- a/utils.py
+++ b/utils.py
@@ -416,6 +416,16 @@ def open_data(name, mode='r'):
     return open(aima_file)
 
 
+def failure_test(algorithm, tests):
+    """Grades the given algorithm based on how many tests it passes.
+    Most algorithms have arbitrary output on correct execution, which is difficult
+    to check for correctness. On the other hand, a lot of algorithms output something
+    particular on fail (for example, False, or None).
+    tests is a list with each element in the form: (values, failure_output)."""
+    from statistics import mean
+    return mean(int(algorithm(x) != y) for x, y in tests)
+
+
 # ______________________________________________________________________________
 # Expressions
 
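
The new failure_test helper in utils.py grades a stochastic algorithm by the fraction of test cases whose output differs from a known failure value (for min_conflicts, the None it returns when no solution is found), which is why test_min_conflicts can tolerate occasional failures on usa as long as the score stays above 1/3. Below is a minimal, self-contained sketch of that grading logic; flaky_solver is a hypothetical stand-in for a real stochastic solver and is not part of the patch.

    # Sketch of the failure_test grading logic from utils.py, exercised with a
    # hypothetical flaky_solver standing in for a stochastic algorithm such as
    # min_conflicts, whose failure output is None.
    import random
    from statistics import mean


    def failure_test(algorithm, tests):
        """Fraction of test cases whose output differs from the known failure output."""
        return mean(int(algorithm(x) != y) for x, y in tests)


    def flaky_solver(problem):
        # Succeeds (returns the problem unchanged) about half the time, else returns None.
        return problem if random.random() < 0.5 else None


    random.seed(0)
    score = failure_test(flaky_solver, [("dummy-problem", None)] * 100)
    print(score)  # roughly 0.5: about half of the 100 runs avoided the failure output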