diff --git a/.landscape.yml b/.landscape.yml new file mode 100644 index 00000000..def58547 --- /dev/null +++ b/.landscape.yml @@ -0,0 +1,11 @@ +python-targets: + - 2 + - 3 +doc-warnings: true +max-line-length: 100 +pyroma: + run: true +ignore-patterns: + - ^docs/*.py$ +ignore-paths: + - examples diff --git a/.travis.yml b/.travis.yml index 18fa9b13..6b03021b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,8 +10,18 @@ python: - 3.6-dev - 3.7-dev - nightly - - pypy - - pypy3.5-5.8.0 +env: +matrix: + include: + - python: pypy2.7-5.8.0 + - python: pypy3.5-5.8.0 + - python: pypy2.7-5.8.0 + env: TRY_PYPY=1 + - python: pypy3.5-5.8.0 + env: TRY_PYPY=1 + fast_finish: true + allow_failures: + - env: TRY_PYPY=1 install: - pip install --upgrade nose coveralls coverage - if [[ $TRAVIS_PYTHON_VERSION == 'pypy'* ]]; then export TRAVIS_WAIT=45; else export TRAVIS_WAIT=20; fi diff --git a/docs/conf.py b/docs/conf.py index 961f5082..bc7f0085 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -40,7 +40,8 @@ 'sphinx.ext.extlinks', # note: the below does not actually require (at least the Python) graphviz # package to be installed - 'sphinx.ext.inheritance_diagram', + 'sphinx.ext.inheritance_diagram', + 'sphinx.ext.coverage', ] extlinks = {'pytypes': ('https://docs.python.org/3.5/library/stdtypes.html#%s', diff --git a/docs/module_summaries.rst b/docs/module_summaries.rst index 969b8bb7..f6a2ce98 100644 --- a/docs/module_summaries.rst +++ b/docs/module_summaries.rst @@ -429,9 +429,27 @@ Uses :py:mod:`pickle` to save and restore populations (and other aspects of the :type time_interval_seconds: :pytypes:`float ` or None :param str filename_prefix: The prefix for the checkpoint file names. + .. py:method:: start_generation(generation) + + Saves the current generation number for use by :py:meth:`save_checkpoint()`. + + :param int generation: Current generation. + + .. 
py:method:: end_generation(config, population, species_set) + + Checks to see whether :py:meth:`save_checkpoint()` needs to be called, and if so calls it, then updates the last_generation_checkpoint and the + corresponding last_time_checkpoint attributes. + + :param config: The `config.Config` configuration instance to be used. + :type config: :datamodel:`instance ` + :param population: A population as created by :py:meth:`reproduction.DefaultReproduction.create_new` or a compatible implementation. + :type population: dict(int, :datamodel:`object `) + :param species: A :py:class:`species.DefaultSpeciesSet` (or compatible implementation) instance. + :type species: :datamodel:`instance ` + .. py:method:: save_checkpoint(config, population, species, generation) - Saves the current simulation (including randomization) state to (if using the default ``neat-checkpoint-`` for ``filename_prefix``) + Saves the current simulation state (including randomization state) to (if using the default ``neat-checkpoint-`` for ``filename_prefix``) :file:`neat-checkpoint-{generation}`, with ``generation`` being the generation number. :param config: The `config.Config` configuration instance to be used. @@ -632,6 +650,21 @@ ctrnn Resets the time and all node activations to 0 (necessary due to otherwise retaining state via :term:`recurrent` connections). + .. py:method:: set_node_value(node_key, value) + + Sets the current node activation for the particular node selected. + + :param int node_key: The :term:`key` for the node to be altered. + :param float value: What to set the activation of the node to. + + .. index:: TODO + + .. py:method:: get_max_time_step() + + Planned to return the maximum time step that will be stable for the current network; not yet implemented. + + :raises NotImplementedError: Always. + .. index:: ! continuous-time .. 
py:method:: advance(inputs, advance_time, time_step=None) @@ -643,7 +676,7 @@ ctrnn :type inputs: list(float) :param advance_time: How much time to advance the network before returning the resulting outputs. :type advance_time: :pytypes:`float ` - :param time_step: How much time per step to advance the network; the default of ``None`` will currently result in an error, but it is planned to determine it automatically. + :param time_step: How much time per step to advance the network; the default of ``None`` will currently result in an error, but it is planned to determine it automatically using :py:meth:`get_max_time_step()`. :type time_step: :pytypes:`float ` or None :return: The values for the :term:`output nodes `. :rtype: list(float) @@ -1555,7 +1588,19 @@ See http://www.izhikevich.org/publications/spikes.pdf. .. py:class:: IZGenome(DefaultGenome) - Contains the parse_config class method for iznn genome configuration, which returns a :py:class:`genome.DefaultGenomeConfig` instance. + Sets up the genome to use :py:class:`IZNodeGene` instances for node genes, and :py:class:`genes.DefaultConnectionGene` instances for + connection genes. + + .. py:classmethod:: parse_config(param_dict) + + Required interface method. Provides IZNodeGene :term:`node` and default :term:`connection` :term:`gene` specifications (from :py:mod:`genes`) and + uses `DefaultGenomeConfig` to do the rest of the configuration. + + :param param_dict: Dictionary of parameters from configuration file. + :type param_dict: dict(str, str) + :return: Configuration object; considered opaque by rest of code, so type may vary by implementation (here, a `DefaultGenomeConfig` instance). + :rtype: :datamodel:`instance ` + .. py:class:: IZNeuron(bias, a, b, c, d, inputs) @@ -1863,6 +1908,20 @@ Implements the core evolution algorithm. :type initial_state: None or tuple(:datamodel:`instance `, :datamodel:`instance `, int) :raises RuntimeError: If the :ref:`fitness_criterion ` function is invalid. + .. 
py:method:: add_reporter(reporter) + + Adds a reporter to those that will be notified at appropriate points. Uses :py:meth:`ReporterSet.add() `. + + :param reporter: A reporter callable via a :py:class:`reporting.ReporterSet` instance. + :type reporter: :datamodel:`instance ` + + .. py:method:: remove_reporter(reporter) + + Removes a reporter from those that will be notified at appropriate points. Uses :py:meth:`ReporterSet.remove() `. + + :param reporter: A reporter callable via a :py:class:`reporting.ReporterSet` instance. + :type reporter: :datamodel:`instance ` + .. index:: ! no_fitness_termination .. index:: ! reset_on_extinction .. index:: ! generation diff --git a/examples/picture2d/evolve_novelty.py b/examples/picture2d/evolve_novelty.py index 00819b5e..30edb589 100644 --- a/examples/picture2d/evolve_novelty.py +++ b/examples/picture2d/evolve_novelty.py @@ -1,4 +1,7 @@ -from Pillow import Image +try: + from Pillow import Image +expect ImportError: + from PIL import Image import os import random from multiprocessing import Pool @@ -101,6 +104,9 @@ def run(): pop.run(ne.evaluate, 1) winner = stats.best_genome() + + pop.best_genome = None # code assumes fitnesses remain the same... 
+ if ne.scheme == 'gray': image = eval_gray_image(winner, config, full_scale * width, full_scale * height) elif ne.scheme == 'color': diff --git a/examples/picture2d/render.py b/examples/picture2d/render.py index 0e416573..54d0aaf4 100644 --- a/examples/picture2d/render.py +++ b/examples/picture2d/render.py @@ -1,4 +1,7 @@ +from __future__ import print_function + import pickle + import pygame import evolve @@ -11,7 +14,7 @@ with open("genome-20219-701.bin", "rb") as f: g = pickle.load(f) - print g + print(g) node_names = {0: 'x', 1: 'y', 2: 'gray'} visualize.draw_net(g, view=True, filename="picture-net.gv", show_disabled=False, prune_unused=True, node_names=node_names) diff --git a/neat/distributed.py b/neat/distributed.py index 57adc4a8..22d083ff 100644 --- a/neat/distributed.py +++ b/neat/distributed.py @@ -21,10 +21,14 @@ specified. NOTE: - This module is in a **beta** state, and still *unstable* even in single-machine testing. Reliability is likely to vary, including depending on the Python version - and implementation (e.g., cpython vs pypy) in use and the likelihoods of timeouts (due to machine and/or network slowness). In particular, while the code can try - to reconnect between between primary and secondary nodes, as noted in the `multiprocessing` documentation this may not work due to data loss/corruption. Note also - that this module is not responsible for starting the script copies on the different compute nodes, since this is very site/configuration-dependent. + This module is in a **beta** state, and still *unstable* even in single-machine + testing. Reliability is likely to vary, including depending on the Python version + and implementation (e.g., cpython vs pypy) in use and the likelihoods of timeouts + (due to machine and/or network slowness). In particular, while the code can try + to reconnect between between primary and secondary nodes, as noted in the + `multiprocessing` documentation this may not work due to data loss/corruption. 
+ Note also that this module is *not* responsible for starting the script copies + on the different compute nodes, since this is very site/configuration-dependent. Usage: @@ -87,11 +91,10 @@ MODE_PRIMARY = MODE_MASTER = 1 # enforce primary mode MODE_SECONDARY = MODE_SLAVE = 2 # enforce secondary mode -# states to determine whether the secondaries should shut down -_STATE_RUNNING = 0 -_STATE_SHUTDOWN = 1 -_STATE_FORCED_SHUTDOWN = 2 - +# what a return from _check_exception means +_EXCEPTION_TYPE_OK = 1 # queue empty and similar; try again +_EXCEPTION_TYPE_UNCERTAIN = 0 # disconnected but may be able to reconnect +_EXCEPTION_TYPE_BAD = -1 # either raise it again or immediately return and exit with non-zero status code class ModeError(RuntimeError): """ @@ -179,50 +182,34 @@ def __init__(self, addr, authkey, mode, start=False): self.authkey = authkey self.mode = _determine_mode(addr, mode) self.manager = None - self._secondary_state= multiprocessing.managers.Value(int, _STATE_RUNNING) if start: self.start() - def __reduce__(self): + def __reduce__(self): # pragma: no cover """ This method is used by pickle to serialize instances of this class. 
""" return ( self.__class__, - (self.addr, self.authkey, self.mode, True), + (self.addr, self.authkey, self.mode, bool(self.manager is not None)), ) def start(self): """Starts or connects to the manager.""" - if self.mode == MODE_PRIMARY: - i = self._start() - else: - i = self._connect() - self.manager = i + if self.manager is None: + if self.mode == MODE_PRIMARY: + i = self._start() + else: + i = self._connect() + self.manager = i def stop(self): """Stops the manager.""" - self.manager.shutdown() - - def set_secondary_state(self, value): - """Sets the value for 'secondary_state'.""" - if value not in (_STATE_RUNNING, _STATE_SHUTDOWN, _STATE_FORCED_SHUTDOWN): - raise ValueError( - "State {!r} is invalid - needs to be one of _STATE_RUNNING, _STATE_SHUTDOWN, or _STATE_FORCED_SHUTDOWN".format( - value) - ) - if self.manager is None: - raise RuntimeError("Manager not started") - self.manager.set_state(value) - - def _get_secondary_state(self): - """ - Returns the value for 'secondary_state'. - This is required for the manager. - """ - return self._secondary_state + #self.manager.shutdown() # claims there isn't any such attribute ?!? + self.manager = None - def _get_manager_class(self, register_callables=False): + @staticmethod + def _get_manager_class(register_callables=False): """ Returns a new 'Manager' subclass with registered methods. If 'register_callable' is True, defines the 'callable' arguments. 
@@ -236,9 +223,9 @@ class _EvaluatorSyncManager(managers.BaseManager): """ pass - inqueue = queue.Queue() - outqueue = queue.Queue() - namespace = Namespace() + inqueue = queue.Queue() # may need to be one from multiprocessing.managers.SyncManager + outqueue = queue.Queue() # ditto + namespace = Namespace() # ditto if register_callables: _EvaluatorSyncManager.register( @@ -249,14 +236,6 @@ class _EvaluatorSyncManager(managers.BaseManager): "get_outqueue", callable=lambda: outqueue, ) - _EvaluatorSyncManager.register( - "get_state", - callable=self._get_secondary_state, - ) - _EvaluatorSyncManager.register( - "set_state", - callable=lambda v: self._secondary_state.set(v), - ) _EvaluatorSyncManager.register( "get_namespace", callable=lambda: namespace, @@ -270,12 +249,6 @@ class _EvaluatorSyncManager(managers.BaseManager): _EvaluatorSyncManager.register( "get_outqueue", ) - _EvaluatorSyncManager.register( - "get_state", - ) - _EvaluatorSyncManager.register( - "set_state", - ) _EvaluatorSyncManager.register( "get_namespace", ) @@ -295,12 +268,6 @@ def _start(self): ins.start() return ins - @property - def secondary_state(self): - """Whether the secondary nodes should still process elements.""" - v = self.manager.get_state() - return v.get() - def get_inqueue(self): """Returns the inqueue.""" if self.manager is None: @@ -375,6 +342,11 @@ def __init__( self.outqueue = None self.namespace = None self.started = False + self.exit_string = None + self.exit_on_stop = True + self.reconnect = False + self.reconnect_max_time = None + self.n_tasks = None def __getstate__(self): """Required by the pickle protocol.""" @@ -395,7 +367,13 @@ def is_master(self): # pragma: no cover warnings.warn("Use is_primary, not is_master", DeprecationWarning) return self.is_primary() - def start(self, exit_on_stop=True, secondary_wait=0, reconnect=False): + def _do_exit(self): + if self.exit_string is None: + sys.exit(0) + else: + sys.exit(self.exit_string) + + def start(self, exit_on_stop=True, 
secondary_wait=0, reconnect=False, reconnect_max_time=None): """ If the DistributedEvaluator is in primary mode, starts the manager process and returns. In this case, the ``exit_on_stop`` argument will @@ -414,14 +392,31 @@ def start(self, exit_on_stop=True, secondary_wait=0, reconnect=False): if self.started: raise RuntimeError("DistributedEvaluator already started!") self.started = True + self.exit_on_stop = exit_on_stop + self.reconnect = reconnect + if reconnect_max_time is None: + if reconnect: + reconnect_max_time = max((5*60),(15*max(5,self.worker_timeout))) + else: + reconnect_max_time = max(60,(5*max(1,self.worker_timeout))) + self.reconnect_max_time = max(0.3,reconnect_max_time) if self.mode == MODE_PRIMARY: self._start_primary() elif self.mode == MODE_SECONDARY: time.sleep(secondary_wait) - self._start_secondary() - self._secondary_loop(reconnect=reconnect) + while True: + self._start_secondary() + self._secondary_loop(reconnect_max_time=reconnect_max_time) + if self.exit_on_stop: + self._do_exit() + else: + self.inqueue = self.outqueue = self.namespace = None + if self.reconnect: + self.em.stop() + else: + break if exit_on_stop: - sys.exit(0) + self._do_exit() else: raise ValueError("Invalid mode {!r}!".format(self.mode)) @@ -438,21 +433,44 @@ def stop(self, wait=1, shutdown=True, force_secondary_shutdown=False): raise ModeError("Not in primary mode!") if not self.started: raise RuntimeError("Not yet started!") - if force_secondary_shutdown: - state = _STATE_FORCED_SHUTDOWN - else: - state = _STATE_SHUTDOWN - self.em.set_secondary_state(state) - time.sleep(wait) + start_time = time.time() + num_added = 0 + if self.n_tasks is None: # pragma: no cover + self.n_tasks = max(1, wait, self.worker_timeout)*5 + warnings.warn("Self.n_tasks is None; estimating at {:n}".format(self.n_tasks)) + while (num_added < self.n_tasks) and ((time.time() - start_time) < + max(1, + self.reconnect_max_time, + wait, + self.worker_timeout)): + try: + if force_secondary_shutdown: 
+ self.inqueue.put(0, block=True, timeout=0.2) + else: + self.inqueue.put(1, block=True, timeout=0.2) + except (EOFError, IOError, OSError, socket.gaierror, TypeError, queue.Full, + managers.RemoteError, multiprocessing.ProcessError) as e: + if ("timed" in repr(e).lower()) or ("timeout" in repr(e).lower()): + if (time.time() - start_time) < max(1, wait, self.worker_timeout): + num_added += 1 + continue + else: + break + else: + break + else: + num_added += 1 + time_passed = time.time() - start_time + if time_passed < wait: + time.sleep(wait - time_passed) if shutdown: self.em.stop() self.started = False - self.inqueue = self.outqueue = self.namespace = None + self.outqueue = self.inqueue = self.namespace = None def _start_primary(self): """Start as the primary""" self.em.start() - self.em.set_secondary_state(_STATE_RUNNING) self._set_shared_instances() def _start_secondary(self): @@ -468,55 +486,88 @@ def _set_shared_instances(self): def _reset_em(self): """Resets self.em and the shared instances.""" - self.em = _ExtendedManager(self.addr, self.authkey, mode=self.mode, start=False) - self.em.start() + self.em = _ExtendedManager(self.addr, self.authkey, mode=self.mode, start=True) self._set_shared_instances() - def _secondary_loop(self, reconnect=False): + @staticmethod + def _check_exception(e): + string = repr(e).lower() + if ('timed' in string) or ('timeout' in string): + return _EXCEPTION_TYPE_OK + elif isinstance(e, (EOFError, TypeError, socket.gaierror)): + return _EXCEPTION_TYPE_UNCERTAIN + elif (('eoferror' in string) or ('typeerror' in string) or ('gaierror' in string) + or ('pipeerror' in string) or ('authenticationerror' in string) + or ('refused' in string) or ('file descriptor' in string)): + return _EXCEPTION_TYPE_UNCERTAIN + return _EXCEPTION_TYPE_BAD + + def _secondary_loop(self, reconnect_max_time=(5*60)): """The worker loop for the secondary nodes.""" if self.num_workers > 1: pool = multiprocessing.Pool(self.num_workers) else: pool = None 
should_reconnect = True + if self.reconnect: + em_bad = False + else: + em_bad = True while should_reconnect: - i = 0 + last_time_done = time.time() # so that if loops below, have a chance to check _reset_em running = True try: self._reset_em() - except (socket.error, EOFError, IOError, OSError, socket.gaierror, TypeError): - continue + except (EOFError, IOError, OSError, socket.gaierror, TypeError, + managers.RemoteError, multiprocessing.ProcessError) as e: + if (time.time() - last_time_done) >= reconnect_max_time: + should_reconnect = False + em_bad = True + if self._check_exception(e) == _EXCEPTION_TYPE_BAD: # pragma: no cover + self.exit_on_stop = True + self.exit_string = repr(e) + break + elif self._check_exception(e) == _EXCEPTION_TYPE_BAD: # pragma: no cover + raise + else: + continue + last_time_done = time.time() # being successful at reconnecting can be used as a keepalive while running: - i += 1 - if i % 5 == 0: - # for better performance, only check every 5 cycles - try: - state = self.em.secondary_state - except (socket.error, EOFError, IOError, OSError, socket.gaierror, TypeError): - if not reconnect: - raise - else: - break - if state == _STATE_FORCED_SHUTDOWN: - running = False - should_reconnect = False - elif state == _STATE_SHUTDOWN: - running = False - if not running: - continue try: tasks = self.inqueue.get(block=True, timeout=0.2) except queue.Empty: continue - except (socket.error, EOFError, IOError, OSError, socket.gaierror, TypeError): - break - except (managers.RemoteError, multiprocessing.ProcessError) as e: - if ('Empty' in repr(e)) or ('TimeoutError' in repr(e)): + except (EOFError, TypeError, socket.gaierror, + managers.RemoteError, multiprocessing.ProcessError, IOError, OSError) as e: + if ('empty' in repr(e).lower()): continue - if (('EOFError' in repr(e)) or ('PipeError' in repr(e)) or - ('AuthenticationError' in repr(e))): # Second for Python 3.X, Third for 3.6+ + curr_status = self._check_exception(e) + if curr_status in 
(_EXCEPTION_TYPE_OK, _EXCEPTION_TYPE_UNCERTAIN): + if (time.time() - last_time_done) >= reconnect_max_time: + if em_bad: + should_reconnect = False + break + elif curr_status == _EXCEPTION_TYPE_OK: + continue + else: + break + elif (time.time() - last_time_done) >= reconnect_max_time: # pragma: no cover + self.exit_on_stop = True + self.exit_string = repr(e) + should_reconnect = False break - raise + else: # pragma: no cover + raise + + if isinstance(tasks, int): # from primary + running = False + should_reconnect = False + if tasks and self.reconnect: + self.exit_on_stop = False + elif not tasks: + self.reconnect = False + break + last_time_done = time.time() if pool is None: res = [] for genome_id, genome, config in tasks: @@ -536,20 +587,38 @@ def _secondary_loop(self, reconnect=False): job.get(timeout=self.worker_timeout) for job in jobs ] res = zip(genome_ids, results) + last_time_done = time.time() try: self.outqueue.put(res) - except (socket.error, EOFError, IOError, OSError, socket.gaierror, TypeError): - break - except (managers.RemoteError, multiprocessing.ProcessError) as e: - if ('Empty' in repr(e)) or ('TimeoutError' in repr(e)): + except queue.Full: # pragma: no cover + continue + except (EOFError, TypeError, socket.gaierror, + managers.RemoteError, multiprocessing.ProcessError, + IOError, OSError) as e: + if ('full' in repr(e).lower()): continue - if (('EOFError' in repr(e)) or ('PipeError' in repr(e)) or - ('AuthenticationError' in repr(e))): # Second for Python 3.X, Third for 3.6+ + curr_status = self._check_exception(e) + if curr_status in (_EXCEPTION_TYPE_OK, _EXCEPTION_TYPE_UNCERTAIN): + if (time.time() - last_time_done) >= reconnect_max_time: + if em_bad: + should_reconnect = False + break + elif curr_status == _EXCEPTION_TYPE_OK: + continue + else: + break + elif (time.time() - last_time_done) >= reconnect_max_time: # pragma: no cover + self.exit_on_stop = True + self.exit_string = repr(e) + should_reconnect = False break - raise - - if not 
reconnect: - should_reconnect = False + else: # pragma: no cover + raise + else: + last_time_done = time.time() + if ((time.time() - last_time_done) >= reconnect_max_time): + if em_bad: + should_reconnect = False break if pool is not None: pool.terminate() @@ -567,12 +636,12 @@ def evaluate(self, genomes, config): tasks = chunked(tasks, self.secondary_chunksize) n_tasks = len(tasks) for task in tasks: - self.inqueue.put(task) + self.inqueue.put(task) # should this be w/timeouts and checking for exceptions? tresults = [] while len(tresults) < n_tasks: try: sr = self.outqueue.get(block=True, timeout=0.2) - except (queue.Empty, managers.RemoteError): + except (queue.Empty, managers.RemoteError): # more detailed check? continue tresults.append(sr) results = [] @@ -581,3 +650,4 @@ def evaluate(self, genomes, config): for genome_id, fitness in results: genome = id2genome[genome_id] genome.fitness = fitness + self.n_tasks = n_tasks diff --git a/neat/genome.py b/neat/genome.py index 7108bb5d..782ee001 100644 --- a/neat/genome.py +++ b/neat/genome.py @@ -38,7 +38,7 @@ def __init__(self, params): ConfigParameter('conn_delete_prob', float), ConfigParameter('node_add_prob', float), ConfigParameter('node_delete_prob', float), - ConfigParameter('single_structural_mutation', bool, 'false'), + ConfigParameter('single_structural_mutation', bool, False), ConfigParameter('structural_mutation_surer', str, 'default'), ConfigParameter('initial_connection', str, 'unconnected')] diff --git a/neat/math_util.py b/neat/math_util.py index 3137f9c8..04d3b5d9 100644 --- a/neat/math_util.py +++ b/neat/math_util.py @@ -1,8 +1,11 @@ """Commonly used functions not available in the Python2 standard library.""" from __future__ import division -from math import sqrt, exp +import math +from sys import float_info + +NORM_EPSILON = math.pow(float_info.epsilon, 0.25) # half-precision works for machine learning def mean(values): values = list(values) @@ -15,6 +18,10 @@ def median(values): return 
values[len(values) // 2] def median2(values): + """ + Returns the median of the input values; + if there are an even number of inputs, returns the mean of the middle two. + """ values = list(values) n = len(values) if n <= 2: @@ -32,14 +39,14 @@ def variance(values): def stdev(values): - return sqrt(variance(values)) + return math.sqrt(variance(values)) def softmax(values): """ Compute the softmax of the given value set, v_i = exp(v_i) / s, where s = sum(exp(v_0), exp(v_1), ..).""" - e_values = list(map(exp, values)) + e_values = list(map(math.exp, values)) s = sum(e_values) inv_s = 1.0 / s return [ev * inv_s for ev in e_values] diff --git a/neat/reproduction.py b/neat/reproduction.py index fa50ba1d..fc720cc2 100644 --- a/neat/reproduction.py +++ b/neat/reproduction.py @@ -2,14 +2,16 @@ Handles creation of genomes, either from scratch or by sexual or asexual reproduction from parents. """ -from __future__ import division +from __future__ import division, print_function import math import random + from itertools import count +from sys import stderr, float_info from neat.config import ConfigParameter, DefaultClassConfig -from neat.math_util import mean +from neat.math_util import mean, NORM_EPSILON from neat.six_util import iteritems, itervalues # TODO: Provide some sort of optional cross-species performance criteria, which @@ -28,7 +30,8 @@ def parse_config(cls, param_dict): return DefaultClassConfig(param_dict, [ConfigParameter('elitism', int, 0), ConfigParameter('survival_threshold', float, 0.2), - ConfigParameter('min_species_size', int, 2)]) + ConfigParameter('min_species_size', int, 2), + ConfigParameter('fitness_min_divisor', float, 1.0)]) def __init__(self, config, reporters, stagnation): # pylint: disable=super-init-not-called @@ -38,6 +41,19 @@ def __init__(self, config, reporters, stagnation): self.stagnation = stagnation self.ancestors = {} + if config.fitness_min_divisor < 0.0: + raise RuntimeError( + "Fitness_min_divisor cannot be negative 
({0:n})".format( + config.fitness_min_divisor)) + elif config.fitness_min_divisor == 0.0: + config.fitness_min_divisor = NORM_EPSILON + elif config.fitness_min_divisor < float_info.epsilon: + print("Fitness_min_divisor {0:n} is too low; increasing to {1:n}".format( + config.fitness_min_divisor,float_info.epsilon), file=stderr) + stderr.flush() + config.fitness_min_divisor = float_info.epsilon + + def create_new(self, genome_type, genome_config, num_genomes): new_genomes = {} for i in range(num_genomes): @@ -115,8 +131,7 @@ def reproduce(self, config, species, pop_size, generation): min_fitness = min(all_fitnesses) max_fitness = max(all_fitnesses) # Do not allow the fitness range to be zero, as we divide by it below. - # TODO: The ``1.0`` below is rather arbitrary, and should be configurable. - fitness_range = max(1.0, max_fitness - min_fitness) + fitness_range = max(self.reproduction_config.fitness_min_divisor, max_fitness - min_fitness) for afs in remaining_species: # Compute adjusted fitness. 
msf = mean([m.fitness for m in itervalues(afs.members)]) diff --git a/tests/bad_configurationB b/tests/bad_configurationB new file mode 100644 index 00000000..7ca9be1e --- /dev/null +++ b/tests/bad_configurationB @@ -0,0 +1,81 @@ +[NEAT] +fitness_criterion = max +fitness_threshold = 0.9 +pop_size = 150 +reset_on_extinction = False + +[DefaultGenome] +# node activation options +activation_default = sigmoid +activation_mutate_rate = 0.0 +activation_options = sigmoid + +# node aggregation options +aggregation_default = sum +aggregation_mutate_rate = 0.0 +aggregation_options = sum + +# node bias options +bias_init_mean = 0.0 +bias_init_stdev = 1.0 +bias_max_value = 30.0 +bias_min_value = -30.0 +bias_mutate_power = 0.5 +bias_mutate_rate = 0.7 +bias_replace_rate = 0.1 + +# genome compatibility options +compatibility_disjoint_coefficient = 1.0 +compatibility_weight_coefficient = 0.5 + +# connection add/remove rates +conn_add_prob = 0.5 +conn_delete_prob = 0.5 + +# connection enable options +enabled_default = True +enabled_mutate_rate = 0.01 + +feed_forward = True +initial_connection = full + +# node add/remove rates +node_add_prob = 0.2 +node_delete_prob = 0.2 + +# network parameters +num_hidden = 0 +num_inputs = 2 +num_outputs = 1 + +# node response options +response_init_mean = 1.0 +response_init_stdev = 0.0 +response_max_value = 30.0 +response_min_value = -30.0 +response_mutate_power = 0.0 +response_mutate_rate = 0.0 +response_replace_rate = 0.0 + +# connection weight options +weight_init_mean = 0.0 +weight_init_stdev = 1.0 +weight_max_value = 30 +weight_min_value = -30 +weight_mutate_power = 0.5 +weight_mutate_rate = 0.8 +weight_replace_rate = 0.1 + +[DefaultSpeciesSet] +compatibility_threshold = 3.0 + +[DefaultStagnation] +species_fitness_func = max +max_stagnation = 20 +species_elitism = 1 + +[DefaultReproduction] +elitism = 2 +survival_threshold = 0.2 +min_species_size = 2 +fitness_min_divisor = -1.0 \ No newline at end of file diff --git 
a/tests/test_configuration4 b/tests/test_configuration4 index b51406f6..0e3c93a6 100644 --- a/tests/test_configuration4 +++ b/tests/test_configuration4 @@ -85,3 +85,4 @@ species_elitism = 0 elitism = 2 survival_threshold = 0.2 min_species_size = 2 +fitness_min_divisor = 0.5 \ No newline at end of file diff --git a/tests/test_configuration5 b/tests/test_configuration5 index 9b0926b5..c9828663 100644 --- a/tests/test_configuration5 +++ b/tests/test_configuration5 @@ -85,3 +85,4 @@ species_elitism = 0 elitism = 2 survival_threshold = 0.2 min_species_size = 2 +fitness_min_divisor = 0.0 \ No newline at end of file diff --git a/tests/test_distributed.py b/tests/test_distributed.py index a6b929c9..63b1ff68 100644 --- a/tests/test_distributed.py +++ b/tests/test_distributed.py @@ -18,11 +18,14 @@ HAVE_THREADING = True import neat -from neat.distributed import chunked, MODE_AUTO, MODE_PRIMARY, MODE_SECONDARY, ModeError, _STATE_RUNNING +from neat.distributed import chunked, MODE_AUTO, MODE_PRIMARY, MODE_SECONDARY, ModeError ON_PYPY = platform.python_implementation().upper().startswith("PYPY") - +if ON_PYPY and ((not 'TRY_PYPY' in os.environ) or (os.environ['TRY_PYPY'] != 1)): + SKIP_FOR_PYPY = True +else: + SKIP_FOR_PYPY = False def eval_dummy_genome_nn(genome, config): """dummy evaluation function""" @@ -181,21 +184,6 @@ def test_DistributedEvaluator_primary_restrictions(): else: raise Exception("A DistributedEvaluator in secondary mode could call evaluate()!") -def test_DistributedEvaluator_state_error1(): - """Tests that attempts to use an unstarted manager for set_secondary_state cause an error.""" - primary = neat.DistributedEvaluator( - ("localhost", 8022), - authkey=b"abcd1234", - eval_function=eval_dummy_genome_nn, - mode=MODE_PRIMARY, - ) - try: - primary.em.set_secondary_state(_STATE_RUNNING) - except RuntimeError: - pass - else: - raise Exception("primary.em.set_secondary_state with unstarted manager did not raise a RuntimeError!") - def 
test_DistributedEvaluator_state_error2(): """Tests that attempts to use an unstarted manager for get_inqueue cause an error.""" primary = neat.DistributedEvaluator( @@ -240,25 +228,9 @@ def test_DistributedEvaluator_state_error4(): pass else: raise Exception("primary.em.get_namespace() with unstarted manager did not raise a RuntimeError!") - -def test_DistributedEvaluator_state_error5(): - """Tests that attempts to set an invalid state cause an error.""" - primary = neat.DistributedEvaluator( - ("localhost", 8022), - authkey=b"abcd1234", - eval_function=eval_dummy_genome_nn, - mode=MODE_PRIMARY, - ) - primary.start() - try: - primary.em.set_secondary_state(-1) - except ValueError: - pass - else: - raise Exception("primary.em.set_secondary_state(-1) did not raise a ValueError!") -@unittest.skipIf(ON_PYPY, "This test fails on pypy during travis builds but usually works locally.") +@unittest.skipIf(SKIP_FOR_PYPY, "This test fails on pypy during travis builds but usually works locally.") def test_distributed_evaluation_multiprocessing(do_mwcp=True): """ Full test run using the Distributed Evaluator (fake nodes using processes). 
@@ -448,6 +420,7 @@ def run_secondary(addr, authkey, num_workers=1): test_host_is_local() test_DistributedEvaluator_mode() test_DistributedEvaluator_primary_restrictions() - test_distributed_evaluation_multiprocessing(do_mwcp=True) - if HAVE_THREADING: + if not SKIP_FOR_PYPY: + test_distributed_evaluation_multiprocessing(do_mwcp=True) + if HAVE_THREADING and (not ON_PYPY): test_distributed_evaluation_threaded() diff --git a/tests/test_simple_run.py b/tests/test_simple_run.py index 73362ece..5b51074b 100644 --- a/tests/test_simple_run.py +++ b/tests/test_simple_run.py @@ -1,9 +1,22 @@ from __future__ import print_function import os +import platform +import unittest + import neat VERBOSE = True +ON_PYPY = platform.python_implementation().upper().startswith("PYPY") + +try: + import threading +except ImportError: + import dummy_threading as threading + HAVE_THREADING = False +else: + HAVE_THREADING = True + def eval_dummy_genome_nn(genome, config): net = neat.nn.FeedForwardNetwork.create(genome, config) ignored_output = net.activate((0.5, 0.5)) @@ -285,6 +298,24 @@ def test_serial_bad_configA(): raise Exception( "Should have had a RuntimeError with bad_configurationA") +def test_serial_bad_configB(): + """Test if bad_configurationB causes a RuntimeError on trying to create the population.""" + # Load configuration. + local_dir = os.path.dirname(__file__) + config_path = os.path.join(local_dir, 'bad_configurationB') + config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, + neat.DefaultSpeciesSet, neat.DefaultStagnation, + config_path) + + try: + # Create the population, which is the top-level object for a NEAT run. + p = neat.Population(config) + except RuntimeError: + pass + else: + raise Exception( + "Should have had a RuntimeError with bad_configurationB") + def test_serial_extinction_exception(): """Test for complete extinction with exception.""" # Load configuration. 
@@ -363,9 +394,11 @@ def test_parallel(): stats.save() - +@unittest.skipIf(ON_PYPY, "Pypy has problems with threading.") def test_threaded_evaluation(): """Tests a neat evolution using neat.threaded.ThreadedEvaluator""" + if not HAVE_THREADING: + raise unittest.SkipTest("Platform does not have threading") # Load configuration. local_dir = os.path.dirname(__file__) config_path = os.path.join(local_dir, 'test_configuration') @@ -388,9 +421,11 @@ def test_threaded_evaluation(): stats.save() - +@unittest.skipIf(ON_PYPY, "Pypy has problems with threading.") def test_threaded_evaluator(): """Tests general functionality of neat.threaded.ThreadedEvaluator""" + if not HAVE_THREADING: + raise unittest.SkipTest("Platform does not have threading") n_workers = 3 e = neat.ThreadedEvaluator(n_workers, eval_dummy_genome_nn) try: @@ -673,8 +708,9 @@ def test_run_iznn_bad(): test_serial_extinction_exception() test_serial_extinction_no_exception() test_parallel() - test_threaded_evaluation() - test_threaded_evaluator() + if HAVE_THREADING and (not ON_PYPY): + test_threaded_evaluation() + test_threaded_evaluator() test_run_nn_recurrent() test_run_nn_recurrent_bad() test_run_ctrnn() diff --git a/tests/test_xor_example_distributed.py b/tests/test_xor_example_distributed.py index 583efbbb..fdf5c627 100644 --- a/tests/test_xor_example_distributed.py +++ b/tests/test_xor_example_distributed.py @@ -12,6 +12,11 @@ ON_PYPY = platform.python_implementation().upper().startswith("PYPY") +if ON_PYPY and ((not 'TRY_PYPY' in os.environ) or (os.environ['TRY_PYPY'] != '1')): + SKIP_FOR_PYPY = True +else: + SKIP_FOR_PYPY = False + # 2-input XOR inputs and expected outputs.
XOR_INPUTS = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)] XOR_OUTPUTS = [(0.0,), (1.0,), (1.0,), (0.0,)] @@ -58,7 +63,7 @@ def run_primary(addr, authkey, generations): de.start() winner = p.run(de.evaluate, generations) print("===== stopping DistributedEvaluator =====") - de.stop(wait=3, shutdown=True, force_secondary_shutdown=False) + de.stop(wait=3, shutdown=False, force_secondary_shutdown=False) if winner: # Display the winning genome. @@ -128,7 +133,7 @@ def run_secondary(addr, authkey, num_workers=1): raise Exception("DistributedEvaluator in secondary mode did not try to exit!") -@unittest.skipIf(ON_PYPY, "This test fails on pypy during travis builds (frequently due to timeouts) but usually works locally.") +@unittest.skipIf(SKIP_FOR_PYPY, "This test fails on pypy during travis builds (frequently due to timeouts) but usually works locally.") def test_xor_example_distributed(): """ Test to make sure restoration after checkpoint works with distributed. pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy