diff --git a/.travis.yml b/.travis.yml
index c821816..122931d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,7 +8,9 @@ python:
   - "3.6"
   - "3.7"
   - "3.8"
+  - "3.9-dev"
   - "pypy3.5"
+  - "pypy3"
 
 install:
   - pip install tox-travis
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3829c24..39f4452 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,10 +1,82 @@
 # Contributing to python-nnf
 
-More information coming soon.
+## Running the tests and linters
 
-## Notes for now
+[`tox`](https://tox.readthedocs.io/en/latest/) is used to run the tests and linters. After installing it, run:
 
-- `tox -e py3`
-- Set notation for Internal nodes (rather than lists)
-- Use of `@memoize`
-- Use of Typing / mypy
+```
+tox
+```
+
+This will install and run all the tooling.
+
+`tox` aborts early if one of the steps fails. To run just the tests (for example if you can't get `mypy` to work), install and run [`pytest`](https://docs.pytest.org/en/latest/getting-started.html). To run just one particular test, run `pytest -k <name of the test>`.
+
+## Mypy
+
+[Mypy](https://mypy.readthedocs.io/en/stable/) is used for static typing. This is also managed by `tox`.
+
+You can look at the existing code for cues. If you can't figure it out, just leave it be and we'll look at it during code review.
+
+## Hypothesis
+
+[Hypothesis](https://hypothesis.readthedocs.io/en/latest/) is used for property-based testing: it generates random sentences for tests to use. See the existing tests for examples.
+
+It's ideal for a feature to have both tests that use Hypothesis and tests that don't.
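+
+A minimal sketch of the pattern, assuming the `NNF()` sentence strategy and the imports already defined in `test_nnf.py` (the test name and the property checked here are only an illustration):
+
+```python
+@given(NNF())  # Hypothesis draws random sentences
+def test_double_negation(sentence: nnf.NNF):
+    # Illustrative property: negating twice preserves the models.
+    assert sentence.negate().negate().equivalent(sentence)
+```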
+
+## Memoization
+
+[Memoization](https://en.wikipedia.org/wiki/Memoization) is used in various places to avoid computing the same thing multiple times. A memoized function remembers past calls so it can return the same return value again in the future.
+
+A downside of memoization is that it increases memory usage.
+
+It's used in two patterns throughout the codebase:
+
+### Temporary internal memoization with `@memoize`
+
+This is used for functions that run on individual nodes of sentences, within a method. For example:
+
+```python
+    def height(self) -> int:
+        """The number of edges between here and the furthest leaf."""
+        @memoize
+        def height(node: NNF) -> int:
+            if isinstance(node, Internal) and node.children:
+                return 1 + max(height(child) for child in node.children)
+            return 0
+
+        return height(self)
+```
+
+Because the function is defined inside the method, it's garbage collected along with its cache when the method returns. This makes sure we don't keep all the individual node heights in memory indefinitely.
+
+### Memoizing sentence properties with `@weakref_memoize`
+
+This is used for commonly used methods that run on whole sentences. For example:
+
+```python
+    @weakref_memoize
+    def vars(self) -> t.FrozenSet[Name]:
+        """The names of all variables that appear in the sentence."""
+        return frozenset(node.name
+                         for node in self.walk()
+                         if isinstance(node, Var))
+```
+
+This lets us call `.vars()` often without worrying about performance.
+
+Unlike the other decorator, this one uses `weakref`, so it doesn't interfere with garbage collection. It's slightly less efficient though, so the temporary functions from the previous section are better off with `@memoize`.
+
+## Documentation
+
+Methods are documented with reStructuredText inside docstrings. This looks a little like markdown, but it's different, so take care and look at other docstrings for examples.
+
+Documentation is automatically generated and ends up at [Read the Docs](https://python-nnf.readthedocs.io/en/latest/).
+
+To build the documentation locally, run `make html` inside the `docs/` directory. This generates a manual in `docs/_build/html/`.
+
+New modules have to be added to `docs/nnf.rst` to be documented.
+
+## Style/miscellaneous
+
+- Prefer sets over lists where it makes sense. For example, `Or({~c for c in children} | {aux})` instead of `Or([~c for c in children] + [aux])`.
diff --git a/docs/caveats.rst b/docs/caveats.rst
index 61f0467..ef1718a 100644
--- a/docs/caveats.rst
+++ b/docs/caveats.rst
@@ -44,11 +44,11 @@ Decomposability and determinism
 A lot of methods are much faster to perform on sentences that are
 decomposable or deterministic, such as model enumeration.
 
-Decomposability is automatically detected. However, you can skip the check if you already know whether the sentence is decomposable or not, by passing ``decomposable=True`` or ``decomposable=False`` as a keyword argument.
+Decomposability is automatically detected.
 
-Determinism is too expensive to automatically detect, but it can give a huge speedup. If you know a sentence to be deterministic, pass ``deterministic=True`` as a keyword argument to take advantage.
+Determinism is too expensive to automatically detect, but it can give a huge speedup. If you know a sentence to be deterministic, run ``.mark_deterministic()`` to enable the relevant optimizations.
 
-A compiler like `DSHARP `_ may be able to convert some sentences into equivalent deterministic decomposable sentences. The output of DSHARP can be loaded using the :mod:`nnf.dsharp` module.
+A compiler like `DSHARP `_ may be able to convert some sentences into equivalent deterministic decomposable sentences. The output of DSHARP can be loaded using the :mod:`nnf.dsharp` module. Sentences returned by :func:`nnf.dsharp.compile` are automatically marked as deterministic.
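+
+For example, to mark a sentence by hand (a minimal sketch; ``sentence`` stands in for any sentence you know to be deterministic)::
+
+    sentence.mark_deterministic()
+    assert sentence.marked_deterministic()
+    count = sentence.model_count()  # fast if the sentence is also decomposable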
 
 Other duplication inefficiencies
 --------------------------------
diff --git a/examples/socialchoice.py b/examples/socialchoice.py
index 827c42b..36c90ed 100644
--- a/examples/socialchoice.py
+++ b/examples/socialchoice.py
@@ -4,11 +4,10 @@
 import functools
 import itertools
 
-import nnf
+from nnf import amc, And, Or, Var, true, false
-
-from nnf import amc, And, Or, Var
-
-memoize = functools.lru_cache(None)  # huge speedup
+# Cache generated nodes
+memoize = functools.lru_cache(None)
 
 
 def powerset(iterable):
@@ -21,8 +20,6 @@ def powerset(iterable):
 
 def lin(candidates):
     # candidates must be hashable, have total ordering
-    builder = nnf.Builder()
-
     candidates = frozenset(candidates)
     n = len(candidates)
     T = frozenset(powerset(candidates))
@@ -32,17 +29,16 @@ def lin(candidates):
     def defeats(i, j):
         assert i != j
         if i < j:
-            return builder.Var((i, j))
+            return Var((i, j))
         else:
-            return builder.Var((j, i), False)
+            return Var((j, i), False)
 
     @memoize
     def C(S):
         if S == candidates:
-            return builder.true
+            return true
 
-        return builder.Or(C_child(i, S)
-                          for i in candidates - S)
+        return Or(C_child(i, S) for i in candidates - S)
 
     @memoize
     def C_child(i, S):
@@ -50,7 +46,7 @@ def C_child(i, S):
         children.update(defeats(i, j)
                         for j in candidates - S
                         if i != j)
-        return builder.And(children)
+        return And(children)
 
     return C(frozenset())
 
@@ -81,8 +77,8 @@ def order_to_model(order):
 def kemeny(votes):
     labels = collections.Counter()
     for vote in votes:
-        for name, true in order_to_model(vote).items():
-            labels[nnf.Var(name, true)] += 1
+        for name, truthiness in order_to_model(vote).items():
+            labels[Var(name, truthiness)] += 1
     return {model_to_order(model)
             for model in amc.maxplus_reduce(lin(votes[0]), labels).models()}
 
@@ -90,8 +86,8 @@ def kemeny(votes):
 def slater(votes):
     totals = collections.Counter()
     for vote in votes:
-        for name, true in order_to_model(vote).items():
-            totals[nnf.Var(name, true)] += 1
+        for name, truthiness in order_to_model(vote).items():
+            totals[Var(name, truthiness)] += 1
     labels = {}
     for var in totals:
         labels[var] = 1 if totals[var] > totals[~var] else 0
@@ -143,7 +139,7 @@ def test():
     s_3 = s.condition({(0, 1): True, (1, 2): True, (0, 2): False})
     assert len(list(s_3.models())) == 0
     assert not s_3.satisfiable()
-    assert s_3.simplify() == nnf.false
+    assert s_3.simplify() == false
 
     # strings as candidates
     named = lin({"Alice", "Bob", "Carol"})
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 0000000..cb295e7
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,3 @@
+# -*- conf -*-
+[mypy]
+strict = True
diff --git a/nnf/__init__.py b/nnf/__init__.py
index 151f283..6d0a907 100644
--- a/nnf/__init__.py
+++ b/nnf/__init__.py
@@ -21,14 +21,25 @@
 import os
 import typing as t
 import uuid
+import weakref
 
 from collections import Counter
 
+from nnf.util import (
+    memoize,
+    weakref_memoize,
+    T_NNF,
+    U_NNF,
+    T_NNF_co,
+    _Tristate,
+    Bottom,
+    Name,
+    Model,
+)
+
 if t.TYPE_CHECKING:
     import nnf
 
-Name = t.Hashable
-Model = t.Dict[Name, bool]
 
 __all__ = ('NNF', 'Internal', 'And', 'Or', 'Var', 'Aux', 'Builder',
            'all_models', 'complete_models', 'decision', 'true', 'false',
@@ -55,12 +66,6 @@ def all_models(names: 't.Iterable[Name]') -> t.Iterator[Model]:
             yield new
 
 
-T = t.TypeVar('T')
-T_NNF = t.TypeVar('T_NNF', bound='NNF')
-U_NNF = t.TypeVar('U_NNF', bound='NNF')
-T_NNF_co = t.TypeVar('T_NNF_co', bound='NNF', covariant=True)
-_Tristate = t.Optional[bool]
-
 # Valid values: native and kissat
 SAT_BACKEND = 'native'
 
@@ -81,16 +86,9 @@ def __exit__(self, *_: t.Any) -> None:
         SAT_BACKEND = self.setting
 
 
-if t.TYPE_CHECKING:
-    def memoize(func: T) -> T:
-        ...
-else:
-    memoize = functools.lru_cache(maxsize=None)
-
-
 class NNF(metaclass=abc.ABCMeta):
     """Base class for all NNF sentences."""
-    __slots__ = ()
+    __slots__ = ("__weakref__",)
 
     def __and__(self: T_NNF, other: U_NNF) -> 'And[t.Union[T_NNF, U_NNF]]':
         """And({self, other})"""
@@ -117,6 +115,7 @@ def walk(self) -> t.Iterator['NNF']:
                     seen.add(child)
                     nodes.append(child)
 
+    @weakref_memoize
     def size(self) -> int:
         """The number of edges in the sentence.
 
@@ -165,28 +164,75 @@ def simply_conjunct(self) -> bool:
                    for node in self.walk()
                    if isinstance(node, And))
 
+    @weakref_memoize
     def vars(self) -> t.FrozenSet[Name]:
         """The names of all variables that appear in the sentence."""
         return frozenset(node.name
                          for node in self.walk()
                          if isinstance(node, Var))
 
+    def _memoized_vars(self) -> t.Callable[["NNF"], t.FrozenSet[Name]]:
+        """Return a memoized alternative to the .vars() method.
+
+        You should use this if you're going to query the variables of many
+        nodes within a sentence.
+
+        This returns a function so the cache is cleared when it's garbage
+        collected.
+        """
+
+        @memoize
+        def vars_(node: NNF) -> t.FrozenSet[Name]:
+            if isinstance(node, Var):
+                return frozenset({node.name})
+            assert isinstance(node, Internal)
+            return frozenset(
+                v for child in node.children for v in vars_(child)
+            )
+
+        return vars_
+
+    @weakref_memoize
     def decomposable(self) -> bool:
         """The children of each And node don't share variables, recursively."""
-        @memoize
-        def var(node: NNF) -> t.FrozenSet[Name]:
-            return node.vars()
+        vars_ = self._memoized_vars()
 
         for node in self.walk():
             if isinstance(node, And):
                 seen = set()  # type: t.Set[Name]
                 for child in node.children:
-                    for name in var(child):
+                    for name in vars_(child):
                         if name in seen:
                             return False
                         seen.add(name)
 
         return True
 
+    # We want something set-like, but WeakSets are unreliable for reasons
+    # explained in nnf.util.weakref_memoize.
+    # We use a WeakValueDictionary that maps object IDs to the objects
+    # themselves. This has the properties we want:
+    # - Objects disappear from the dictionary when collected.
+    # - We never forget that an object was marked.
+    # Unlike @weakref_memoize, a result for one object never transfers to
+    # an equal object. But since .mark_deterministic() is a manual
+    # optimization it's probably better that way.
+    _deterministic_sentences = (
+        weakref.WeakValueDictionary()
+    )  # type: weakref.WeakValueDictionary[int, NNF]
+
+    def mark_deterministic(self) -> None:
+        """Declare for optimization that this sentence is deterministic.
+
+        Note that this goes by object identity, not equality. This may
+        matter in obscure cases where you instantiate the same sentence
+        multiple times.
+        """
+        NNF._deterministic_sentences[id(self)] = self
+
+    def marked_deterministic(self) -> bool:
+        """Whether this sentence has been marked as deterministic."""
+        return id(self) in NNF._deterministic_sentences
+
     def deterministic(self) -> bool:
         """The children of each Or node contradict each other.
@@ -199,16 +245,19 @@ def deterministic(self) -> bool:
                         return False
         return True
 
+    @weakref_memoize
     def smooth(self) -> bool:
         """The children of each Or node all use the same variables."""
+        vars_ = self._memoized_vars()
+
         for node in self.walk():
             if isinstance(node, Or) and len(node.children) > 1:
                 expected = None
                 for child in node.children:
                     if expected is None:
-                        expected = child.vars()
+                        expected = vars_(child)
                     else:
-                        if child.vars() != expected:
+                        if vars_(child) != expected:
                             return False
 
         return True
@@ -253,41 +302,24 @@ def sat(node: NNF) -> bool:
 
         return sat(self)
 
-    def satisfiable(
-        self,
-        *,
-        decomposable: _Tristate = None,
-        cnf: _Tristate = None
-    ) -> bool:
+    def satisfiable(self) -> bool:
         """Some set of values exists that makes the sentence correct.
 
         This method doesn't necessarily try to find an example, which can
         make it faster. It's decent at decomposable sentences and sentences
         in CNF, and bad at other sentences.
-
-        :param decomposable: Indicate whether the sentence is decomposable. By
-                             default, this will be checked.
-        :param cnf: Indicate whether the sentence is in CNF. By default,
-                    this will be checked.
         """
         if not self._satisfiable_decomposable():
             return False
 
-        if decomposable is None:
-            decomposable = self.decomposable()
-
-        if decomposable:
+        if self.decomposable():
             # Would've been picked up already if not satisfiable
             return True
 
-        if cnf is None:
-            cnf = self.is_CNF()
-
-        if cnf:
+        if self.is_CNF():
             return self._cnf_satisfiable()
-        else:
-            from nnf import tseitin
-            return tseitin.to_CNF(self)._cnf_satisfiable()
+
+        return self.to_CNF()._cnf_satisfiable()
 
     def _satisfiable_decomposable(self) -> bool:
         """Checks satisfiability of decomposable sentences.
@@ -333,38 +365,17 @@ def con(node: NNF) -> bool:
 
     consistent = satisfiable  # synonym
 
-    def valid(
-        self,
-        *,
-        decomposable: _Tristate = None,
-        deterministic: bool = False,
-        smooth: _Tristate = None
-    ) -> bool:
+    def valid(self) -> bool:
         """Check whether the sentence is valid (i.e. always true).
 
         This can be done efficiently for sentences that are decomposable
         and deterministic.
-
-        :param decomposable: Whether to assume the sentence is decomposable.
-                             If ``None`` (the default), the sentence is
-                             automatically checked.
-        :param deterministic: Whether the sentence is deterministic. This
-                              should be set to ``True`` for sentences that
-                              are known to be deterministic. It's assumed to
-                              be ``False`` otherwise.
-        :param smooth: Whether to assume the sentence is smooth. If ``None``
-                       (the default), the sentence is automatically checked.
""" - if decomposable is None: - decomposable = self.decomposable() - - if decomposable and deterministic: + if self.marked_deterministic() and self.decomposable(): # mypy is unsure that 2** is actually an int # but len(self.vars()) >= 0, so it definitely is max_num_models = 2**len(self.vars()) # type: int - return max_num_models == self.model_count(decomposable=True, - deterministic=True, - smooth=smooth) + return max_num_models == self.model_count() return not self.negate().satisfiable() @@ -377,89 +388,62 @@ def implies(self, other: 'NNF') -> bool: return not self.condition(other.negate().to_model()).satisfiable() if self.term(): return not other.negate().condition(self.to_model()).satisfiable() - return (self.negate() | other).valid() # todo: speed this up + + if not self.vars() & other.vars(): + return not self.satisfiable() or other.valid() + + return not (self & other.negate()).satisfiable() entails = implies - def models( - self, - *, - decomposable: _Tristate = None, - deterministic: bool = False, - cnf: _Tristate = None - ) -> t.Iterator[Model]: + def models(self, *, deterministic: _Tristate = None) -> t.Iterator[Model]: """Yield all dictionaries of values that make the sentence correct. Much faster on sentences that are deterministic or decomposable or both. The algorithm for deterministic sentences works on non-deterministic - sentences, but may be much slower for such sentences. Using - ``deterministic=True`` for sentences that aren't deterministic can - be a reasonable decision. - - :param decomposable: Whether to assume the sentence is - decomposable. If ``None`` (the default), - the sentence is automatically checked. - :param deterministic: Indicate whether the sentence is - deterministic. Set this to ``True`` if you - know it to be deterministic, or want it to be - treated as deterministic. - :param cnf: Indicate whether the sentence is in CNF, in which case a - SAT solver will be used. Set to ``True`` to skip the - automatic check and immediately use that method, set to - ``False`` to force the use of another algorithm. + sentences, but may be much slower for such sentences. Pass + ``deterministic=True`` to use it anyway. This can give a speedup in + some cases. + + :param deterministic: If ``True``, treat the sentence as if it's + deterministic. If ``False``, treat it as if it + isn't, even if it's marked otherwise. """ - if decomposable is None: - decomposable = self.decomposable() - if cnf is None: - cnf = self.is_CNF() - if cnf: + if deterministic is None: + deterministic = self.marked_deterministic() + if self.is_CNF(): yield from self._cnf_models() elif deterministic: - yield from self._models_deterministic(decomposable=decomposable) - elif decomposable: + yield from self._models_deterministic() + elif self.decomposable(): yield from self._models_decomposable() else: for model in all_models(self.vars()): if self.satisfied_by(model): yield model - def model_count( - self, - *, - decomposable: _Tristate = None, - deterministic: bool = False, - smooth: _Tristate = None - ) -> int: + def model_count(self) -> int: """Return the number of models the sentence has. This can be done efficiently for sentences that are decomposable and deterministic. - - :param decomposable: Whether to assume the sentence is decomposable. - If ``None`` (the default), the sentence is - automatically checked. - :param deterministic: Whether the sentence is deterministic. This - should be set to ``True`` for sentences that - are known to be deterministic. It's assumed to - be ``False`` otherwise. 
-        :param smooth: Whether to assume the sentence is smooth. If the
-                       sentence isn't smooth, an extra step is needed. If
-                       ``None`` (the default), the sentence is automatically
-                       checked.
         """
-        if decomposable is None:
-            decomposable = self.decomposable()
-        if smooth is None:
-            smooth = self.smooth()
+        decomposable = self.decomposable()
+        deterministic = self.marked_deterministic()
+        made_smooth = False
 
         sentence = self
-        if decomposable and deterministic and not smooth:
+        if decomposable and deterministic and not sentence.smooth():
             sentence = sentence.make_smooth()
-            smooth = True
+            made_smooth = True
 
-        if decomposable and deterministic and smooth:
+        if (
+            decomposable
+            and deterministic
+            and (made_smooth or sentence.smooth())
+        ):
             @memoize
             def count(node: NNF) -> int:
                 if isinstance(node, Var):
@@ -476,38 +460,14 @@ def count(node: NNF) -> int:
 
             return count(sentence)
 
-        return len(list(sentence.models()))
-
-    def contradicts(
-        self,
-        other: 'NNF',
-        *,
-        decomposable: _Tristate = None
-    ) -> bool:
-        """There is no set of values that satisfies both sentences.
+        return sum(1 for _ in sentence.models())
 
-        May be very expensive.
-        """
-        if decomposable is None:
-            decomposable = self.decomposable() and other.decomposable()
+    def contradicts(self, other: "NNF") -> bool:
+        """There is no set of values that satisfies both sentences."""
+        if not self.vars() & other.vars():
+            return not (self.satisfiable() and other.satisfiable())
 
-        if len(self.vars()) > len(other.vars()):
-            # The one with the fewest vars has the smallest models
-            a, b = other, self
-        else:
-            a, b = self, other
-
-        if decomposable:
-            for model in a.models(decomposable=True):
-                if b._consistent_with_model(model):
-                    return False
-            return True
-
-        for model in b.models():
-            # Hopefully, a.vars() <= b.vars() and .satisfiable() is fast
-            if a.condition(model).satisfiable():
-                return False
-        return True
+        return not (self & other).satisfiable()
 
     def equivalent(self, other: 'NNF') -> bool:
         """Test whether two sentences have the same models.
@@ -515,85 +475,13 @@ def equivalent(self, other: 'NNF') -> bool:
         If the sentences don't contain the same variables they are
         considered equivalent if the variables that aren't shared are
         independent, i.e. their value doesn't affect the value of the
         sentence.
-
-        Warning: may be very slow. Decomposability helps.
         """
-        # TODO: add arguments for decomposability, determinism (per sentence?)
         if self == other:
             return True
 
-        models_a = list(self.models())
-        models_b = list(other.models())
-        if models_a == models_b:
-            return True
-
-        vars_a = self.vars()
-        vars_b = other.vars()
-        if vars_a == vars_b:
-            if len(models_a) != len(models_b):
-                return False
-
-        Model_T = t.FrozenSet[t.Tuple[Name, bool]]
-
-        def dict_hashable(model: t.Dict[Name, bool]) -> Model_T:
-            return frozenset(model.items())
-
-        modset_a = frozenset(map(dict_hashable, models_a))
-        modset_b = frozenset(map(dict_hashable, models_b))
-
-        if vars_a == vars_b:
-            return modset_a == modset_b
-
-        # There are variables that appear in one sentence but not the other
-        # The sentences can still be equivalent if those variables don't
-        # affect the truth value of the sentence they appear in
-        # That is, for every model that contains such a variable there's
-        # another model with that variable flipped
-
-        # In such a scenario we can calculate the number of "shared" models in
-        # advance
-        # Each additional variable not shared with the other doubles the
-        # number of models compared to the number of shared models
-        # so we calculate those factors and check whether that works out,
-        # which is much cheaper than actually checking everything, which we
-        # do afterwards if necessary
-        if len(modset_a) % 2**(len(vars_a - vars_b)) != 0:
-            return False
-        if len(modset_b) % 2**(len(vars_b - vars_a)) != 0:
-            return False
-        if (len(modset_a) // 2**(len(vars_a - vars_b)) !=
-                len(modset_b) // 2**(len(vars_b - vars_a))):
-            return False
-
-        def flip(model: Model_T, var: Name) -> Model_T:
-            other = (var, False) if (var, True) in model else (var, True)
-            return (model - {(var, False), (var, True)}) | {other}
-
-        modset_a_small = set()  # type: t.Set[Model_T]
-        for model in modset_a:
-            for var in vars_a - vars_b:
-                if flip(model, var) not in modset_a:
-                    return False
-            true_vars = {(var, True) for var in vars_a - vars_b}
-            if true_vars <= model:  # Don't add it multiple times
-                modset_a_small.add(model - true_vars)
-
-        modset_b_small = set()  # type: t.Set[Model_T]
-        for model in modset_b:
-            for var in vars_b - vars_a:
-                if flip(model, var) not in modset_b:
-                    return False
-            true_vars = {(var, True) for var in vars_b - vars_a}
-            if true_vars <= model:
-                modset_b_small.add(model - true_vars)
-
-        if modset_a_small != modset_b_small:
-            return False
-
-        assert (len(modset_a_small)
-                == len(modset_a) // 2**(len(vars_a - vars_b)))
-
-        return True
+        return not (
+            (self & other.negate()) | (self.negate() & other)
+        ).satisfiable()
 
     def negate(self) -> 'NNF':
         """Return a new sentence that's true iff the original is false."""
@@ -827,16 +715,21 @@ def implicates(self) -> 'And[Or[Var]]':
         the sentence that are strict subsets of any of the clauses in this
         representation, so no clauses could be made smaller.
 
-        While :meth:`NNF.implicants` returns all implicants, this method may
+        While :meth:`implicants` returns all implicants, this method may
         only return some of the implicates.
         """
         return And(self._do_PI()[1])
 
     def to_MODS(self) -> 'Or[And[Var]]':
         """Convert the sentence to a MODS sentence."""
-        return Or(And(Var(name, val)
-                      for name, val in model.items())
-                  for model in self.models())
+        new = Or(
+            And(Var(name, val) for name, val in model.items())
+            for model in self.models()
+        )
+        NNF.is_MODS.set(new, True)
+        NNF._is_DNF_loose.set(new, True)
+        NNF._is_DNF_strict.set(new, True)
+        return new
 
     def to_model(self) -> Model:
         """If the sentence directly represents a model, convert it to that.
@@ -883,6 +776,8 @@ def cond(node: NNF) -> NNF:
 
     def make_smooth(self) -> 'NNF':
         """Transform the sentence into an equivalent smooth sentence."""
+        vars_ = self._memoized_vars()
+
         @memoize
         def filler(name: Name) -> 'Or[Var]':
             return Or({Var(name), Var(name, False)})
@@ -895,11 +790,11 @@ def smooth(node: NNF) -> NNF:
             elif isinstance(node, Var):
                 return node
             elif isinstance(node, Or):
-                names = node.vars()
+                names = vars_(node)
                 children = {smooth(child) for child in node.children}
                 smoothed = set()  # type: t.Set[NNF]
                 for child in children:
-                    child_names = child.vars()
+                    child_names = vars_(child)
                     if len(child_names) < len(names):
                         child_children = {child}
                         child_children.update(filler(name)
@@ -914,7 +809,9 @@ def smooth(node: NNF) -> NNF:
                 return node
             return new
 
-        return smooth(self)
+        ret = smooth(self)
+        NNF.smooth.set(ret, True)
+        return ret
 
     def simplify(self, merge_nodes: bool = True) -> 'NNF':
         """Apply the following transformations to make the sentence simpler:
@@ -1052,8 +949,8 @@ def deduplicate(self: T_NNF) -> T_NNF:
         represented by two separate objects. This method gets rid of that
         duplication.
 
-        In a lot of cases it's better to avoid the duplication in the first
-        place, for example with a Builder object.
+        It's better to avoid the duplication in the first place. This method
+        is for diagnostic purposes, in combination with :meth:`object_count`.
         """
         new_nodes = {}  # type: t.Dict[NNF, NNF]
 
@@ -1201,21 +1098,14 @@ def name(node: NNF) -> int:
             ['}\n']
         )
 
-    def _models_deterministic(
-        self,
-        *,
-        decomposable: _Tristate = None
-    ) -> t.Iterator[Model]:
+    def _models_deterministic(self) -> t.Iterator[Model]:
         """Model enumeration for deterministic sentences.
 
         Slightly faster for sentences that are also decomposable.
         """
         ModelInt = t.FrozenSet[t.Tuple[Name, bool]]
 
-        if decomposable is None:
-            decomposable = self.decomposable()
-
-        if decomposable:
+        if self.decomposable():
             def compatible(a: ModelInt, b: ModelInt) -> bool:
                 return True
         else:
@@ -1264,7 +1154,7 @@ def complete(
 
     def _models_decomposable(self) -> t.Iterator[Model]:
         """Model enumeration for decomposable sentences."""
-        if not self.satisfiable(decomposable=True):
+        if not self.satisfiable():
             return
         names = tuple(self.vars())
         model_tree = {}  # type: t.Dict[bool, t.Any]
@@ -1293,23 +1183,67 @@ def leaves(
         for leaf, path in leaves(model_tree):
             yield dict(zip(names, path))
 
-    def is_CNF(self) -> bool:
-        """Return whether the sentence is in the Conjunctive Normal Form."""
-        return isinstance(self, And) and all(child.clause()
-                                             for child in self.children)
+    def is_CNF(self, strict: bool = False) -> bool:
+        """Return whether the sentence is in the Conjunctive Normal Form.
+
+        :param strict: If ``True``, follow the definition of the
+                       `Knowledge Compilation Map
+                       `_,
+                       requiring that a variable doesn't appear multiple
+                       times in a single clause.
+        """
+        if strict:
+            return self._is_CNF_strict()
+        return self._is_CNF_loose()
+
+    @weakref_memoize
+    def _is_CNF_loose(self) -> bool:
+        return isinstance(self, And) and all(
+            isinstance(child, Or)
+            and all(isinstance(grandchild, Var) for grandchild in child)
+            for child in self
+        )
+
+    @weakref_memoize
+    def _is_CNF_strict(self) -> bool:
+        return isinstance(self, And) and all(
+            child.clause() for child in self.children
+        )
+
+    def is_DNF(self, strict: bool = False) -> bool:
+        """Return whether the sentence is in the Disjunctive Normal Form.
+
+        :param strict: If ``True``, follow the definition of the
+                       `Knowledge Compilation Map
+                       `_,
+                       requiring that a variable doesn't appear multiple
+                       times in a single term.
+        """
+        if strict:
+            return self._is_DNF_strict()
+        return self._is_DNF_loose()
+
+    @weakref_memoize
+    def _is_DNF_loose(self) -> bool:
+        return isinstance(self, Or) and all(
+            isinstance(child, And)
+            and all(isinstance(grandchild, Var) for grandchild in child)
+            for child in self
+        )
 
-    def is_DNF(self) -> bool:
-        """Return whether the sentence is in the Disjunctive Normal Form."""
+    @weakref_memoize
+    def _is_DNF_strict(self) -> bool:
         return isinstance(self, Or) and all(child.term()
                                             for child in self.children)
 
+    @weakref_memoize
     def is_MODS(self) -> bool:
         """Return whether the sentence is in MODS form.
 
         MODS sentences are disjunctions of terms representing models, making
         the models trivial to enumerate.
         """
-        return self.is_DNF() and self.smooth()
+        return self.is_DNF(strict=True) and self.smooth()
 
     @abc.abstractmethod
     def _sorting_key(self) -> t.Tuple[t.Any, ...]:
@@ -1677,9 +1611,9 @@ def decision(
 
 #: A node that's always true. Technically an And node without children.
-true = And()  # type: And[NNF]
+true = And()  # type: And[Bottom]
 
 #: A node that's always false. Technically an Or node without children.
-false = Or()  # type: Or[NNF]
+false = Or()  # type: Or[Bottom]
 
 
 class Builder:
diff --git a/nnf/amc.py b/nnf/amc.py
index a312970..928c221 100644
--- a/nnf/amc.py
+++ b/nnf/amc.py
@@ -6,7 +6,8 @@
 
 import typing as t
 
-from nnf import NNF, And, Var, Or, Internal, Name, true, false, memoize
+from nnf import NNF, And, Var, Or, Internal, true, false
+from nnf.util import Name, memoize
 
 neg_inf = float('-inf')
diff --git a/nnf/cli.py b/nnf/cli.py
index 4167733..3ccb320 100644
--- a/nnf/cli.py
+++ b/nnf/cli.py
@@ -20,6 +20,8 @@
 # model enumeration
 # ...
 
+DOT_FORMATS = {'ps', 'pdf', 'svg', 'fig', 'png', 'gif', 'jpg', 'jpeg'}
+
 
 @contextlib.contextmanager
 def timer(args: argparse.Namespace) -> t.Iterator[SimpleNamespace]:
@@ -152,10 +154,9 @@ def sentence_stats(verbose: bool, sentence: NNF) -> SimpleNamespace:
 def sat(args: argparse.Namespace) -> int:
     with open_read(args.file) as f:
         sentence = dimacs.load(f)
-    stats = sentence_stats(args.verbose, sentence)
+    sentence_stats(args.verbose, sentence)
     with timer(args):
-        sat = sentence.satisfiable(decomposable=stats.decomposable,
-                                   cnf=stats.cnf)
+        sat = sentence.satisfiable()
     if sat:
         if not args.quiet:
             print("SATISFIABLE")
@@ -169,11 +170,11 @@ def sat(args: argparse.Namespace) -> int:
 def sharpsat(args: argparse.Namespace) -> int:
     with open_read(args.file) as f:
         sentence = dimacs.load(f)
-    stats = sentence_stats(args.verbose, sentence)
+    if args.deterministic:
+        sentence.mark_deterministic()
+    sentence_stats(args.verbose, sentence)
     with timer(args):
-        num = sentence.model_count(decomposable=stats.decomposable,
-                                   deterministic=args.deterministic,
-                                   smooth=stats.smooth)
+        num = sentence.model_count()
     if args.quiet:
         print(num)
     else:
@@ -190,9 +191,6 @@ def info(args: argparse.Namespace) -> int:
     return 0
 
 
-dot_formats = {'ps', 'pdf', 'svg', 'fig', 'png', 'gif', 'jpg', 'jpeg'}
-
-
 def extension(fname: str) -> t.Optional[str]:
     if '.' not in fname:
         return None
@@ -206,7 +204,7 @@ def draw(args: argparse.Namespace) -> int:
         dot = sentence.to_DOT(color=args.color, label=label)
 
     ext = extension(args.out)
-    if ext in dot_formats or args.format is not None:
+    if ext in DOT_FORMATS or args.format is not None:
         argv = ['dot', '-T' + (ext if args.format is None  # type: ignore
                                else args.format)]
         if args.out != '-':
diff --git a/nnf/dimacs.py b/nnf/dimacs.py
index 8182360..f46a20b 100644
--- a/nnf/dimacs.py
+++ b/nnf/dimacs.py
@@ -6,7 +6,8 @@
 import typing as t
 import warnings
 
-from nnf import NNF, Var, And, Or, Name, true, false
+from nnf import NNF, Var, And, Or, true, false
+from nnf.util import Name
 
 __all__ = ('dump', 'load', 'dumps', 'loads')
 
@@ -144,8 +145,6 @@ def _dump_cnf(
         if not isinstance(clause, Or):
             raise TypeError("CNF sentences must be conjunctions of "
                             "disjunctions")
-        if not len(clause.children) > 0:
-            raise TypeError("CNF sentences shouldn't have empty clauses")
         first = True
         for child in clause.children:
             if not isinstance(child, Var):
@@ -293,8 +292,7 @@ def _parse_cnf(tokens: t.Iterable[str]) -> And[Or[Var]]:
     clause = set()  # type: t.Set[Var]
     for token in tokens:
         if token == '0':
-            if clause:
-                clauses.add(Or(clause))
+            clauses.add(Or(clause))
             clause = set()
         elif token == '%':
             # Some example files end with:
@@ -302,7 +300,7 @@ def _parse_cnf(tokens: t.Iterable[str]) -> And[Or[Var]]:
             # %
             # 0
             # I don't know why.
-            pass
+            break
         elif token.startswith('-'):
             clause.add(Var(int(token[1:]), False))
         else:
@@ -311,4 +309,6 @@ def _parse_cnf(tokens: t.Iterable[str]) -> And[Or[Var]]:
         # A file may or may not end with a 0
         # Adding an empty clause is not desirable
         clauses.add(Or(clause))
-    return And(clauses)
+    sentence = And(clauses)
+    NNF._is_CNF_loose.set(sentence, True)
+    return sentence
diff --git a/nnf/dsharp.py b/nnf/dsharp.py
index 0c205d2..20b99c8 100644
--- a/nnf/dsharp.py
+++ b/nnf/dsharp.py
@@ -25,7 +25,8 @@
 import tempfile
 import typing as t
 
-from nnf import NNF, And, Or, Var, false, dimacs, Name
+from nnf import NNF, And, Or, Var, false, true, dimacs
+from nnf.util import Name
 
 __all__ = ('load', 'loads', 'compile')
 
@@ -80,6 +81,8 @@ def compile(
 
     This requires having DSHARP installed.
 
+    The returned sentence will be marked as deterministic.
+
     :param sentence: The CNF sentence to compile.
     :param executable: The path of the ``dsharp`` executable. If the
                        executable is in your PATH there's no need to set
                        this.
@@ -97,6 +100,12 @@ def compile(
     if not sentence.is_CNF():
         raise ValueError("Sentence must be in CNF")
 
+    # Handle cases D# doesn't like
+    if not sentence.children:
+        return true
+    if false in sentence.children:
+        return false
+
     var_labels = dict(enumerate(sentence.vars(), start=1))
     var_labels_inverse = {v: k for k, v in var_labels.items()}
 
@@ -139,4 +148,7 @@ def compile(
     if not out:
         raise RuntimeError("Couldn't read file output. Log:\n\n{}"
                            .format(log))
-    return loads(out, var_labels=var_labels)
+    result = loads(out, var_labels=var_labels)
+    result.mark_deterministic()
+    NNF.decomposable.set(result, True)
+    return result
diff --git a/nnf/kissat.py b/nnf/kissat.py
index b446d12..2179ee0 100644
--- a/nnf/kissat.py
+++ b/nnf/kissat.py
@@ -8,7 +8,8 @@
 import subprocess
 import typing as t
 
-from nnf import And, Or, Var, dimacs, Model
+from nnf import And, Or, Var, dimacs
+from nnf.util import Model
 
 __all__ = ('solve',)
diff --git a/nnf/operators.py b/nnf/operators.py
index bedcfab..2cacdbf 100644
--- a/nnf/operators.py
+++ b/nnf/operators.py
@@ -7,7 +7,8 @@
 
 import typing as t
 
-from nnf import NNF, And, Or, T_NNF, U_NNF
+from nnf import NNF, And, Or
+from nnf.util import T_NNF, U_NNF
 
 __all__ = ('xor', 'nand', 'nor', 'implies', 'implied_by', 'iff',
            'and_', 'or_')
diff --git a/nnf/tseitin.py b/nnf/tseitin.py
index 0d713a5..202e6a0 100644
--- a/nnf/tseitin.py
+++ b/nnf/tseitin.py
@@ -6,7 +6,8 @@
 (one for each logical connective in the formula).
 """
 
-from nnf import NNF, Var, And, Or, memoize, Internal
+from nnf import NNF, Var, And, Or, Internal
+from nnf.util import memoize
 
 
 def to_CNF(theory: NNF) -> And[Or[Var]]:
@@ -51,4 +52,7 @@ def process_node(node: NNF) -> Var:
     root = process_node(theory)
     clauses.append(Or({root}))
 
-    return And(clauses)
+    ret = And(clauses)
+    NNF._is_CNF_loose.set(ret, True)
+    NNF._is_CNF_strict.set(ret, True)
+    return ret
diff --git a/nnf/util.py b/nnf/util.py
new file mode 100644
index 0000000..06e7a5a
--- /dev/null
+++ b/nnf/util.py
@@ -0,0 +1,110 @@
+"""Utility definitions for internal use. Not part of the public API."""
+
+import functools
+import typing as t
+import weakref
+
+if t.TYPE_CHECKING:
+    from nnf import NNF  # noqa: F401
+
+Name = t.Hashable
+Model = t.Dict[Name, bool]
+
+T = t.TypeVar("T")
+U = t.TypeVar("U")
+T_NNF = t.TypeVar("T_NNF", bound="NNF")
+U_NNF = t.TypeVar("U_NNF", bound="NNF")
+T_NNF_co = t.TypeVar("T_NNF_co", bound="NNF", covariant=True)
+_Tristate = t.Optional[bool]
+
+# Bottom type with no values
+# This works in mypy but not pytype
+# t.Union[()] works too but not at runtime
+# NoReturn doesn't exist in some Python releases, hence the guard
+if t.TYPE_CHECKING:
+    Bottom = t.NoReturn
+else:
+    Bottom = None
+
+memoize = t.cast(t.Callable[[T], T], functools.lru_cache(maxsize=None))
+
+
+def weakref_memoize(
+    func: t.Callable[[T_NNF], T]
+) -> "_WeakrefMemoized[T_NNF, T]":
+    """Make a function cache its return values using weakrefs.
+
+    This makes it possible to remember sentences' properties without keeping
+    them in memory forever.
+
+    To keep memory use reasonable, this decorator should only be used on
+    methods that will only be called on full sentences, not individual nodes
+    within a sentence.
+
+    The current implementation has a problem: WeakKeyDictionary operates on
+    object equality, not identity. Consider the following:
+
+    >>> d = weakref.WeakKeyDictionary()
+    >>> s = nnf.And()
+    >>> t = nnf.And()
+    >>> d[s] = 3
+    >>> t in d
+    True
+    >>> del s
+    >>> t in d
+    False
+
+    When ``s`` is garbage collected, ``t`` can no longer be looked up in the
+    dictionary, even though we did look it up before.
+
+    This might be acceptable because the methods this wraps around aren't
+    prohibitively expensive.
+
+    For a solution that can't easily be applied here, see the implementation
+    of :meth:`nnf.NNF.mark_deterministic`.
+ """ + memo = ( + weakref.WeakKeyDictionary() + ) # type: weakref.WeakKeyDictionary[T_NNF, T] + + @functools.wraps(func) + def wrapped(self: T_NNF) -> T: + try: + return memo[self] + except KeyError: + ret = func(self) + memo[self] = ret + return ret + + wrapped.memo = memo # type: ignore + wrapped.set = memo.__setitem__ # type: ignore + return t.cast(_WeakrefMemoized[T_NNF, T], wrapped) + + +class _WeakrefMemoized(t.Generic[T_NNF, T]): + """Fake class for typechecking. Should never be instantiated.""" + def __init__(self) -> None: + assert t.TYPE_CHECKING, "Not a real class" + self.memo = NotImplemented # type: weakref.WeakKeyDictionary[T_NNF, T] + self.__wrapped__ = NotImplemented # type: t.Callable[[T_NNF], T] + + @t.overload + def __get__(self: U, instance: None, owner: t.Type[T_NNF]) -> "U": + ... + + @t.overload # noqa: F811 + def __get__( # noqa: F811 + self, instance: T_NNF, owner: t.Optional[t.Type[T_NNF]] = None + ) -> t.Callable[[], T]: + ... + + def __get__( # noqa: F811 + self, instance: object, owner: object = None + ) -> t.Any: + ... + + def __call__(self, sentence: T_NNF) -> T: + ... + + def set(self, sentence: T_NNF, value: T) -> None: + ... diff --git a/test_nnf.py b/test_nnf.py index f80d4ca..4958b1e 100644 --- a/test_nnf.py +++ b/test_nnf.py @@ -3,6 +3,7 @@ import platform import shutil import os +import types from pathlib import Path @@ -16,6 +17,15 @@ from nnf import (Var, And, Or, amc, dimacs, dsharp, operators, tseitin, complete_models, using_kissat) +memoized = [ + method + for method in vars(nnf.NNF).values() + if isinstance(method, types.FunctionType) and hasattr(method, "memo") +] +assert memoized, "No memoized methods found, did the implementation change?" +for method in memoized: + method.set = lambda *args: None # type: ignore + settings.register_profile('patient', deadline=2000, suppress_health_check=(HealthCheck.too_slow,)) settings.load_profile('patient') @@ -116,7 +126,6 @@ def DNF(draw): @st.composite def CNF(draw): sentence = And(draw(st.frozensets(clauses()))) - assume(len(sentence.children) > 0) return sentence @@ -309,19 +318,15 @@ def test_arbitrary_dimacs_sat_serialize(sentence: nnf.NNF): @given(CNF()) -def test_arbitrary_dimacs_cnf_serialize(sentence: nnf.And): - assume(all(len(clause.children) > 0 for clause in sentence.children)) - assert dimacs.loads(dimacs.dumps(sentence, mode='cnf')) == sentence +def test_arbitrary_dimacs_cnf_serialize(sentence: And[Or[Var]]): + reloaded = dimacs.loads(dimacs.dumps(sentence, mode='cnf')) + assert reloaded.is_CNF() + assert reloaded == sentence @given(NNF()) def test_dimacs_cnf_serialize_accepts_only_cnf(sentence: nnf.NNF): - if (isinstance(sentence, And) - and all(isinstance(clause, Or) - and all(isinstance(var, Var) - for var in clause.children) - and len(clause.children) > 0 - for clause in sentence.children)): + if sentence.is_CNF(): event("CNF sentence") dimacs.dumps(sentence, mode='cnf') else: @@ -483,14 +488,10 @@ def test_uf20_models(): for sentence in uf20: assert sentence.decomposable() - m = list(sentence.models(deterministic=False, - decomposable=True)) + m = list(sentence.models(deterministic=False)) models = model_set(m) assert len(m) == len(models) - assert models == model_set(sentence.models(deterministic=True, - decomposable=False)) - assert models == model_set(sentence.models(deterministic=True, - decomposable=True)) + assert models == model_set(sentence.models(deterministic=True)) @given(NNF()) @@ -530,8 +531,9 @@ def test_model_counting(sentence: nnf.NNF): def test_uf20_model_counting(): 
     for sentence in uf20:
-        assert (sentence.model_count(deterministic=True)
-                == len(list(sentence.models())))
+        assert sentence.model_count() == len(list(sentence.models()))
+        sentence.mark_deterministic()
+        assert sentence.model_count() == len(list(sentence.models()))
 
 
 @given(NNF())
@@ -548,26 +550,55 @@ def test_validity(sentence: nnf.NNF):
 
 
 def test_uf20_validity():
     for sentence in uf20:
-        assert not sentence.valid(deterministic=True)
+        assert not sentence.valid()
+        sentence.mark_deterministic()
+        assert not sentence.valid()
 
 
 @given(CNF())
 def test_is_CNF(sentence: nnf.NNF):
     assert sentence.is_CNF()
+    assert sentence.is_CNF(strict=True)
     assert not sentence.is_DNF()
 
 
+def test_is_CNF_examples():
+    assert And().is_CNF()
+    assert And().is_CNF(strict=True)
+    assert And({Or()}).is_CNF()
+    assert And({Or()}).is_CNF(strict=True)
+    assert And({Or({a, ~b})}).is_CNF()
+    assert And({Or({a, ~b})}).is_CNF(strict=True)
+    assert And({Or({a, ~b}), Or({c, ~c})}).is_CNF()
+    assert not And({Or({a, ~b}), Or({c, ~c})}).is_CNF(strict=True)
+
+
 @given(DNF())
 def test_is_DNF(sentence: nnf.NNF):
     assert sentence.is_DNF()
+    assert sentence.is_DNF(strict=True)
     assert not sentence.is_CNF()
 
 
+def test_is_DNF_examples():
+    assert Or().is_DNF()
+    assert Or().is_DNF(strict=True)
+    assert Or({And()}).is_DNF()
+    assert Or({And()}).is_DNF(strict=True)
+    assert Or({And({a, ~b})}).is_DNF()
+    assert Or({And({a, ~b})}).is_DNF(strict=True)
+    assert Or({And({a, ~b}), And({c, ~c})}).is_DNF()
+    assert not Or({And({a, ~b}), And({c, ~c})}).is_DNF(strict=True)
+
+
 @given(NNF())
 def test_to_MODS(sentence: nnf.NNF):
     assume(len(sentence.vars()) <= 5)
     mods = sentence.to_MODS()
     assert mods.is_MODS()
+    assert mods.is_DNF()
+    assert mods.is_DNF(strict=True)
+    assert mods.smooth()
     assert isinstance(mods, Or)
     assert mods.model_count() == len(mods.children)
 
@@ -591,7 +622,7 @@ def test_pairwise(sentence: nnf.NNF):
 def test_implicates(sentence: nnf.NNF):
     implicates = sentence.implicates()
     assert implicates.equivalent(sentence)
-    assert implicates.is_CNF()
+    assert implicates.is_CNF(strict=True)
     assert not any(a.children < b.children
                    for a in implicates.children
                    for b in implicates.children)
@@ -608,13 +639,11 @@ def test_implicants(sentence: nnf.NNF):
 
 
 @given(NNF())
-def test_implicates_implicants_idempotent(sentence: nnf.NNF):
+def test_implicants_idempotent(sentence: nnf.NNF):
     assume(len(sentence.vars()) <= 6)
     implicants = sentence.implicants()
     implicates = sentence.implicates()
     assert implicants.implicants() == implicants
-    assert implicates.implicates() == implicates
-    assert implicants.implicates() == implicates
     assert implicates.implicants() == implicants
 
 
@@ -628,6 +657,7 @@ def test_implicates_implicants_negation_rule(sentence: nnf.NNF):
     So sentence.negate().implicants().negate() gives all implicates,
     and sentence.negate().implicates().negate() gives some implicants.
""" + assume(sentence.size() <= 30) assert ( sentence.negate().implicants().negate().children >= sentence.implicates().children @@ -666,9 +696,9 @@ def test_implies(a: nnf.NNF, b: nnf.NNF): @given(CNF()) def test_cnf_sat(sentence: nnf.NNF): assert sentence.is_CNF() - assert sentence.satisfiable(cnf=True) == sentence.satisfiable(cnf=False) - assert (model_set(sentence.models(cnf=True)) == - model_set(sentence.models(cnf=False, deterministic=True))) + models_ = list(sentence.models()) + assert model_set(models_) == model_set(sentence._models_deterministic()) + assert sentence.satisfiable() == bool(models_) def test_uf20_cnf_sat(): @@ -679,7 +709,7 @@ def test_uf20_cnf_sat(): # But even 20 variables is too much # So let's just hope that test_cnf_sat does enough at_least_one = False - for model in sentence.models(cnf=True): + for model in sentence.models(): assert sentence.satisfied_by(model) at_least_one = True assert at_least_one @@ -779,18 +809,17 @@ def test_copying_does_not_copy(sentence: nnf.NNF): if shutil.which('dsharp') is not None: def test_dsharp_compile_uf20(): - for sentence in uf20_cnf: - compiled = dsharp.compile(sentence) - compiled_smooth = dsharp.compile(sentence, smooth=True) - assert sentence.equivalent(compiled) - assert sentence.equivalent(compiled_smooth) - assert compiled.decomposable() - assert compiled_smooth.decomposable() - assert compiled_smooth.smooth() + sentence = uf20_cnf[0] + compiled = dsharp.compile(sentence) + compiled_smooth = dsharp.compile(sentence, smooth=True) + assert sentence.equivalent(compiled) + assert sentence.equivalent(compiled_smooth) + assert compiled.decomposable() + assert compiled_smooth.decomposable() + assert compiled_smooth.smooth() @given(CNF()) def test_dsharp_compile(sentence: And[Or[Var]]): - assume(all(len(clause) > 0 for clause in sentence)) compiled = dsharp.compile(sentence) compiled_smooth = dsharp.compile(sentence, smooth=True) assert compiled.decomposable() @@ -802,7 +831,6 @@ def test_dsharp_compile(sentence: And[Or[Var]]): @given(CNF()) def test_dsharp_compile_converting_names(sentence: And[Or[Var]]): - assume(all(len(clause) > 0 for clause in sentence)) sentence = And(Or(Var(str(var.name), var.true) for var in clause) for clause in sentence) compiled = dsharp.compile(sentence) @@ -811,6 +839,28 @@ def test_dsharp_compile_converting_names(sentence: And[Or[Var]]): assert sentence.equivalent(compiled) +def test_mark_deterministic(): + s = And() + t = And() + + assert not s.marked_deterministic() + assert not t.marked_deterministic() + + s.mark_deterministic() + + assert s.marked_deterministic() + assert not t.marked_deterministic() + + t.mark_deterministic() + + assert s.marked_deterministic() + assert t.marked_deterministic() + + del s + + assert t.marked_deterministic() + + @given(NNF()) def test_tseitin(sentence: nnf.NNF): @@ -819,6 +869,7 @@ def test_tseitin(sentence: nnf.NNF): T = tseitin.to_CNF(sentence) assert T.is_CNF() + assert T.is_CNF(strict=True) assert T.forget_aux().equivalent(sentence) models = list(complete_models(T.models(), sentence.vars() | T.vars())) @@ -828,6 +879,7 @@ def test_tseitin(sentence: nnf.NNF): assert len(models) == sentence.model_count() + @given(models()) def test_complete_models(model: nnf.And[nnf.Var]): m = {v.name: v.true for v in model} @@ -844,11 +896,14 @@ def test_complete_models(model: nnf.And[nnf.Var]): assert all(x.keys() == m.keys() | {"test1", "test2"} for x in two) if m: - multi = list(complete_models([m, neg], model.vars() | {"test1", "test2"})) + multi = list( + 
+            complete_models([m, neg], model.vars() | {"test1", "test2"})
+        )
         assert len(multi) == 8
         assert len({frozenset(x.items()) for x in multi}) == 8  # all unique
         assert all(x.keys() == m.keys() | {"test1", "test2"} for x in multi)
 
 
+
 if (platform.uname().system, platform.uname().machine) == ('Linux', 'x86_64'):
     def test_kissat_uf20():
@@ -858,11 +913,13 @@ def test_kissat_uf20():
 
     @given(CNF())
     def test_kissat_cnf(sentence: And[Or[Var]]):
-        assume(all(len(clause) > 0 for clause in sentence))
         with using_kissat():
             assert sentence.satisfiable() == sentence._cnf_satisfiable_native()
 
     @given(NNF())
     def test_kissat_nnf(sentence: And[Or[Var]]):
         with using_kissat():
-            assert sentence.satisfiable() == tseitin.to_CNF(sentence)._cnf_satisfiable_native()
+            assert (
+                sentence.satisfiable()
+                == tseitin.to_CNF(sentence)._cnf_satisfiable_native()
+            )
diff --git a/tox.ini b/tox.ini
index 8d55c9f..c117389 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,16 +1,17 @@
 [tox]
-envlist = py34, py35, py36, py37, py38, pypy3
+envlist = py3
 toxworkdir = {env:TOX_WORK_DIR:.tox}
 
 [testenv]
 deps =
     pytest
     flake8
-    py{3,35,36,37,38}: mypy
+    py{3,35,36,37,38,39}: mypy
     hypothesis
 commands =
-    flake8 nnf
-    py{3,35,36,37,38}: python -m mypy --strict nnf
+    flake8 nnf test_nnf.py
+    py{3,35,36,37,38,39}: mypy nnf
+    py{3,35,36,37,38,39}: mypy --python-version=3.4 nnf
     pytest
-    py{3,36,37,38,py3}: python -m doctest nnf/__init__.py -o ELLIPSIS
+    py{3,36,37,38,39,py3}: python -m doctest nnf/__init__.py -o ELLIPSIS
     python examples/socialchoice.py