From 3d0931713ff72fe9307711a3110b4171846dbc38 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Mon, 6 May 2019 10:33:52 +0200 Subject: [PATCH] Implementation of a general graph mapper for ProjectQ --- projectq/cengines/__init__.py | 1 + projectq/cengines/_command_list.py | 136 ++ projectq/cengines/_command_list_test.py | 281 +++++ projectq/cengines/_graph_path_manager.py | 809 ++++++++++++ projectq/cengines/_graph_path_manager_test.py | 941 ++++++++++++++ projectq/cengines/_graphmapper.py | 705 +++++++++++ projectq/cengines/_graphmapper_test.py | 1105 +++++++++++++++++ pytest.ini | 2 + 8 files changed, 3980 insertions(+) create mode 100644 projectq/cengines/_command_list.py create mode 100644 projectq/cengines/_command_list_test.py create mode 100644 projectq/cengines/_graph_path_manager.py create mode 100644 projectq/cengines/_graph_path_manager_test.py create mode 100644 projectq/cengines/_graphmapper.py create mode 100644 projectq/cengines/_graphmapper_test.py diff --git a/projectq/cengines/__init__.py b/projectq/cengines/__init__.py index 966159e78..90b7d95de 100755 --- a/projectq/cengines/__init__.py +++ b/projectq/cengines/__init__.py @@ -32,3 +32,4 @@ from ._tagremover import TagRemover from ._testengine import CompareEngine, DummyEngine from ._twodmapper import GridMapper +from ._graphmapper import GraphMapper diff --git a/projectq/cengines/_command_list.py b/projectq/cengines/_command_list.py new file mode 100644 index 000000000..9642fc2bd --- /dev/null +++ b/projectq/cengines/_command_list.py @@ -0,0 +1,136 @@ +# Copyright 2019 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This is a helper module for the _graphmapper.GraphMapper class. +""" + +from copy import deepcopy +import networkx as nx + +# ============================================================================== + + +class CommandList(): + """Class used to manage a list of ProjectQ commands""" + + def __init__(self): + self._cmds = [] + self.partitions = [set()] + self.interactions = [[]] + + def __len__(self): + return len(self._cmds) + + def __iadd__(self, other): + self.extend(other) + return self + + def __iter__(self): + return self._cmds.__iter__() + + def __getitem__(self, key): + return self._cmds[key] + + def __eq__(self, other): + if isinstance(other, list): + return self._cmds == other + if isinstance(other, CommandList): + return self._cmds == other._cmds + raise NotImplementedError() + + @property + def stored_commands(self): + """ + Simple getter. + """ + return deepcopy(self._cmds) + + def clear(self): + """ + Remove all commands from the container. + """ + self._cmds = [] + self.partitions = [set()] + self.interactions = [[]] + + def append(self, cmd): + """ + Append a command to the end of the container. 
+ """ + self._cmds.append(cmd) + + qubit_ids = {qubit.id for qureg in cmd.all_qubits for qubit in qureg} + if len(qubit_ids) > 1: + # Add new partition if any qubit ids are already present in the + # current partition + if self.partitions[-1] \ + and self.partitions[-1] & qubit_ids: + self.partitions.append(set()) + self.interactions.append([]) + self.partitions[-1] |= qubit_ids + self.interactions[-1].append(tuple(sorted(qubit_ids))) + + def extend(self, iterable): + """ + Extend container by appending commands from the iterable. + """ + for cmd in iterable: + self.append(cmd) + + # -------------------------------------------------------------------------- + + def calculate_qubit_interaction_subgraphs(self, order=2): + """ + Calculate qubits interaction graph based on all commands stored. + + While iterating through the partitions, we create a graph whose + vertices are logical qubit IDs and where edges represent an interaction + between qubits. + Additionally, we make sure that the resulting graph has no vertices + with degree higher than a specified threshold. + + Args: + order (int): maximum degree of the nodes in the resulting graph + + Returns: + A list of list of graph nodes corresponding to all the connected + components of the qubit interaction graph. Within each components, + nodes are sorted in decreasing order of their degree. + + Note: + The current implementation is really aimed towards handling + two-qubit gates but should also work with higher order qubit gates. + """ + graph = nx.Graph() + for timestep in self.interactions: + for interaction in timestep: + for prev, cur in zip(interaction, interaction[1:]): + if prev not in graph \ + or cur not in graph \ + or (len(graph[prev]) < order + and len(graph[cur]) < order): + graph.add_edge(prev, cur) + + # Return value is a list of list of nodes corresponding to a list of + # connected components of the intial graph sorted by their order + # Each connected component is sorted in decreasing order by the degree + # of each node in the graph + return [ + sorted( + graph.subgraph(g), key=lambda n: len(graph[n]), reverse=True) + for g in sorted( + nx.connected_components(graph), + key=lambda c: (max(len(graph[n]) for n in c), len(c)), + reverse=True) + ] diff --git a/projectq/cengines/_command_list_test.py b/projectq/cengines/_command_list_test.py new file mode 100644 index 000000000..86eb1ea57 --- /dev/null +++ b/projectq/cengines/_command_list_test.py @@ -0,0 +1,281 @@ +# Copyright 2019 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tests for projectq.cengines._command_list.py.""" + +from projectq.cengines._command_list import CommandList + +from copy import deepcopy +import pytest +from projectq.ops import (Allocate, Command, X) +from projectq.types import WeakQubitRef + +# ============================================================================== + + +def allocate_all_qubits_cmd(num_qubits): + qb = [] + allocate_cmds = [] + for i in range(num_qubits): + qb.append(WeakQubitRef(engine=None, idx=i)) + allocate_cmds.append( + Command(engine=None, gate=Allocate, qubits=([qb[i]], ))) + return qb, allocate_cmds + + +# ============================================================================== + + +@pytest.fixture +def command_list(): + return CommandList() + + +# ============================================================================== + + +def test_empty_command_list(command_list): + assert not command_list + assert command_list._cmds == [] + assert command_list.partitions == [set()] + + +def test_append_single_qubit_gate(command_list): + assert not command_list + + qb0 = WeakQubitRef(engine=None, idx=0) + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) + command_list.append(cmd0) + assert command_list._cmds == [cmd0] + assert command_list.interactions == [[]] + + cmd1 = Command(engine=None, gate=X, qubits=([qb0], )) + command_list.append(cmd1) + assert command_list._cmds == [cmd0, cmd1] + assert command_list.partitions == [set()] + assert command_list.interactions == [[]] + + assert command_list + command_list.clear() + assert not command_list + assert command_list._cmds == [] + assert command_list.partitions == [set()] + assert command_list.interactions == [[]] + + +def test_append_two_qubit_gate(command_list): + assert not command_list + + qb, allocate_cmds = allocate_all_qubits_cmd(4) + for cmd in allocate_cmds: + command_list.append(cmd) + assert command_list._cmds == allocate_cmds + assert command_list.partitions == [set()] + assert command_list.interactions == [[]] + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + command_list.append(cmd0) + assert command_list._cmds == allocate_cmds + [cmd0] + assert command_list.partitions == [{0, 1}] + assert command_list.interactions == [[(0, 1)]] + + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + command_list.append(cmd1) + assert command_list._cmds == allocate_cmds + [cmd0, cmd1] + assert command_list.partitions == [{0, 1, 2, 3}] + assert command_list.interactions == [[(0, 1), (2, 3)]] + + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + command_list.append(cmd2) + assert command_list._cmds == allocate_cmds + [cmd0, cmd1, cmd2] + assert command_list.partitions == [{0, 1, 2, 3}, {0, 2}] + assert command_list.interactions == [[(0, 1), (2, 3)], [(0, 2)]] + + assert command_list + command_list.clear() + assert not command_list + assert command_list._cmds == [] + assert command_list.partitions == [set()] + assert command_list.interactions == [[]] + + +def test_extend(command_list): + assert not command_list + + qb, allocate_cmds = allocate_all_qubits_cmd(4) + command_list.extend(allocate_cmds) + assert command_list._cmds == allocate_cmds + assert command_list.partitions == [set()] + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + 
command_list.extend((cmd0, cmd1, cmd2, cmd3)) + assert command_list._cmds == allocate_cmds + [cmd0, cmd1, cmd2, cmd3] + assert command_list.partitions == [{0, 1, 2, 3}, {0, 2}] + assert command_list.interactions == [[(0, 1), (2, 3)], [(0, 2)]] + + +def test_iadd(): + command_list_ref = CommandList() + command_list = CommandList() + assert not command_list + assert not command_list_ref + + qb, allocate_cmds = allocate_all_qubits_cmd(4) + command_list_ref.extend(allocate_cmds) + command_list += allocate_cmds + + assert command_list._cmds == command_list_ref._cmds + assert command_list.partitions == command_list_ref.partitions + assert command_list.interactions == command_list_ref.interactions + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list_ref.extend((cmd0, cmd1, cmd2, cmd3)) + command_list += (cmd0, cmd1, cmd2, cmd3) + assert command_list._cmds == command_list_ref._cmds + assert command_list.partitions == command_list_ref.partitions + assert command_list.interactions == command_list_ref.interactions + + +def test_iter(command_list): + assert not command_list + + for cmd in command_list: + raise RuntimeError('ERROR') + + qb, allocate_cmds = allocate_all_qubits_cmd(4) + command_list.extend(allocate_cmds) + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list.extend((cmd0, cmd1, cmd2, cmd3)) + + for cmd, cmd_ref in zip(command_list, command_list.stored_commands): + assert cmd == cmd_ref + + +def test_getitem(command_list): + assert not command_list + + qb, allocate_cmds = allocate_all_qubits_cmd(4) + command_list.extend(allocate_cmds) + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list.extend((cmd0, cmd1, cmd2, cmd3)) + + ref_list = allocate_cmds + [cmd0, cmd1, cmd2, cmd3] + for i in range(len(command_list)): + assert command_list[i] == ref_list[i] + + assert command_list[4:] == ref_list[4:] + + +def test_eq(command_list): + assert not command_list + qb, allocate_cmds = allocate_all_qubits_cmd(4) + command_list.extend(allocate_cmds) + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list.extend((cmd0, cmd1, cmd2, cmd3)) + + with pytest.raises(NotImplementedError): + assert command_list == 2 + with pytest.raises(NotImplementedError): + assert command_list == 2. 
+ with pytest.raises(NotImplementedError): + assert command_list == 'asr' + + assert command_list == allocate_cmds + [cmd0, cmd1, cmd2, cmd3] + assert command_list != allocate_cmds + + other_list = deepcopy(command_list) + assert command_list == other_list + other_list.append(cmd0) + assert command_list != other_list + + +def test_generate_qubit_interaction_graph(command_list): + assert not command_list + + qb, allocate_cmds = allocate_all_qubits_cmd(9) + command_list.extend(allocate_cmds) + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list.extend((cmd0, cmd1, cmd2, cmd3)) + + subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=2) + assert len(subgraphs) == 1 + assert len(subgraphs[0]) == 4 + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + + # -------------------------------------------------------------------------- + + cmd4 = Command(engine=None, gate=X, qubits=([qb[4]], ), controls=[qb[5]]) + cmd5 = Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[6]]) + command_list.extend((cmd4, cmd5)) + + subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=2) + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 4 + + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + assert subgraphs[1] in ([5, 4, 6], [5, 6, 4]) + + # -------------------------------------------------------------------------- + + cmd6 = Command(engine=None, gate=X, qubits=([qb[6]], ), controls=[qb[7]]) + cmd7 = Command(engine=None, gate=X, qubits=([qb[7]], ), controls=[qb[8]]) + command_list.extend((cmd6, cmd7)) + + subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=2) + + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 5 + assert all([n in subgraphs[0] for n in [4, 5, 6, 7, 8]]) + assert subgraphs[0][-2:] in ([4, 8], [8, 4]) + assert len(subgraphs[1]) == 4 + assert all([n in subgraphs[1] for n in [0, 1, 2, 3]]) + assert subgraphs[1][-2:] in ([1, 3], [3, 1]) + + # -------------------------------------------------------------------------- + + command_list.append( + Command(engine=None, gate=X, qubits=([qb[3]], ), controls=[qb[0]])) + subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=3) + + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 4 + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][0] == 0 + assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + assert len(subgraphs[1]) == 5 + assert all([n in subgraphs[1] for n in [4, 5, 6, 7, 8]]) + assert subgraphs[1][-2:] in ([4, 8], [8, 4]) diff --git a/projectq/cengines/_graph_path_manager.py b/projectq/cengines/_graph_path_manager.py new file mode 100644 index 000000000..3b6356894 --- /dev/null +++ b/projectq/cengines/_graph_path_manager.py @@ -0,0 +1,809 @@ +# Copyright 2019 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This is a helper module for the _graphmapper.GraphMapper class. + +Its main goal is to provide classes and functions to manage paths through an +arbitrary graph and eventually generate a list of swap operations to perform as +many paths as possible, by either solving conflicts (ie. crossing points and +intersections; see definitions below) or discarding paths. + +Note that when generating a list of swap operations for a particular path, the +path is usually splitted into two halves in order to maximize the number of +swap operations that can be performed simultaneously. + +In the context of this module, a distinction is made between a crossing point +and an intersection. + +A crossing point is just as its name implies a point or node of the graph that +simultaneously belongs to one or more paths. On the other hand, an intersection +is defined as a particular crossing point of a path for which one of the +splitted sub-path halves has an endpoint. This means that a path may have at +most two intersections + +This is best exemplified by some examples: + + Given the path [0, 1, 2, 3], a possible split to maximize simultaneous + swapping operations would be: + [[0, 1], [2, 3]] where 1 or 2 may be intersections. + + Given the path [0, 1, 2, 3, 4], possible splits would include: + [[0, 1, 2], [3, 4]] where 2 or 3 could be intersections if they are + crossings + [[0, 1], [2, 3, 4]] where 1 or 2 could be intersections if they are + crossings +""" + +import itertools +import math +import numpy as np +import networkx as nx + +# ============================================================================== + + +def _find_first_order_intersections(crossings, split_paths): + """ + Find out which crossing nodes are intersections. + + A crossing point is considered an intersection if and only if either: + - the end of sub-path 1 is the crossing point + - the beginning of sub-path 2 is the crossing point + + Args: + crossings (dict) : Dictionary containing the list of all crossing + points indexed by the path ID + split_paths (dict) : Dictionary containing the two halves of each paths + indexed by the path ID + + Returns: + intersections (dict) : Dictionary indexed by the intersection node + containing the IDs of the paths for which that + particular node is considered an intersection + """ + intersections = {} + + for path_id, (subpath1, subpath2) in split_paths.items(): + for crossing in crossings[path_id]: + if crossing.overlap[0] in (subpath1[-1], subpath2[0]): + if crossing.overlap[0] not in intersections: + intersections[crossing.overlap[0]] = set((path_id, )) + else: + intersections[crossing.overlap[0]].add(path_id) + + return intersections + + +def _try_solve_intersection(intersection_node, subpath1, subpath2, + subpath1_not_crossing, subpath2_not_crossing): + """ + Attempt to solve a first order intersection by modifying sub-paths. 
+ + Args: + intersection_node (int) : Intersection node + subpath1 (list) : First half of the path + subpath2 (list) : Second half of the path + subpath1_not_crossing (list) : Helper list of booleans indicating + whether the nodes of the first subpath + are crossing or not + subpath2_not_crossing (list) : Helper list of booleans indicating + whether the nodes of the second subpath + are crossing or not + + Note: + subpath1*, subpath2* arguments are modified in-place + + Returns: + True/False depending on whether the intersection could be solved or not + """ + if len(subpath1) + len(subpath2) < 4: + return False + + if subpath1[-1] == intersection_node: + # Try moving the head of subpath2 to subpath1 + if len(subpath2) > 1 \ + and subpath2_not_crossing[0] \ + and subpath2_not_crossing[1]: + subpath1.append(subpath2[0]) + subpath1_not_crossing.append(subpath2_not_crossing[0]) + del subpath2[0] + del subpath2_not_crossing[0] + return True + else: + # Try moving the tail of subpath1 to subpath2 + if len(subpath1) > 1 \ + and subpath1_not_crossing[-1] \ + and subpath1_not_crossing[-2]: + subpath2.insert(0, subpath1.pop()) + subpath2_not_crossing.insert(0, subpath1_not_crossing.pop()) + return True + + # Try moving the last two elements of subpath1 to subpath2 + if len(subpath1) > 2 \ + and subpath1_not_crossing[-2] \ + and subpath1_not_crossing[-3]: + subpath2.insert(0, subpath1.pop()) + subpath2.insert(0, subpath1.pop()) + subpath2_not_crossing.insert(0, subpath1_not_crossing.pop()) + subpath2_not_crossing.insert(0, subpath1_not_crossing.pop()) + return True + + # Try moving the first two elements of subpath2 to subpath1 + if len(subpath2) > 2 \ + and subpath2_not_crossing[1] \ + and subpath2_not_crossing[2]: + subpath1.append(subpath2[0]) + subpath1.append(subpath2[1]) + subpath1_not_crossing.append(subpath2_not_crossing[0]) + subpath1_not_crossing.append(subpath2_not_crossing[1]) + del subpath2[:2] + del subpath2_not_crossing[:2] + return True + + return False + + +def _return_swaps(split_paths): + """ + Return a list of swap operations given a list of path halves + + Args: + split_paths (dict): Dictionary indexed by path ID containing 2-tuples + of path halves + + Returns: A list of swap operations (2-tuples) + """ + swap_operations = [] + + for path_id in sorted(split_paths): + path = split_paths[path_id] + swap_operations.append([]) + # Add swaps operations for first half of the path + for prev, cur in zip(path[0], path[0][1:]): + swap_operations[-1].append((prev, cur)) + + # Add swaps operations for the second half of the path + for prev, cur in zip(path[1][::-1], path[1][-2::-1]): + swap_operations[-1].append((prev, cur)) + + return swap_operations + + +# ============================================================================== + + +class PathCacheExhaustive(): + """ + Class acting as cache for optimal paths through the graph. + """ + + def __init__(self, path_length_threshold): + self._path_length_threshold = path_length_threshold + self._cache = {} + self.key_type = frozenset + + def __str__(self): + ret = "" + for (node0, node1), path in self._cache.items(): + ret += "{}: {}\n".format(sorted([node0, node1]), path) + return ret + + def empty_cache(self): + """Empty the cache.""" + self._cache = {} + + def get_path(self, start, end): + """ + Return a path from the cache. 
+ + Args: + start (object): Start node for the path + end (object): End node for the path + + Returns: Optimal path stored in cache + + Raises: KeyError if path is not present in the cache + """ + return self._cache[self.key_type((start, end))] + + def has_path(self, start, end): + """ + Test whether a path connecting start to end is present in the cache. + + Args: + start (object): Start node for the path + end (object): End node for the path + + Returns: True/False + """ + return self.key_type((start, end)) in self._cache + + def add_path(self, path): + """ + Add a path to the cache. + + This method also recursively adds all the subpaths that are at least + self._path_length_threshold long to the cache. + + Args: + path (list): Path to store inside the cache + """ + length = len(path) + for start in range(length - self._path_length_threshold + 1): + node0 = path[start] + for incr in range(length - start - 1, + self._path_length_threshold - 2, -1): + end = start + incr + self._cache[self.key_type((node0, + path[end]))] = path[start:end + 1] + + +# ============================================================================== + + +class _Crossing: + __slots__ = ['path_id', 'overlap'] + + def __init__(self, path_id, overlap): + self.path_id, self.overlap = path_id, overlap + + def __eq__(self, other): + if isinstance(other, self.__class__): + return (self.path_id, self.overlap) == (other.path_id, + other.overlap) + if isinstance(other, list): + return self.overlap == other + if isinstance(other, int): + return self.overlap[0] == other + raise NotImplementedError("Invalid comparison") + + def __str__(self): + return '{} {}'.format(self.path_id, self.overlap) + + def __repr__(self): + return 'Crossing({}, {})'.format(self.path_id, self.overlap) + + +class PathManager: + """ + Class managing interactions between distant qubits on an arbitrary graph. + + This class essentially manages paths through an arbitrary graph, handling + possible intersections between multiple paths through an arbitrary graph by + resolving conflict points such as crossings and intersections. 
+ + Attributes: + crossings (dict) : dictionary of crossing points indexed by path ID + cache (PathCacheExhaustive) : cache manager + enable_caching (bool): indicates whether caching is enabled or not + graph (networkx.Graph): Arbitrary connected graph + paths (dict) : list of paths currently held by a path container indexed + by a unique ID + paths_stats (dict) : dictionary for storing statistics indexed by + interactions (frozenset of pairs of qubits) + """ + + def __init__(self, graph, enable_caching=True): + """ + Args: + graph (networkx.Graph): an arbitrary connected graph + enable_caching (bool): Controls whether path caching is enabled + """ + # Make sure that we start with a valid graph + if not nx.is_connected(graph): + raise RuntimeError("Input graph must be a connected graph") + elif not all([isinstance(n, int) for n in graph]): + raise RuntimeError( + "All nodes inside the graph needs to be integers") + else: + self.graph = graph + + self.paths = {} + self.crossings = {} + self._path_id = 0 + + self.enable_caching = enable_caching + # Path cache support + path_length_threshold = 3 + self.cache = PathCacheExhaustive(path_length_threshold) + + # Statistics + self.paths_stats = dict() + + def __str__(self): + interactions = [ + k for _, k in sorted( + zip(self.paths_stats.values(), self.paths_stats.keys()), + reverse=True) + ] + + max_width = int( + math.ceil(math.log10(max(self.paths_stats.values()))) + 1) + paths_stats_str = "" + if self.enable_caching: + average_path_length = np.average( + [ + len(self.cache.get_path(*list(k))) + if not self.graph.has_edge(*list(k)) else 2 + for k in interactions + ], + weights=[self.paths_stats[k] for k in interactions]) + for k in interactions: + if self.graph.has_edge(*list(k)): + path = list(k) + else: + path = self.cache.get_path(*list(k)) + paths_stats_str += "\n {3:3} - {4:3}: {0:{1}} | {2}".format( + self.paths_stats[k], max_width, path, *k) + else: + average_path_length = None + for k in interactions: + paths_stats_str += "\n {2:3} - {3:3}: {0:{1}}".format( + self.paths_stats[k], max_width, *k) + return "Path statistics:{}\n\nAverage path length: {}".format( + paths_stats_str, average_path_length) + + ################################################################# + # Methods querying information about the state of the container # + ################################################################# + + def get_all_nodes(self): + """ + Return a list of all nodes that are part of some path. + + Returns: + A set of nodes that are part of at least one path. + """ + all_nodes = [] + for row in self.paths.values(): + all_nodes.extend(row[0]) + all_nodes.extend(row[1]) + return set(all_nodes) + + def get_all_paths(self): + """ + Return a list of all the path contained in the container. + + Returns: + A list of paths (list of list of ints) + """ + return [ + self.paths[k][0] + self.paths[k][1] for k in sorted(self.paths) + ] + + def has_interaction(self, node0, node1): + """ + Check if a path within the container already generate an interaction + + Args: + node0 (int) : An endnode of a path + node1 (int) : An endnode of a path + + Returns: + True or False depending on whether the container has a path linking + node0 to node1 + """ + for path in self.paths.values(): + if frozenset((node0, node1)) == frozenset((path[0][0], + path[1][-1])): + return True + return False + + def max_crossing_order(self): + """ + Return the order of the largest crossing. 
+ + The order of a crossing is defined as the number of paths that + intersect + + Returns: + An int + """ + crossing_orders = list( + itertools.chain.from_iterable( + [[len(c.overlap) for c in crossing] + for crossing in self.crossings.values()])) + if crossing_orders: + return max(crossing_orders) + return 0 + + ###################################################### + # Methods for resetting the content of the container # + ###################################################### + + def clear_paths(self): + """ + Reset the list of paths managed by this instance. + + Note: + Does not reset path statistics or the state of the cache. + """ + self.paths.clear() + self.crossings.clear() + + def clear(self): + """ + Completely reset the state of this instance. + + Note: + Both path statistics and cache are also reset + """ + self.clear_paths() + self.paths_stats.clear() + self.cache.empty_cache() + + ############################################################# + # Entry point for the mapper to extract the final path list # + ############################################################# + + def generate_swaps(self): + """ + Generate a list of swaps to execute as many paths as possible. + + Returns: + A list of swap operations (tuples) + """ + + self._solve_first_order_intersections( + _find_first_order_intersections(self.crossings, self.paths)) + + # By this point, we should have solved all intersections + return list(itertools.chain.from_iterable(_return_swaps(self.paths))) + + ############################################# + # Methods for adding paths to the container # + ############################################# + + def push_interaction(self, node0, node1): + """ + Plan an interaction between two qubit. + + Args: + node0 (int) : backend id of the first qubit + node1 (int) : backend id of the second qubit + + Returns: + True if the path could be added to the container, False otherwise + """ + + # TODO: think about merging paths + # TODO: maybe apply gates in the middle of the swaps + + interaction = frozenset((node0, node1)) + if self.has_interaction(node0, node1): + self.paths_stats[interaction] += 1 + return True + + if not self.graph.has_edge(node0, node1): + new_path = self._calculate_path(node0, node1) + else: + new_path = None + + if new_path: + if not self.try_add_path(new_path) \ + and not self._try_alternative_paths(node0, node1): + return False + else: + # Prevent adding a new path if it contains some already interacting + # qubits + for path in self.paths.values(): + if path[0][0] in (node0, node1) or path[1][-1] in (node0, + node1): + return False + + if interaction not in self.paths_stats: + self.paths_stats[interaction] = 1 + else: + self.paths_stats[interaction] += 1 + return True + + def try_add_path(self, new_path): + """ + Try adding a path to the path container. 
+ + Args: + new_path (list) : path to add to the container + + Returns: + True if the path could be added to the container, False otherwise + """ + # Prevent adding a new path if it contains some already interacting + # qubits + for path in self.paths.values(): + if path[0][0] in new_path or path[1][-1] in new_path: + return False + + # Make sure each node appears only once + if len(new_path) != len(set(new_path)): + return False + + idx = len(new_path) >> 1 + new_subpath0, new_subpath1 = new_path[:idx], new_path[idx:] + new_intersections = {} + new_crossings = [] + for idx, (subpath0, subpath1) in self.paths.items(): + path_overlap = [ + node for node in new_path + if node in subpath0 or node in subpath1 + ] + if len(path_overlap) > 1: + return False + if len(path_overlap) == 1: + new_crossings.append(_Crossing(idx, path_overlap)) + + # Is this crossing point an intersection for the new path? + if new_subpath0[-1] in path_overlap \ + or new_subpath1[0] in path_overlap: + if path_overlap[0] not in new_intersections: + new_intersections[path_overlap[0]] = set( + (self._path_id, )) + else: + new_intersections[path_overlap[0]].add(self._path_id) + + # Is this crossing point an intersection for the other path? + subpath0, subpath1 = self.paths[idx] + if subpath0[-1] in path_overlap \ + or subpath1[0] in path_overlap: + if path_overlap[0] not in new_intersections: + new_intersections[path_overlap[0]] = set((idx, )) + else: + new_intersections[path_overlap[0]].add(idx) + + self.paths[self._path_id] = (new_subpath0, new_subpath1) + self.crossings[self._path_id] = new_crossings + for crossing in new_crossings: + path_id = crossing.path_id + self.crossings[path_id].append( + _Crossing(self._path_id, crossing.overlap)) + + # Remove the entries where only the new path is present, as the + # solution in those cases is to execute the new path after the other + # paths, which is going to happen anyway as the new path is appended to + # the list of paths + new_intersections = { + node: path_ids + for node, path_ids in new_intersections.items() + if len(path_ids) > 1 or self._path_id not in path_ids + } + + if new_intersections: + self._solve_first_order_intersections(new_intersections) + + if self._path_id not in self.paths: + return False + + self._path_id += 1 + return True + + ############################################# + # Methods for adding paths to the container # + ############################################# + + def remove_path_by_id(self, path_id): + """ + Remove a path from the path container given its ID. + + Args: + path_id (int) : ID of path to remove + + Raises: + KeyError if path_id is not valid + """ + if path_id not in self.paths: + raise KeyError(path_id) + self.crossings = { + k: [i for i in v if i.path_id != path_id] + for k, v in self.crossings.items() if k != path_id + } + del self.paths[path_id] + + def remove_crossing_of_order_higher_than(self, order): + """ + Remove paths that have crossings with order above a certain threshold. 
+ + Args: + order (int) : Maximum allowed order of crossing + """ + number_of_crossings_per_path = { + path_id: len([c for c in crossing if len(c.overlap) > order]) + for path_id, crossing in self.crossings.items() + } + + path_id_list = [ + x for y, x in sorted( + zip(number_of_crossings_per_path.values(), + number_of_crossings_per_path.keys())) if y + ] + + while path_id_list and self.max_crossing_order() > order: + path_id = path_id_list.pop() + self.remove_path_by_id(path_id) + + def swap_paths(self, path_id1, path_id2): + """ + Swap two path within the path container. + + Args: + path_id1 (int) : ID of first path + path_id2 (int) : ID of second path + """ + + if path_id1 not in self.paths: + raise KeyError(path_id1) + if path_id2 not in self.paths: + raise KeyError(path_id2) + + for crossing_list in self.crossings.values(): + for crossing in crossing_list: + if path_id1 == crossing.path_id: + crossing.path_id = path_id2 + elif path_id2 == crossing.path_id: + crossing.path_id = path_id1 + + self.crossings[path_id2], self.crossings[path_id1] = self.crossings[ + path_id1], self.crossings[path_id2] + self.paths[path_id2], self.paths[path_id1] = self.paths[ + path_id1], self.paths[path_id2] + + ########################## + # Private helper methods # + ########################## + + def _solve_first_order_intersections(self, intersections): + """ + Solve all first order intersections. + + The intersections may be "solved" in two different manners: + - Sub-path split are modified to transform intersections in simple + crossings + - Paths are removed from the container + + Pre-conditions: + self.max_crossing_order() == 1 + + Args: + intersections (dict): TODO + """ + + # Get a list of the intersection nodes sorted by intersection order and + # total number of points of all paths for that particular intersection + def intersection_sort(crossing): + order = len(crossing[0]) + number_of_points = sum([ + len(self.paths[path_id][0]) + len(self.paths[path_id][1]) + for path_id in crossing[0] + ]) - order + 1 + return (order, number_of_points) + + intersection_node_list = [ + x for _, x in sorted( + zip(intersections.values(), intersections.keys()), + key=intersection_sort) + ] + + # and process them + while intersection_node_list: + intersection_node = intersection_node_list[-1] + node_is_not_crossing = { + path_id: ([ + node not in self.crossings[path_id] + for node in self.paths[path_id][0] + ], [ + node not in self.crossings[path_id] + for node in self.paths[path_id][1] + ]) + for path_id in intersections[intersection_node] + } + + if len(intersections[intersection_node]) == 1: + # This crossing is an intersection only for one path + # -> only need to make sure that the other paths gets + # processed first when generating the swaps + path_id = list(intersections[intersection_node])[0] + + for crossing in self.crossings[path_id]: + if crossing.overlap[0] == intersection_node: + other_path_id = crossing.path_id + if path_id < other_path_id: + self.swap_paths(path_id, other_path_id) + del intersections[intersection_node] + del intersection_node_list[-1] + else: + # This crossing is an intersection for multiple paths + # -> find all paths concerned with this crossing + path_id_list = [ + x for _, x in sorted( + zip([ + len(self.paths[i][0]) + len(self.paths[i][1]) + for i in intersections[intersection_node] + ], intersections[intersection_node])) + ] + + # TODO: multiple passes if failure to find an optimal solution + path_id1 = path_id_list.pop() + path_id2 = path_id_list.pop() + + solved = 
_try_solve_intersection( + intersection_node, + *(self.paths[path_id1] + node_is_not_crossing[path_id1])) + + if not solved: + solved = _try_solve_intersection( + intersection_node, + *(self.paths[path_id2] + + node_is_not_crossing[path_id2])) + + if not solved: + # Last resort: delete one path + path_id_min, path_id_max = sorted([path_id1, path_id2]) + del node_is_not_crossing[path_id_max] + self.remove_path_by_id(path_id_max) + node_is_not_crossing[path_id_min] = ([ + node not in self.crossings[path_id_min] + for node in self.paths[path_id_min][0] + ], [ + node not in self.crossings[path_id_min] + for node in self.paths[path_id_min][1] + ]) + + intersections = _find_first_order_intersections( + self.crossings, self.paths) + intersection_node_list = [ + x for _, x in sorted( + zip(intersections.values(), intersections.keys()), + key=intersection_sort) + ] + + def _calculate_path(self, node0, node1): + """ + Calculate a path between two nodes on the graph. + + Args: + node0 (int) : backend id of the first qubit + node1 (int) : backend id of the second qubit + """ + + if self.enable_caching: + try: + path = self.cache.get_path(node0, node1) + except KeyError: + path = nx.shortest_path(self.graph, source=node0, target=node1) + self.cache.add_path(path) + else: + path = nx.shortest_path(self.graph, source=node0, target=node1) + + return path + + def _try_alternative_paths(self, node0, node1): + """ + Attempt to find some alternative paths + """ + for neighbour in self.graph[node0]: + new_path = self._calculate_path(neighbour, node1) + if new_path[-1] == neighbour: + new_path = new_path + [node0] + else: + new_path = [node0] + new_path + if self.try_add_path(new_path): + return True + for neighbour in self.graph[node1]: + new_path = self._calculate_path(node0, neighbour) + if new_path[-1] == neighbour: + new_path = new_path + [node1] + else: + new_path = [node1] + new_path + if self.try_add_path(new_path): + return True + + return False diff --git a/projectq/cengines/_graph_path_manager_test.py b/projectq/cengines/_graph_path_manager_test.py new file mode 100644 index 000000000..e9b9dbbbd --- /dev/null +++ b/projectq/cengines/_graph_path_manager_test.py @@ -0,0 +1,941 @@ +# Copyright 2019 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
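# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the patch itself): minimal PathManager
# usage.  The 4-node linear graph `demo_graph` is an assumption chosen for the
# example.  Nodes 0 and 3 are not adjacent, so push_interaction() stores the
# shortest path 0-1-2-3 and generate_swaps() returns the swaps that bring the
# two qubits next to each other.

import networkx as nx
from projectq.cengines._graph_path_manager import PathManager

demo_graph = nx.Graph()
demo_graph.add_edges_from([(0, 1), (1, 2), (2, 3)])
manager = PathManager(graph=demo_graph, enable_caching=True)

assert manager.push_interaction(0, 3)
assert manager.get_all_paths() == [[0, 1, 2, 3]]

# The path is split into the halves [0, 1] and [2, 3], so both swaps can be
# executed simultaneously; afterwards the two qubits sit on the adjacent
# nodes 1 and 2.
assert manager.generate_swaps() == [(0, 1), (3, 2)]
# ------------------------------------------------------------------------------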
+"""Tests for projectq.cengines._graph_path_manager.py.""" + +from copy import deepcopy +import itertools +import networkx as nx +import pytest +from projectq.cengines._graph_path_manager import PathManager, \ + PathCacheExhaustive, _find_first_order_intersections, _Crossing + +# ============================================================================== + + +def generate_grid_graph(nrows, ncols): + graph = nx.Graph() + graph.add_nodes_from(range(nrows * ncols)) + + for row in range(nrows): + for col in range(ncols): + node0 = col + ncols * row + + is_middle = ((0 < row < nrows - 1) and (0 < col < ncols - 1)) + add_horizontal = is_middle or (row in (0, nrows - 1) and + (0 < col < ncols - 1)) + add_vertical = is_middle or (col in (0, ncols - 1) and + (0 < row < nrows - 1)) + + if add_horizontal: + graph.add_edge(node0, node0 - 1) + graph.add_edge(node0, node0 + 1) + if add_vertical: + graph.add_edge(node0, node0 - ncols) + graph.add_edge(node0, node0 + ncols) + + return graph + + +@pytest.fixture(scope="module") +def simple_graph(): + # 2 4 + # / \ / | + # 0 - 1 3 | + # \ / \ | + # 5 6 + graph = nx.Graph() + graph.add_nodes_from(range(7)) + graph.add_edges_from([(0, 1), (1, 2), (1, 5), (2, 3), (5, 3), (3, 4), (3, + 6), + (4, 6)]) + return graph + + +@pytest.fixture +def grid44_manager(): + return PathManager(graph=generate_grid_graph(4, 4), enable_caching=False) + + +# ============================================================================== + + +def test_path_cache_exhaustive(): + path_length_threshold = 3 + cache = PathCacheExhaustive(path_length_threshold) + + assert not cache._cache + cache.add_path(['a', 'b', 'c']) + assert cache._cache == {cache.key_type(('a', 'c')): ['a', 'b', 'c']} + + assert cache.has_path('a', 'c') + assert not cache.has_path('a', 'b') + assert not cache.has_path('b', 'c') + + cache.empty_cache() + assert not cache._cache + + cache.add_path(['a', 'b', 'c', 'd']) + assert cache._cache == { + cache.key_type(('a', 'c')): ['a', 'b', 'c'], + cache.key_type(('a', 'd')): ['a', 'b', 'c', 'd'], + cache.key_type(('b', 'd')): ['b', 'c', 'd'] + } + assert cache.get_path('a', 'd') == ['a', 'b', 'c', 'd'] + assert cache.has_path('a', 'd') + assert cache.has_path('d', 'a') + assert cache.has_path('a', 'c') + assert cache.has_path('b', 'd') + assert not cache.has_path('a', 'b') + assert not cache.has_path('b', 'a') + assert not cache.has_path('b', 'c') + assert not cache.has_path('c', 'd') + + str_repr = str(cache) + assert str_repr.count("['a', 'd']: ['a', 'b', 'c', 'd']") == 1 + assert str_repr.count("['a', 'c']: ['a', 'b', 'c']") == 1 + assert str_repr.count("['b', 'd']: ['b', 'c', 'd']") == 1 + + +# ============================================================================== + + +def test_path_container_crossing_class(): + Crossing = _Crossing + crossing_list = [Crossing(0, [1]), Crossing(1, [1]), Crossing(2, [2])] + + assert Crossing(0, [1]) == Crossing(0, [1]) + assert Crossing(0, [1]) != Crossing(1, [1]) + assert Crossing(0, [1]) != Crossing(0, [0, 1]) + assert Crossing(0, [0]) != Crossing(1, [0, 1]) + + assert [0, 1] == Crossing(0, [0, 1]) + assert [0, 1] == Crossing(1, [0, 1]) + assert [0, 1] != Crossing(0, [0]) + assert [0, 1] != Crossing(1, [0]) + + assert Crossing(0, [1]) in crossing_list + assert [0] not in crossing_list + assert [1] in crossing_list + + assert str(Crossing(0, [1])) == "{} {}".format(0, [1]) + assert repr(Crossing(0, [1])) == "Crossing({}, {})".format(0, [1]) + + with pytest.raises(NotImplementedError): + assert "" == Crossing(0, [1]) + + 
+# ============================================================================== + + +def test_valid_and_invalid_graphs(simple_graph): + graph = nx.Graph() + graph.add_nodes_from('abcd') + with pytest.raises(RuntimeError): + PathManager(graph=graph) + + graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')]) + with pytest.raises(RuntimeError): + PathManager(graph=graph) + + graph = deepcopy(simple_graph) + graph.remove_edge(0, 1) + with pytest.raises(RuntimeError): + PathManager(graph=graph) + + +def test_path_container_has_interaction(grid44_manager): + path_dict = { + 0: ([4, 5], [6, 7]), + 1: ([1, 5], [9, 13]), + 2: ([8, 9], [10, 11, 15]) + } + grid44_manager.paths = path_dict + + assert grid44_manager.has_interaction(4, 7) + assert grid44_manager.has_interaction(7, 4) + assert grid44_manager.has_interaction(8, 15) + assert grid44_manager.has_interaction(15, 8) + assert not grid44_manager.has_interaction(4, 5) + assert not grid44_manager.has_interaction(4, 6) + assert not grid44_manager.has_interaction(4, 8) + assert not grid44_manager.has_interaction(4, 9) + assert not grid44_manager.has_interaction(9, 4) + assert not grid44_manager.has_interaction(1, 5) + assert not grid44_manager.has_interaction(1, 9) + assert not grid44_manager.has_interaction(8, 9) + assert not grid44_manager.has_interaction(8, 10) + assert not grid44_manager.has_interaction(8, 11) + + +def test_path_container_get_all_nodes(grid44_manager): + path_dict = { + 0: ([4, 5], [6, 7]), + 1: ([1, 5], [9, 13]), + 2: ([8, 9], [10, 11, 15]) + } + grid44_manager.paths = path_dict + + assert grid44_manager.get_all_nodes() == set((1, 4, 5, 6, 7, 8, 9, 10, 11, + 13, 15)) + + +def test_path_container_get_all_paths(grid44_manager): + path_dict = { + 0: ([4, 5], [6, 7]), + 1: ([1, 5], [9, 13]), + 2: ([8, 9], [10, 11, 15]) + } + grid44_manager.paths = path_dict + + assert grid44_manager.get_all_paths() == [[4, 5, 6, 7], [1, 5, 9, 13], + [8, 9, 10, 11, 15]] + + +def test_path_container_max_order(grid44_manager): + assert grid44_manager.max_crossing_order() == 0 + + assert grid44_manager.try_add_path([4, 5, 6, 7]) + assert grid44_manager.max_crossing_order() == 0 + + assert grid44_manager.try_add_path([1, 5, 9, 13]) + assert grid44_manager.max_crossing_order() == 1 + + +def test_path_container_clear(grid44_manager): + grid44_manager.paths = { + 0: ([4, 5], [6, 7]), + 1: ([1, 5], [9, 13]), + 2: ([8, 9], [10, 11, 15]) + } + grid44_manager.crossings = {0: None, 1: None, 2: None} # dummy values + grid44_manager.paths_stats = {0: 0, 1: 1, 2: 2} # dummy values + + grid44_manager.clear_paths() + assert not grid44_manager.paths + assert not grid44_manager.crossings + assert grid44_manager.paths_stats + + grid44_manager.paths = { + 0: ([4, 5], [6, 7]), + 1: ([1, 5], [9, 13]), + 2: ([8, 9], [10, 11, 15]) + } + grid44_manager.crossings = {0: None, 1: None, 2: None} # dummy values + grid44_manager.paths_stats = {0: 0, 1: 1, 2: 2} # dummy values + + grid44_manager.clear() + assert not grid44_manager.paths + assert not grid44_manager.crossings + assert not grid44_manager.paths_stats + + +def test_path_container_add_path(grid44_manager): + Crossing = _Crossing + + assert grid44_manager.try_add_path([4, 5, 6, 7]) + assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] + assert grid44_manager.crossings == {0: []} + + assert not grid44_manager.try_add_path([4, 8, 12]) + assert not grid44_manager.try_add_path([0, 1, 2, 3, 7]) + assert not grid44_manager.try_add_path([1, 5, 6, 10]) + assert grid44_manager.get_all_paths() == [[4, 5, 6, 
7]] + assert grid44_manager.crossings == {0: []} + + assert grid44_manager.try_add_path([1, 5, 9, 13]) + assert [4, 5, 6, 7] in grid44_manager.get_all_paths() + assert [1, 5, 9, 13] in grid44_manager.get_all_paths() + assert grid44_manager.crossings == { + 0: [Crossing(1, [5])], + 1: [Crossing(0, [5])] + } + + assert grid44_manager.try_add_path([10, 6, 9, 14, 15]) + assert [4, 5, 6, 7] in grid44_manager.get_all_paths() + assert [4, 5, 6, 7] in grid44_manager.get_all_paths() + assert [10, 6, 9, 14, 15] in grid44_manager.get_all_paths() + + crossings_overlap = [ + sorted([c.overlap[0] for c in crossing_list]) + for crossing_list in grid44_manager.crossings.values() + ] + + assert [6, 9] in crossings_overlap + assert [5, 9] in crossings_overlap + assert [5, 6] in crossings_overlap + + +def test_path_container_push_interaction(grid44_manager): + assert grid44_manager.push_interaction(4, 7) + assert grid44_manager.push_interaction(4, 7) + assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] + assert grid44_manager.crossings == {0: []} + + assert grid44_manager.push_interaction(14, 15) + assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] + assert grid44_manager.crossings == {0: []} + + assert not grid44_manager.push_interaction(0, 4) + + +@pytest.mark.parametrize("enable_caching", [False, True]) +def test_path_container_push_interaction_alternative(grid44_manager, + enable_caching): + grid44_manager.enable_caching = enable_caching + interaction_list = [ + [(4, 7), (0, 12), False], + [(4, 7), (12, 0), True], + [(7, 4), (0, 12), False], + [(7, 4), (12, 0), True], + ] + + for inter1, inter2, may_fail in interaction_list: + grid44_manager.clear_paths() + assert grid44_manager.push_interaction(*inter1) + if may_fail: + if grid44_manager.push_interaction(*inter2): + assert grid44_manager.get_all_paths()[1] in ([4, 5, 6, 7], + [7, 6, 5, 4]) + else: + assert grid44_manager.push_interaction(*inter2) + assert grid44_manager.get_all_paths()[1] in ([4, 5, 6, 7], + [7, 6, 5, 4]) + + interaction_list = [ + [(4, 7), (15, 3)], + [(4, 7), (3, 15)], + [(7, 4), (15, 3)], + [(7, 4), (3, 15)], + ] + grid44_manager.clear() + for inter1, inter2 in interaction_list: + grid44_manager.clear_paths() + assert grid44_manager.push_interaction(*inter1) + assert grid44_manager.push_interaction(*inter2) + assert grid44_manager.get_all_paths()[1] in ([4, 5, 6, 7], + [7, 6, 5, 4]) + + +def test_path_container_remove_path(grid44_manager): + Crossing = _Crossing + + assert grid44_manager.try_add_path([4, 5, 6, 7]) + assert grid44_manager.try_add_path([1, 5, 9, 13]) + assert grid44_manager.try_add_path([8, 9, 10, 11, 15]) + + with pytest.raises(KeyError): + grid44_manager.remove_path_by_id(10) + + grid44_manager.remove_path_by_id(0) + assert [4, 5, 6, 7] in grid44_manager.get_all_paths() + assert [1, 5, 9, 13] in grid44_manager.get_all_paths() + assert grid44_manager.crossings == { + 1: [Crossing(2, [5])], + 2: [Crossing(1, [5])] + } + + grid44_manager.remove_path_by_id(1) + assert [[1, 5, 9, 13]] == grid44_manager.get_all_paths() + assert grid44_manager.crossings == {2: []} + + assert grid44_manager.try_add_path([8, 9, 10, 11, 15]) + assert [1, 5, 9, 13] in grid44_manager.get_all_paths() + assert [8, 9, 10, 11, 15] in grid44_manager.get_all_paths() + assert grid44_manager.crossings == { + 2: [Crossing(3, [9])], + 3: [Crossing(2, [9])] + } + + +def test_path_container_swap_paths(grid44_manager): + path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} + for _, path in path_dict.items(): + assert 
grid44_manager.try_add_path(path) + assert path in grid44_manager.get_all_paths() + path_dict_ref = grid44_manager.paths + + with pytest.raises(KeyError): + grid44_manager.swap_paths(10, 0) + with pytest.raises(KeyError): + grid44_manager.swap_paths(0, 10) + + grid44_manager.swap_paths(0, 1) + path_dict_ref[0], path_dict_ref[1] = path_dict_ref[1], path_dict_ref[0] + assert grid44_manager.paths == path_dict_ref + + path_dict[3] = [20, 21, 6, 22, 23, 10, 24, 25] + assert grid44_manager.try_add_path(path_dict[3]) + assert path_dict[3] in grid44_manager.get_all_paths() + path_dict_ref = grid44_manager.paths + + grid44_manager.swap_paths(1, 3) + path_dict_ref[1], path_dict_ref[3] = path_dict_ref[3], path_dict_ref[1] + assert grid44_manager.paths == path_dict_ref + + +def test_path_grid44_manager_discard_paths(grid44_manager): + Crossing = _Crossing + path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} + for _, path in path_dict.items(): + assert grid44_manager.try_add_path(path) + assert path in grid44_manager.get_all_paths() + + path_dict_ref = grid44_manager.paths + grid44_manager.remove_crossing_of_order_higher_than(1) + assert grid44_manager.max_crossing_order() == 1 + assert grid44_manager.paths == path_dict_ref + assert grid44_manager.crossings == { + 0: [Crossing(2, [9])], + 1: [Crossing(2, [5])], + 2: [Crossing(1, [5]), Crossing(0, [9])] + } + + grid44_manager.remove_crossing_of_order_higher_than(0) + del path_dict_ref[1] + assert grid44_manager.max_crossing_order() == 0 + assert grid44_manager.paths == path_dict_ref + assert grid44_manager.crossings == {0: [], 1: []} + + +def test_path_container_find_first_order_intersections(): + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 10), (10, 11), (11, 12), (12, + 5)]) + graph.add_edges_from([(3, 1), (1, 4)]) + graph.add_edges_from([(5, 6), (6, 7)]) + graph.add_edges_from([(20, 6), (6, 21), (21, 22), (22, 23), (23, 24)]) + graph.add_edges_from([(30, 1), (1, 31), (31, 32)]) + graph.add_edges_from([(40, 23), (23, 41), (41, 42), (42, 43), (43, 44)]) + + Crossing = _Crossing + manager = PathManager(graph=graph, enable_caching=False) + + path_dict = {0: [0, 1, 2, 10, 11, 12], 1: [3, 1, 4], 2: [5, 6, 7]} + for _, path in path_dict.items(): + assert manager.try_add_path(path) + assert path in manager.get_all_paths() + + assert manager.crossings == { + 0: [Crossing(1, [1])], + 1: [Crossing(0, [1])], + 2: [] + } + assert _find_first_order_intersections(manager.crossings, + manager.paths) == { + 1: {1} + } + + manager.remove_path_by_id(0) + del path_dict[0] + path_dict[3] = [0, 1, 2, 10] + assert manager.try_add_path(path_dict[3]) + idx1 = manager.get_all_paths().index(path_dict[1]) + 1 + assert _find_first_order_intersections( + manager.crossings, manager.paths + ) == { + 1: {idx1}, + # would be 1: {idx1, idx3} if + # try_add_path was not also + # trying to solve the + # intersections while adding the + # paths + } + + path_dict[4] = [20, 6, 21, 22, 23, 24] + assert manager.try_add_path(path_dict[4]) + assert path_dict[4] in manager.get_all_paths() + idx1 = manager.get_all_paths().index(path_dict[1]) + 1 + idx2 = manager.get_all_paths().index(path_dict[2]) + 1 + assert _find_first_order_intersections(manager.crossings, + manager.paths) == { + 1: {idx1}, + 6: {idx2} + } + + path_dict[5] = [30, 1, 31, 32] + assert manager.try_add_path(path_dict[5]) + assert path_dict[5] in manager.get_all_paths() + idx1 = manager.get_all_paths().index(path_dict[1]) + 1 + idx2 = manager.get_all_paths().index(path_dict[2]) + 1 + assert 
_find_first_order_intersections(manager.crossings, + manager.paths) == { + 1: {idx1}, + 6: {idx2} + } + + path_dict[6] = [40, 23, 41, 42, 43, 44] + assert manager.try_add_path(path_dict[6]) + assert path_dict[6] in manager.get_all_paths() + idx1 = manager.get_all_paths().index(path_dict[1]) + 1 + idx2 = manager.get_all_paths().index(path_dict[2]) + 1 + assert _find_first_order_intersections(manager.crossings, + manager.paths) == { + 1: {idx1}, + 6: {idx2} + } + + +def test_path_container_no_intersection(grid44_manager): + path_dict = {0: [0, 1, 2, 3], 1: [5, 6, 7], 2: [4, 8, 9, 10, 11]} + for _, path in path_dict.items(): + assert grid44_manager.try_add_path(path) + assert path in grid44_manager.get_all_paths() + assert grid44_manager.generate_swaps() == [(0, 1), (3, 2), (7, 6), (4, 8), + (11, 10), (10, 9)] + + +def test_path_container_1_intersection_single_intersection(): + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (3, 1), (1, 4), (2, 10), (10, 11), + (11, 12)]) + + manager = PathManager(graph=graph) + + # 3 + # | + # 0 - 1 - 2 + # | 10 - 11 - 12 + # 4 + # NB: intersection at node 1 + ref_swaps = [ + [(0, 1), (12, 11)], + [(0, 1), (10, 11)], + [(2, 1), (12, 11)], + [(2, 1), (10, 11)], + [(3, 1), (12, 11)], + [(3, 1), (10, 11)], + [(4, 1), (12, 11)], + [(4, 1), (10, 11)], + ] + paths = [[0, 1, 2], [3, 1, 4]] + for path1, path2, in itertools.permutations(paths): + manager.clear() + assert manager.try_add_path(path1) + assert not manager.try_add_path(path2) + assert manager.try_add_path([10, 11, 12]) + assert manager.generate_swaps() in ref_swaps + + # 4 + # | + # 0 - 1 - 2 - 3 + # | 10 - 11 - 12 + # 5 + # NB: intersection at node 1 + ref_swaps = [ + [(0, 1), (1, 2), (4, 1), (12, 11)], + [(0, 1), (1, 2), (4, 1), (12, 11)], + [(0, 1), (1, 2), (5, 1), (10, 11)], + [(0, 1), (1, 2), (5, 1), (12, 11)], + [(0, 1), (1, 2), (12, 11), (4, 1)], + [(0, 1), (1, 2), (12, 11), (4, 1)], + [(0, 1), (1, 2), (10, 11), (5, 1)], + [(0, 1), (1, 2), (12, 11), (5, 1)], + [(12, 11), (0, 1), (1, 2), (4, 1)], + [(12, 11), (0, 1), (1, 2), (4, 1)], + [(10, 11), (0, 1), (1, 2), (5, 1)], + [(12, 11), (0, 1), (1, 2), (5, 1)], + ] + paths = [[0, 1, 2, 3], [4, 1, 5], [10, 11, 12]] + for path1, path2, path3 in itertools.permutations(paths): + manager.clear() + assert manager.try_add_path(path1) + assert manager.try_add_path(path2) + assert manager.try_add_path(path3) + assert manager.generate_swaps() in ref_swaps + + # 4 + # | + # 0 - 1 - 2 - 3 + # | 10 - 11 - 12 + # 5 + # NB: intersection at node 2 + ref_swaps = [ + [(3, 2), (2, 1), (4, 2), (12, 11)], + [(3, 2), (2, 1), (4, 2), (12, 11)], + [(3, 2), (2, 1), (5, 2), (10, 11)], + [(3, 2), (2, 1), (5, 2), (12, 11)], + [(3, 2), (2, 1), (12, 11), (4, 2)], + [(3, 2), (2, 1), (12, 11), (4, 2)], + [(3, 2), (2, 1), (10, 11), (5, 2)], + [(3, 2), (2, 1), (12, 11), (5, 2)], + [(12, 11), (3, 2), (2, 1), (4, 2)], + [(12, 11), (3, 2), (2, 1), (4, 2)], + [(10, 11), (3, 2), (2, 1), (5, 2)], + [(12, 11), (3, 2), (2, 1), (5, 2)], + ] + paths = [[0, 1, 2, 3], [4, 2, 5], [10, 11, 12]] + for path1, path2, path3 in itertools.permutations(paths): + manager.clear() + assert manager.try_add_path(path1) + assert manager.try_add_path(path2) + assert manager.try_add_path(path3) + assert manager.generate_swaps() in ref_swaps + + # 9 + # | + # 0 - 1 - 2 - 3 - 4 - 5 + # | + # 10 6 - 7 - 8 + # | + # 11 + # NB: intersection at node 1 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (9, 1), + (1, 10), (10, 11), (5, 6), (6, 7), (7, 8)]) + manager = 
PathManager(graph=graph) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([9, 1, 10, 11]) + assert manager.try_add_path([6, 7, 8]) + assert manager.generate_swaps() == [(0, 1), (1, 2), (5, 4), (4, 3), (9, 1), + (11, 10), (8, 7)] + + +def test_path_container_1_intersection_double_crossing_long_right(): + # 6 7 + # | | + # 0 - 1 - 2 - 3 - 4 - 5 + # | | + # 8 9 + # | + # 10 + # | + # 11 + # | + # 12 + # NB: intersection at node 2 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (6, 2), (2, + 8), + (7, 4), (4, 9), (9, 10), (10, 11), (11, 12)]) + manager = PathManager(graph=graph) + + ref_swaps = [(7, 4), (4, 9), (12, 11), (11, 10), (0, 1), (1, 2), (2, 3), + (5, 4), (8, 2)] + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([6, 2, 8]) + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.generate_swaps() == ref_swaps + + manager.clear() + assert manager.try_add_path([6, 2, 8]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.generate_swaps() == ref_swaps + + ref_swaps = [(5, 4), (4, 3), (3, 2), (2, 1), (7, 4), (4, 9), (12, 11), + (11, 10), (8, 2)] + manager.clear() + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.try_add_path([6, 2, 8]) + assert manager.generate_swaps() == ref_swaps + manager.clear() + assert manager.try_add_path([6, 2, 8]) + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.generate_swaps() == ref_swaps + + ref_swaps = [(7, 4), (4, 9), (12, 11), (11, 10), (5, 4), (4, 3), (3, 2), + (2, 1), (8, 2)] + manager.clear() + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([6, 2, 8]) + assert manager.generate_swaps() == ref_swaps + manager.clear() + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.try_add_path([6, 2, 8]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.generate_swaps() == ref_swaps + + +def test_path_container_1_intersection_double_crossing_long_left(): + # 6 7 + # | | + # 0 - 1 - 2 - 3 - 4 - 5 + # | | + # 8 9 + # | + # 10 + # | + # 11 + # | + # 12 + # NB: intersection at node 3 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (6, 1), (1, + 8), + (8, 10), (10, 11), (11, 12), (7, 3), (3, 9)]) + manager = PathManager(graph=graph) + + ref_swaps = [(0, 1), (1, 2), (2, 3), (3, 4), (6, 1), (1, 8), (12, 11), + (11, 10), (9, 3)] + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.try_add_path([7, 3, 9]) + assert manager.generate_swaps() == ref_swaps + manager.clear() + assert manager.try_add_path([7, 3, 9]) + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.generate_swaps() == ref_swaps + + ref_swaps = [(6, 1), (1, 8), (12, 11), (11, 10), (0, 1), (5, 4), (4, 3), + (3, 2), (9, 3)] + manager.clear() + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([7, 3, 9]) + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.generate_swaps() == ref_swaps + manager.clear() + assert manager.try_add_path([7, 3, 9]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert 
manager.generate_swaps() == ref_swaps + + ref_swaps = [(6, 1), (1, 8), (12, 11), (11, 10), (0, 1), (1, 2), (2, 3), + (3, 4), (9, 3)] + manager.clear() + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.try_add_path([7, 3, 9]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.generate_swaps() == ref_swaps + manager.clear() + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([7, 3, 9]) + assert manager.generate_swaps() == ref_swaps + + +def test_path_container_1_intersection_double_crossing_delete_path(): + # 4 5 4 5 + # | | | | + # 0 - 1 - 2 - 3 -> 0 - 1 - 2 - 3 or 0 - 1 - 2 - 3 + # | | | | + # 6 7 6 7 + # NB: intersection at nodes 1 & 2 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (4, 1), (1, 6), (5, 2), (2, + 7)]) + ref_swaps = [ + [(0, 1), (1, 2), (6, 1)], + [(0, 1), (1, 2), (4, 1)], + ] + + manager = PathManager(graph=graph) + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.try_add_path([4, 1, 6]) + assert not manager.try_add_path([5, 2, 7]) + assert manager.generate_swaps() in ref_swaps + + ref_swaps = [ + [(3, 2), (2, 1), (7, 2)], + [(3, 2), (2, 1), (5, 2)], + ] + + manager.clear() + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.try_add_path([5, 2, 7]) + assert not manager.try_add_path([4, 1, 6]) + assert manager.generate_swaps() in ref_swaps + + +def test_path_container_1_intersection_double_crossing_delete_path2(): + # 5 6 6 + # | | | + # 0 - 1 - 2 - 3 - 4 -> 0 - 1 - 2 - 3 - 4 + # | | | + # 7 8 8 + # NB: intersection at nodes 1 & 3 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (5, 1), (1, 7), (6, + 3), + (3, 8)]) + manager = PathManager(graph=graph) + + ref_swaps = [ + [(0, 1), (1, 2), (4, 3), (7, 1)], + [(0, 1), (1, 2), (4, 3), (5, 1)], + [(0, 1), (4, 3), (3, 2), (8, 3)], + [(0, 1), (4, 3), (3, 2), (6, 3)], + ] + + assert manager.try_add_path([0, 1, 2, 3, 4]) + assert manager.try_add_path([5, 1, 7]) + assert not manager.try_add_path([6, 3, 8]) + assert manager.generate_swaps() in ref_swaps + + manager.clear() + assert manager.try_add_path([0, 1, 2, 3, 4]) + assert manager.try_add_path([6, 3, 8]) + assert not manager.try_add_path([5, 1, 7]) + assert manager.generate_swaps() in ref_swaps + + +def test_path_container_1_intersection_double_crossing_neighbouring_nodes(): + # 5 + # | + # 6 7 + # | | + # 0 - 1 - 2 - 3 - 4 + # | | + # 8 9 + # NB: intersection at nodes 1 & 3 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (5, 6), (6, 1), (1, + 8), + (7, 2), (2, 9)]) + manager = PathManager(graph=graph) + + ref_swaps = [ + [(0, 1), (1, 2), (2, 3), (8, 1), (1, 6), (9, 2)], + [(0, 1), (1, 2), (2, 3), (5, 6), (8, 1), (9, 2)], + [(8, 1), (1, 6), (4, 3), (3, 2), (2, 1), (9, 2)], + [(8, 1), (1, 6), (0, 1), (1, 2), (2, 3), (9, 2)], + [(0, 1), (1, 2), (2, 3), (8, 1), (1, 6), (7, 2)], + [(0, 1), (1, 2), (2, 3), (5, 6), (8, 1), (7, 2)], + [(8, 1), (1, 6), (4, 3), (3, 2), (2, 1), (7, 2)], + [(8, 1), (1, 6), (0, 1), (1, 2), (2, 3), (7, 2)], + [(0, 1), (1, 2), (2, 3), (9, 2), (8, 1), (1, 6)], + [(0, 1), (1, 2), (2, 3), (9, 2), (5, 6), (8, 1)], + [(8, 1), (1, 6), (4, 3), (9, 2), (3, 2), (2, 1)], + [(8, 1), (1, 6), (0, 1), (9, 2), (1, 2), (2, 3)], + [(0, 1), (1, 2), (2, 3), (7, 2), (8, 1), (1, 6)], + [(0, 1), (1, 2), (2, 3), (7, 2), (5, 6), (8, 1)], + [(8, 1), (1, 6), (4, 3), (7, 2), (3, 2), (2, 1)], + [(8, 1), (1, 6), (0, 1), (7, 2), (1, 2), (2, 3)], + ] + + paths = [[0, 1, 2, 3, 4], 
[5, 6, 1, 8], [7, 2, 9]] + + for path1, path2, path3 in itertools.permutations(paths): + manager.clear() + assert manager.try_add_path(path1) + assert manager.try_add_path(path2) + assert manager.try_add_path(path3) + assert manager.generate_swaps() in ref_swaps + + +def test_path_container_1_intersection_triple_crossing(): + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (9, 1), + (1, 10), (10, 11), (12, 1), (1, 13), (13, 14), + (14, 15), (5, 6), (6, 7), (7, 8)]) + manager = PathManager(graph=graph) + + # 9 13 - 14 - 15 + # | / + # 0 - 1 - 2 - 3 - 4 - 5 + # / | + # 12 10 6 - 7 - 8 + # | + # 11 + # NB: intersection at node 1 + manager.clear() + paths = [[9, 1, 10, 11], [0, 1, 2, 3, 4, 5], [6, 7, 8], + [12, 1, 13, 14, 15, 16]] + for path in paths: + assert manager.try_add_path(path) + + paths[3], paths[0], paths[1] \ + = paths[0], paths[1], paths[3] + assert manager.get_all_paths() == paths + + manager.clear() + paths = [[0, 1, 2, 3, 4, 5], [9, 1, 10, 11], [6, 7, 8], + [12, 1, 13, 14, 15, 16]] + for path in paths: + assert manager.try_add_path(path) + + paths[3], paths[1] \ + = paths[1], paths[3] + assert manager.get_all_paths() == paths + + # 4 5 10 - 11 - 12 4 10 - 11 - 12 + # | / | + # 0 - 1 - 2 - 3 -> 0 - 1 - 2 - 3 + # / | | + # 6 7 7 + # NB: intersection at node 1 + ref_swaps = [[(0, 1), (1, 2), (4, 1), (12, 11)], + [(0, 1), (1, 2), (4, 1), (10, 11)], + [(0, 1), (1, 2), (7, 1), (12, 11)], + [(0, 1), (1, 2), (7, 1), (10, 11)]] + manager.clear() + paths = [[0, 1, 2, 3], [4, 1, 7], [10, 11, 12], [5, 1, 6]] + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.try_add_path([4, 1, 7]) + assert manager.try_add_path([10, 11, 12]) + assert not manager.try_add_path([5, 1, 6]) + assert manager.generate_swaps() in ref_swaps + + +def test_path_container_1_intersection_triple_crossing_complex(): + # 4 + # | + # 0 - 1 - 2 - 3 + # | + # 5 - 6 - 7 + # | + # 8 + # NB: intersection at nodes 1 & 3 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (4, 1), (1, 6), (6, 8), (5, + 6), + (6, 7)]) + manager = PathManager(graph=graph) + + ref_swaps = [ + [(0, 1), (1, 2), (4, 1), (8, 6)], + [(0, 1), (1, 2), (4, 1), (1, 6)], + [(4, 1), (1, 6), (0, 1), (1, 2)], + [(0, 1), (3, 2), (4, 1), (1, 6)], + [(4, 1), (8, 6), (0, 1), (3, 2)], + [(4, 1), (1, 6), (0, 1), (3, 2)], + ] + + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.try_add_path([4, 1, 6, 8]) + assert not manager.try_add_path([5, 6, 7]) + assert manager.generate_swaps() in ref_swaps + + manager.clear() + assert manager.try_add_path([4, 1, 6, 8]) + assert manager.try_add_path([0, 1, 2, 3]) + assert not manager.try_add_path([5, 6, 7]) + assert manager.generate_swaps() in ref_swaps + + ref_swaps = [ + [(0, 1), (1, 2), (8, 6), (6, 1), (5, 6)], + [(0, 1), (1, 2), (8, 6), (6, 1), (7, 6)], + ] + + manager.clear() + assert manager.try_add_path([4, 1, 6, 8]) + assert manager.try_add_path([5, 6, 7]) + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.generate_swaps() in ref_swaps + + manager.clear() + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.try_add_path([5, 6, 7]) + + # With some modification to PathManager, this next line could be made not + # to fail adding the path. + # This would require the intersection resolving algorithm to allow the + # creation of a new intersection for the path currently being added but not + # for any other stored path. + # (ie. 
allowing the [4], [1, 6, 8] path split, although now 1 is an + # intersection for the new path) + assert not manager.try_add_path([4, 1, 6, 8]) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py new file mode 100644 index 000000000..1a07bd0ea --- /dev/null +++ b/projectq/cengines/_graphmapper.py @@ -0,0 +1,705 @@ +# Copyright 2018 ProjectQ-Framework (wOAww.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Mapper for a quantum circuit to an arbitrary connected graph. + +Input: Quantum circuit with 1 and 2 qubit gates on n qubits. Gates are assumed + to be applied in parallel if they act on disjoint qubit(s) and any pair + of qubits can perform a 2 qubit gate (all-to-all connectivity) +Output: Quantum circuit in which qubits are placed in 2-D square grid in which + only nearest neighbour qubits can perform a 2 qubit gate. The mapper + uses Swap gates in order to move qubits next to each other. +""" +from copy import deepcopy + +import random +import itertools + +from projectq.cengines import (BasicMapperEngine, return_swap_depth) +from projectq.meta import LogicalQubitIDTag +from projectq.ops import (AllocateQubitGate, Command, DeallocateQubitGate, + FlushGate, Swap) +from projectq.types import WeakQubitRef +from projectq.cengines._graph_path_manager import PathManager +from projectq.cengines._command_list import CommandList + +# ------------------------------------------------------------------------------ + +# https://www.peterbe.com/plog/fastest-way-to-uniquify-a-list-in-python-3.6 +import sys +if sys.version_info[0] >= 3 and sys.version_info[1] > 6: # pragma: no cover + + def uniquify_list(seq): + return list(dict.fromkeys(seq)) +else: # pragma: no cover + + def uniquify_list(seq): + seen = set() + seen_add = seen.add + return [x for x in seq if x not in seen and not seen_add(x)] + + +# ============================================================================== + + +class GraphMapperError(Exception): + """Base class for all exceptions related to the GraphMapper.""" + + +def _add_qubits_to_mapping_fcfs(current_mapping, graph, new_logical_qubit_ids, + stored_commands): + """ + Add active qubits to a mapping. 
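+
+    Example (illustrative, assuming a hypothetical linear three-node graph
+    0 - 1 - 2 and a current mapping {0: 0}): new logical qubit ids [1, 2]
+    are assigned to the remaining backend ids in graph order, yielding the
+    mapping {0: 0, 1: 1, 2: 2}.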
+ + This function implements the simple first-come first serve approach; + Qubits that are active but not yet registered in the mapping are added by + mapping them to the next available backend id + + Args: + current_mapping (dict): specify which method should be used to + add the new qubits to the current mapping + graph (networkx.Graph): underlying graph used by the mapper + new_logical_qubit_ids (list): list of logical ids not yet part of the + mapping and that need to be assigned a + backend id + stored_commands (CommandList): list of commands yet to be processed by + the mapper + + Returns: A new mapping + """ + mapping = deepcopy(current_mapping) + currently_used_nodes = sorted([v for _, v in mapping.items()]) + available_nodes = [n for n in graph if n not in currently_used_nodes] + + for i, logical_id in enumerate(new_logical_qubit_ids): + mapping[logical_id] = available_nodes[i] + return mapping + + +def _generate_mapping_minimize_swaps(graph, qubit_interaction_subgraphs): + """ + Generate an initial mapping while maximizing the number of 2-qubit gates + that can be applied without applying any SWAP operations. + + Args: + graph (networkx.Graph): underlying graph used by the mapper + qubit_interaction_subgraph (list): see documentation for CommandList + + Returns: A new mapping + """ + mapping = {} + available_nodes = sorted(list(graph), key=lambda n: len(graph[n])) + + # Initialize the seed node + logical_id = qubit_interaction_subgraphs[0].pop(0) + backend_id = available_nodes.pop() + mapping[logical_id] = backend_id + + for subgraph in qubit_interaction_subgraphs: + anchor_node = backend_id + for logical_id in subgraph: + neighbours = sorted( + [n for n in graph[anchor_node] if n in available_nodes], + key=lambda n: len(graph[n])) + + # If possible, take the neighbour with the highest + # degree. Otherwise, take the next highest order available node + if neighbours: + backend_id = neighbours[-1] + available_nodes.remove(backend_id) + else: + backend_id = available_nodes.pop() + mapping[logical_id] = backend_id + + return mapping + + +def _add_qubits_to_mapping_smart_init(current_mapping, graph, + new_logical_qubit_ids, stored_commands): + """ + Add active qubits to a mapping. + + Similar to the first-come first-serve approach, except the initial mapping + tries to maximize the initial number of gates to be applied without + swaps. Otherwise identical to the first-come first-serve approach. 
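+
+    Note:
+        The smarter initialisation only takes effect when the current
+        mapping is empty (typically for the very first mapping). Once a
+        mapping exists, new qubits are placed exactly as in the plain
+        first-come first-serve case.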
+ + Args: + current_mapping (dict): specify which method should be used to + add the new qubits to the current mapping + graph (networkx.Graph): underlying graph used by the mapper + new_logical_qubit_ids (list): list of logical ids not yet part of the + mapping and that need to be assigned a + backend id + stored_commands (CommandList): list of commands yet to be processed by + the mapper + + Returns: A new mapping + """ + qubit_interaction_subgraphs = \ + stored_commands.calculate_qubit_interaction_subgraphs(order=2) + + # Interaction subgraph list can be empty if only single qubit gates are + # present + if not qubit_interaction_subgraphs: + qubit_interaction_subgraphs = [list(new_logical_qubit_ids)] + + if not current_mapping: + return _generate_mapping_minimize_swaps(graph, + qubit_interaction_subgraphs) + return _add_qubits_to_mapping_fcfs(current_mapping, graph, + new_logical_qubit_ids, stored_commands) + + +def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, + stored_commands): + """ + Add active qubits to a mapping + + Qubits that are active but not yet registered in the mapping are added by + mapping them to an available backend id, as close as possible to other + qubits which they might interact with. + + Args: + current_mapping (dict): specify which method should be used to + add the new qubits to the current mapping + graph (networkx.Graph): underlying graph used by the mapper + new_logical_qubit_ids (list): list of logical ids not yet part of the + mapping and that need to be assigned a + backend id + stored_commands (CommandList): list of commands yet to be processed by + the mapper + + Returns: A new mapping + """ + qubit_interaction_subgraphs = \ + stored_commands.calculate_qubit_interaction_subgraphs(order=2) + + # Interaction subgraph list can be empty if only single qubit gates are + # present + if not qubit_interaction_subgraphs: + qubit_interaction_subgraphs = [list(new_logical_qubit_ids)] + + if not current_mapping: + return _generate_mapping_minimize_swaps(graph, + qubit_interaction_subgraphs) + + mapping = deepcopy(current_mapping) + currently_used_nodes = sorted([v for _, v in mapping.items()]) + available_nodes = sorted( + [n for n in graph if n not in currently_used_nodes], + key=lambda n: len(graph[n])) + interactions = list( + itertools.chain.from_iterable(stored_commands.interactions)) + + for logical_id in uniquify_list(new_logical_qubit_ids): + qubit_interactions = uniquify_list([ + i[0] if i[0] != logical_id else i[1] for i in interactions + if logical_id in i + ]) + + backend_id = None + + if len(qubit_interactions) == 1: + qubit = qubit_interactions[0] + + if qubit in mapping: + candidates = sorted([ + n for n in graph[mapping[qubit]] + if n not in currently_used_nodes + ], + key=lambda n: len(graph[n])) + if candidates: + backend_id = candidates[-1] + elif qubit_interactions: + neighbours = [] + for qubit in qubit_interactions: + if qubit in mapping: + neighbours.append( + set(n for n in graph[mapping[qubit]] + if n in available_nodes)) + else: + break + + intersection = set() + while neighbours: + intersection = neighbours[0].intersection(*neighbours[1:]) + if intersection: + backend_id = intersection.pop() + break + neighbours.pop() + + if backend_id is None: + backend_id = available_nodes.pop() + else: + available_nodes.remove(backend_id) + + mapping[logical_id] = backend_id + + return mapping + + +class GraphMapper(BasicMapperEngine): + """ + Mapper to an arbitrary connected graph. 
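+
+    A minimal usage sketch (illustrative only; the connectivity graph below
+    is an arbitrary example, not part of this module):
+
+        import networkx as nx
+        from projectq import MainEngine
+        from projectq.ops import CNOT
+        from projectq.cengines import GraphMapper
+
+        # Connectivity: a cycle of four qubits (equivalent to a 2x2 grid)
+        graph = nx.Graph()
+        graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0)])
+
+        mapper = GraphMapper(graph=graph)
+        eng = MainEngine(engine_list=[mapper])
+
+        qubits = eng.allocate_qureg(4)
+        CNOT | (qubits[0], qubits[2])
+        eng.flush()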
+ + Maps a quantum circuit to an arbitrary connected graph of connected qubits + using Swap gates. + + Args: + graph (networkx.Graph) : Arbitrary connected graph + storage (int) Number of gates to temporarily store + add_qubits_to_mapping (function or str) Function called when new qubits + are to be added to the current + mapping. + Special possible string values: + "fcfs": first-come first serve + "fcfs_init": first-come first + serve with smarter + mapping + initialisation + Signature of the function call: + current_mapping + graph + new_logical_qubit_ids + stored_commands + enable_caching(Bool): Controls whether optimal path caching is + enabled + + Attributes: + current_mapping: Stores the mapping: key is logical qubit id, value + is mapped qubit id from 0,...,self.num_qubits + storage (int): Number of gate it caches before mapping. + num_qubits(int): number of qubits + num_mappings (int): Number of times the mapper changed the mapping + depth_of_swaps (dict): Key are circuit depth of swaps, value is the + number of such mappings which have been + applied + num_of_swaps_per_mapping (dict): Key are the number of swaps per + mapping, value is the number of such + mappings which have been applied + path_stats (dict) : Key is the endpoints of a path, value is the number + of such paths which have been applied + + Note: + 1) Gates are cached and only mapped from time to time. A + FastForwarding gate doesn't empty the cache, only a FlushGate does. + 2) Only 1 and two qubit gates allowed. + 3) Does not optimize for dirty qubits. + + """ + + def __init__(self, + graph, + storage=1000, + add_qubits_to_mapping=_add_qubits_to_mapping, + enable_caching=True): + """ + Initialize a GraphMapper compiler engine. + + Args: + graph (networkx.Graph): Arbitrary connected graph representing + Qubit connectivity + storage (int): Number of gates to temporarily store + enable_caching (Bool): Controls whether path caching is enabled + Raises: + RuntimeError: if the graph is not a connected graph + """ + BasicMapperEngine.__init__(self) + + self.paths = PathManager(graph, enable_caching) + self.num_qubits = graph.number_of_nodes() + self.storage = storage + # Randomness to pick permutations if there are too many. + # This creates an own instance of Random in order to not influence + # the bound methods of the random module which might be used in other + # places. 
+ self._rng = random.Random(11) + # Storing commands + self._stored_commands = CommandList() + # Logical qubit ids for which the Allocate gate has already been + # processed and sent to the next engine but which are not yet + # deallocated: + self._currently_allocated_ids = set() + # Our internal mappings + self._current_mapping = dict() # differs from other mappers + self._reverse_current_mapping = dict() + # Function to add new logical qubits ids to the mapping + self.set_add_qubits_to_mapping(add_qubits_to_mapping) + + # Statistics: + self.num_mappings = 0 + self.depth_of_swaps = dict() + self.num_of_swaps_per_mapping = dict() + + @property + def current_mapping(self): + """Return a copy of the current mapping.""" + return deepcopy(self._current_mapping) + + @current_mapping.setter + def current_mapping(self, current_mapping): + """Set the current mapping to a new value.""" + if not current_mapping: + self._current_mapping = dict() + self._reverse_current_mapping = dict() + else: + self._current_mapping = current_mapping + self._reverse_current_mapping = { + v: k + for k, v in self._current_mapping.items() + } + + def set_add_qubits_to_mapping(self, add_qubits_to_mapping): + if isinstance(add_qubits_to_mapping, str): + if add_qubits_to_mapping.lower() == "fcfs": + self._add_qubits_to_mapping = _add_qubits_to_mapping_fcfs + elif add_qubits_to_mapping.lower() == "fcfs_init": + self._add_qubits_to_mapping = _add_qubits_to_mapping_smart_init + else: + raise ValueError( + "Invalid invalid value for add_qubits_to_mapping: {}". + format(add_qubits_to_mapping)) + else: + self._add_qubits_to_mapping = add_qubits_to_mapping + + def is_available(self, cmd): + """Only allows 1 or two qubit gates.""" + num_qubits = 0 + for qureg in cmd.all_qubits: + num_qubits += len(qureg) + return num_qubits <= 2 + + def _process_commands(self): + """ + Process commands and if necessary, calculate paths through the graph. + + Attempts to find as many paths through the graph as possible in order + to generate a new mapping that is able to apply as many gates as + possible. + + It goes through stored_commands and tries to find paths through the + graph that can be applied simultaneously to move the qubits without + side effects so that as many gates can be applied; gates are applied + on on a first come first served basis. 
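+
+        Newly allocated qubits are added to the current mapping lazily (via
+        the configured add_qubits_to_mapping function): either right before
+        the next two-qubit interaction is pushed to the path manager, or
+        once all stored commands have been examined.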
+ + Args: + None (list): Nothing here for now + + Returns: A list of paths through the graph to move some qubits and have + them interact + """ + not_in_mapping_qubits = [] + allocated_qubits = deepcopy(self._currently_allocated_ids) + active_qubits = deepcopy(self._currently_allocated_ids) + + # Always start from scratch again + # (does not reset cache or path statistics) + self.paths.clear_paths() + + for cmd in self._stored_commands: + if (len(allocated_qubits) == self.num_qubits + and not active_qubits): + break + + qubit_ids = [ + qubit.id for qureg in cmd.all_qubits for qubit in qureg + ] + + if len(qubit_ids) > 2 or not qubit_ids: + raise Exception("Invalid command (number of qubits): " + + str(cmd)) + + elif isinstance(cmd.gate, AllocateQubitGate): + qubit_id = cmd.qubits[0][0].id + if len(allocated_qubits) < self.num_qubits: + allocated_qubits.add(qubit_id) + active_qubits.add(qubit_id) + if qubit_id not in self._current_mapping: + not_in_mapping_qubits.append(qubit_id) + # not_in_mapping_qubits.add(qubit_id) + + elif isinstance(cmd.gate, DeallocateQubitGate): + qubit_id = cmd.qubits[0][0].id + if qubit_id in active_qubits: + active_qubits.remove(qubit_id) + # Do not remove from allocated_qubits as this would + # allow the mapper to add a new qubit to this location + # before the next swaps which is currently not + # supported + + # Process a two qubit gate: + elif len(qubit_ids) == 2: + # At least one qubit is not an active qubit: + if qubit_ids[0] not in active_qubits \ + or qubit_ids[1] not in active_qubits: + active_qubits.discard(qubit_ids[0]) + active_qubits.discard(qubit_ids[1]) + else: + if not_in_mapping_qubits: + self.current_mapping = self._add_qubits_to_mapping( + self._current_mapping, self.paths.graph, + not_in_mapping_qubits, self._stored_commands) + not_in_mapping_qubits = [] + + if not self.paths.push_interaction( + self._current_mapping[qubit_ids[0]], + self._current_mapping[qubit_ids[1]]): + break + + if not_in_mapping_qubits: + self.current_mapping = self._add_qubits_to_mapping( + self._current_mapping, self.paths.graph, not_in_mapping_qubits, + self._stored_commands) + + def _send_possible_commands(self): + """ + Send the stored commands possible without changing the mapping. 
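+
+        Roughly speaking, a command can be sent if all of its qubits are
+        active and already mapped and, for a two-qubit gate, if the two
+        mapped backend ids are connected by an edge of the underlying
+        graph.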
+ """ + active_ids = deepcopy(self._currently_allocated_ids) + + for logical_id in self._current_mapping: + # So that loop doesn't stop before AllocateGate applied + active_ids.add(logical_id) + + new_stored_commands = CommandList() + for i in range(len(self._stored_commands)): + cmd = self._stored_commands[i] + if not active_ids: + new_stored_commands += self._stored_commands[i:] + break + if isinstance(cmd.gate, AllocateQubitGate): + if cmd.qubits[0][0].id in self._current_mapping: + qb0 = WeakQubitRef( + engine=self, + idx=self._current_mapping[cmd.qubits[0][0].id]) + self._currently_allocated_ids.add(cmd.qubits[0][0].id) + self.send([ + Command( + engine=self, + gate=AllocateQubitGate(), + qubits=([qb0], ), + tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) + ]) + else: + new_stored_commands.append(cmd) + elif isinstance(cmd.gate, DeallocateQubitGate): + if cmd.qubits[0][0].id in active_ids: + qb0 = WeakQubitRef( + engine=self, + idx=self._current_mapping[cmd.qubits[0][0].id]) + self._currently_allocated_ids.remove(cmd.qubits[0][0].id) + active_ids.remove(cmd.qubits[0][0].id) + self._current_mapping.pop(cmd.qubits[0][0].id) + self.send([ + Command( + engine=self, + gate=DeallocateQubitGate(), + qubits=([qb0], ), + tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) + ]) + else: + new_stored_commands.append(cmd) + else: + send_gate = True + backend_ids = set() + for qureg in cmd.all_qubits: + for qubit in qureg: + if qubit.id not in active_ids: + send_gate = False + break + backend_ids.add(self._current_mapping[qubit.id]) + + # Check that mapped ids are connected by an edge on the graph + if len(backend_ids) == 2: + send_gate = self.paths.graph.has_edge(*list(backend_ids)) + + if send_gate: + self._send_cmd_with_mapped_ids(cmd) + else: + # Cannot execute gate -> make sure no other gate will use + # any of those qubits to preserve sequence + for qureg in cmd.all_qubits: + for qubit in qureg: + active_ids.discard(qubit.id) + new_stored_commands.append(cmd) + self._stored_commands = new_stored_commands + + def _run(self): + """ + Create a new mapping and executes possible gates. + + It first allocates all 0, ..., self.num_qubits-1 mapped qubit ids, if + they are not already used because we might need them all for the + swaps. Then it creates a new map, swaps all the qubits to the new map, + executes all possible gates, and finally deallocates mapped qubit ids + which don't store any information. + """ + num_of_stored_commands_before = len(self._stored_commands) + + # Go through the command list and generate a list of paths. + # At the same time, add soon-to-be-allocated qubits to the mapping + self._process_commands() + + self._send_possible_commands() + if not self._stored_commands: + return + + swaps = self.paths.generate_swaps() + + if swaps: # first mapping requires no swaps + backend_ids_used = { + self._current_mapping[logical_id] + for logical_id in self._currently_allocated_ids + } + + # Get a list of the qubits we need to allocate just to perform the + # swaps + not_allocated_ids = set( + self.paths.get_all_nodes()).difference(backend_ids_used) + + # Calculate temporary internal reverse mapping + new_internal_mapping = deepcopy(self._reverse_current_mapping) + + # Allocate all mapped qubit ids that are not currently allocated + # but part of some path so that we may perform the swapping + # operations. 
+ for backend_id in not_allocated_ids: + qb0 = WeakQubitRef(engine=self, idx=backend_id) + self.send([ + Command( + engine=self, + gate=AllocateQubitGate(), + qubits=([qb0], )) + ]) + + # Those qubits are not part of the current mapping, so add them + # to the temporary internal reverse mapping with invalid ids + new_internal_mapping[backend_id] = -1 + + # Calculate reverse internal mapping + new_internal_mapping = deepcopy(self._reverse_current_mapping) + + # Add missing entries with invalid id to be able to process the + # swaps operations + for backend_id in not_allocated_ids: + new_internal_mapping[backend_id] = -1 + + # Send swap operations to arrive at the new mapping + for bqb0, bqb1 in swaps: + qb0 = WeakQubitRef(engine=self, idx=bqb0) + qb1 = WeakQubitRef(engine=self, idx=bqb1) + self.send( + [Command(engine=self, gate=Swap, qubits=([qb0], [qb1]))]) + + # Update internal mapping based on swap operations + new_internal_mapping[bqb0], \ + new_internal_mapping[bqb1] = \ + new_internal_mapping[bqb1], \ + new_internal_mapping[bqb0] + + # Register statistics: + self.num_mappings += 1 + depth = return_swap_depth(swaps) + if depth not in self.depth_of_swaps: + self.depth_of_swaps[depth] = 1 + else: + self.depth_of_swaps[depth] += 1 + if len(swaps) not in self.num_of_swaps_per_mapping: + self.num_of_swaps_per_mapping[len(swaps)] = 1 + else: + self.num_of_swaps_per_mapping[len(swaps)] += 1 + + # Calculate the list of "helper" qubits that need to be deallocated + # and remove invalid entries + not_needed_anymore = [] + new_reverse_current_mapping = {} + for backend_id, logical_id in new_internal_mapping.items(): + if logical_id < 0: + not_needed_anymore.append(backend_id) + else: + new_reverse_current_mapping[backend_id] = logical_id + + # Deallocate all previously mapped ids which we only needed for the + # swaps: + for backend_id in not_needed_anymore: + qb0 = WeakQubitRef(engine=self, idx=backend_id) + self.send([ + Command( + engine=self, + gate=DeallocateQubitGate(), + qubits=([qb0], )) + ]) + + # Calculate new mapping + self.current_mapping = { + v: k + for k, v in new_reverse_current_mapping.items() + } + + # Send possible gates: + self._send_possible_commands() + # Check that mapper actually made progress + if len(self._stored_commands) == num_of_stored_commands_before: + raise RuntimeError("Mapper is potentially in an infinite loop. " + "It is likely that the algorithm requires " + "too many qubits. Increase the number of " + "qubits for this mapper.") + + def receive(self, command_list): + """ + Receive some commands. + + Receive a command list and, for each command, stores it until + we do a mapping (FlushGate or Cache of stored commands is full). + + Args: + command_list (list of Command objects): list of commands to + receive. + """ + for cmd in command_list: + if isinstance(cmd.gate, FlushGate): + while self._stored_commands: + self._run() + self.send([cmd]) + else: + self._stored_commands.append(cmd) + # Storage is full: Create new map and send some gates away: + if len(self._stored_commands) >= self.storage: + self._run() + + def __str__(self): + """ + Return the string representation of this GraphMapper. 
+ + Returns: + A summary (string) of resources used, including depth of swaps and + statistics about the paths generated + """ + + depth_of_swaps_str = "" + for depth_of_swaps, num_mapping in sorted(self.depth_of_swaps.items()): + depth_of_swaps_str += "\n {:3d}: {:3d}".format( + depth_of_swaps, num_mapping) + + num_swaps_per_mapping_str = "" + for num_swaps_per_mapping, num_mapping \ + in sorted(self.num_of_swaps_per_mapping.items(), + key=lambda x: x[1], reverse=True): + num_swaps_per_mapping_str += "\n {:3d}: {:3d}".format( + num_swaps_per_mapping, num_mapping) + + return ("Number of mappings: {}\n" + "Depth of swaps: {}\n\n" + + "Number of swaps per mapping:{}\n\n{}\n\n").format( + self.num_mappings, depth_of_swaps_str, + num_swaps_per_mapping_str, str(self.paths)) diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py new file mode 100644 index 000000000..01d0fdf82 --- /dev/null +++ b/projectq/cengines/_graphmapper_test.py @@ -0,0 +1,1105 @@ +# Copyright 2018 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests for projectq.cengines._graphmapper.py.""" + +from copy import deepcopy +import itertools + +import pytest +import networkx as nx +from projectq.cengines import DummyEngine, LocalOptimizer, MainEngine +from projectq.meta import LogicalQubitIDTag +from projectq.ops import (Allocate, BasicGate, Command, Deallocate, FlushGate, + X, H, All, Measure, CNOT) +from projectq.types import WeakQubitRef + +from projectq.cengines import _graphmapper as graphm + + +def allocate_all_qubits_cmd(mapper): + qb = [] + allocate_cmds = [] + for i in range(mapper.num_qubits): + qb.append(WeakQubitRef(engine=None, idx=i)) + allocate_cmds.append( + Command(engine=None, gate=Allocate, qubits=([qb[i]], ))) + return qb, allocate_cmds + + +def generate_grid_graph(nrows, ncols): + graph = nx.Graph() + graph.add_nodes_from(range(nrows * ncols)) + + for row in range(nrows): + for col in range(ncols): + node0 = col + ncols * row + + is_middle = ((0 < row < nrows - 1) and (0 < col < ncols - 1)) + add_horizontal = is_middle or (row in (0, nrows - 1) and + (0 < col < ncols - 1)) + add_vertical = is_middle or (col in (0, ncols - 1) and + (0 < row < nrows - 1)) + if add_horizontal: + graph.add_edge(node0, node0 - 1) + graph.add_edge(node0, node0 + 1) + if add_vertical: + graph.add_edge(node0, node0 - ncols) + graph.add_edge(node0, node0 + ncols) + if nrows == 2: + node0 = col + graph.add_edge(node0, node0 + ncols) + if ncols == 2: + node0 = ncols * row + graph.add_edge(node0, node0 + 1) + + return graph + + +@pytest.fixture(scope="module") +def simple_graph(): + # 2 4 + # / \ / | + # 0 - 1 3 | + # \ / \ | + # 5 6 + graph = nx.Graph() + graph.add_nodes_from(range(7)) + graph.add_edges_from([(0, 1), (1, 2), (1, 5), (2, 3), (5, 3), (3, 4), (3, + 6), + (4, 6)]) + return graph + + +@pytest.fixture(scope="module") +def grid22_graph(): + graph = nx.Graph() + graph.add_nodes_from([0, 1, 2, 3]) + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 
0)]) + return graph + + +@pytest.fixture(scope="module") +def grid33_graph(): + return generate_grid_graph(3, 3) + + +@pytest.fixture +def grid22_graph_mapper(grid22_graph): + mapper = graphm.GraphMapper( + graph=grid22_graph, add_qubits_to_mapping="fcfs") + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + return mapper, backend + + +@pytest.fixture +def grid33_graph_mapper(grid33_graph): + mapper = graphm.GraphMapper( + graph=grid33_graph, add_qubits_to_mapping="fcfs") + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + return mapper, backend + + +@pytest.fixture +def simple_mapper(simple_graph): + mapper = graphm.GraphMapper( + graph=simple_graph, add_qubits_to_mapping="fcfs") + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + return mapper, backend + + +# ============================================================================== + + +def test_is_available(simple_graph): + mapper = graphm.GraphMapper(graph=simple_graph) + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + cmd0 = Command(None, BasicGate(), qubits=([qb0], )) + assert mapper.is_available(cmd0) + cmd1 = Command(None, BasicGate(), qubits=([qb0], ), controls=[qb1]) + assert mapper.is_available(cmd1) + cmd2 = Command(None, BasicGate(), qubits=([qb0], [qb1, qb2])) + assert not mapper.is_available(cmd2) + cmd3 = Command(None, BasicGate(), qubits=([qb0], [qb1]), controls=[qb2]) + assert not mapper.is_available(cmd3) + + +def test_invalid_gates(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], ), controls=[]) + cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], ), controls=[]) + cmd2 = Command(engine=None, gate=Allocate, qubits=([qb2], ), controls=[]) + cmd3 = Command(engine=None, gate=X, qubits=([qb0], [qb1]), controls=[qb2]) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + with pytest.raises(Exception): + mapper.receive([cmd0, cmd1, cmd2, cmd3, cmd_flush]) + + +def test_run_infinite_loop_detection(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + cmd0 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[]) + with pytest.raises(RuntimeError): + mapper.receive([cmd0, cmd_flush]) + + mapper._stored_commands.clear() + cmd0 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb1]) + with pytest.raises(RuntimeError): + mapper.receive([cmd0, cmd_flush]) + + +def test_resetting_mapping_to_none(simple_graph): + mapper = graphm.GraphMapper(graph=simple_graph) + mapper.current_mapping = {0: 1} + assert mapper._current_mapping == {0: 1} + assert mapper._reverse_current_mapping == {1: 0} + mapper.current_mapping = {0: 0, 1: 4} + assert mapper._current_mapping == {0: 0, 1: 4} + assert mapper._reverse_current_mapping == {0: 0, 4: 1} + mapper.current_mapping = None + assert mapper._current_mapping == {} + assert mapper._reverse_current_mapping == {} + + +def 
test_add_qubits_to_mapping_methods_failure(simple_graph): + with pytest.raises(ValueError): + graphm.GraphMapper(graph=simple_graph, add_qubits_to_mapping="as") + + +@pytest.mark.parametrize("add_qubits", ["fcfs", "fcfs_init", "FCFS"]) +def test_add_qubits_to_mapping_methods(simple_graph, add_qubits): + mapper = graphm.GraphMapper( + graph=simple_graph, add_qubits_to_mapping=add_qubits) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + gates = [ + Command(None, X, qubits=([qb[1]], ), controls=[qb[0]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[2]]), + ] + + mapper.receive(list(itertools.chain(allocate_cmds, gates, [cmd_flush]))) + assert mapper.num_mappings == 0 + + +def test_qubit_placement_initial_mapping_single_qubit_gates( + grid33_graph_mapper): + grid33_graph_mapper[0].set_add_qubits_to_mapping( + graphm._add_qubits_to_mapping) + mapper, backend = deepcopy(grid33_graph_mapper) + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper.receive(allocate_cmds + [cmd_flush]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert mapping[0] == 4 + assert sorted([mapping[1], mapping[2], mapping[3], + mapping[4]]) == [1, 3, 5, 7] + assert sorted([mapping[5], mapping[6], mapping[7], + mapping[8]]) == [0, 2, 6, 8] + + +def test_qubit_placement_single_two_qubit_gate(grid33_graph_mapper): + grid33_graph_mapper[0].set_add_qubits_to_mapping( + graphm._add_qubits_to_mapping) + mapper_ref, backend = deepcopy(grid33_graph_mapper) + + mapper_ref.current_mapping = {3: 3, 4: 4, 5: 5} + mapper_ref._currently_allocated_ids = set( + mapper_ref.current_mapping.keys()) + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper_ref) + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper = deepcopy(mapper_ref) + mapper.receive([ + allocate_cmds[0], + Command(None, X, qubits=([qb[0]], ), controls=[qb[3]]), cmd_flush + ]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert mapping[0] in {0, 6} + + mapper = deepcopy(mapper_ref) + mapper.receive([ + allocate_cmds[6], + Command(None, X, qubits=([qb[3]], ), controls=[qb[6]]), cmd_flush + ]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert mapping[6] in {0, 6} + + +def test_qubit_placement_double_two_qubit_gate(grid33_graph_mapper): + grid33_graph_mapper[0].set_add_qubits_to_mapping( + graphm._add_qubits_to_mapping) + mapper_ref, backend_ref = deepcopy(grid33_graph_mapper) + + mapper_ref.current_mapping = {1: 1, 3: 3, 4: 4, 5: 5} + mapper_ref._currently_allocated_ids = set( + mapper_ref.current_mapping.keys()) + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper_ref) + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper = deepcopy(mapper_ref) + backend = deepcopy(backend_ref) + mapper.next_engine = backend + mapper.receive([ + allocate_cmds[0], + Command(None, X, qubits=([qb[0]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[0]], ), controls=[qb[1]]), cmd_flush + ]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert 
mapping[0] == 0 + + mapper = deepcopy(mapper_ref) + backend = deepcopy(backend_ref) + mapper.next_engine = backend + mapper.receive([ + allocate_cmds[2], + Command(None, X, qubits=([qb[2]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[2]], ), controls=[qb[1]]), + Command(None, X, qubits=([qb[2]], ), controls=[qb[5]]), + cmd_flush, + ]) + mapping = mapper.current_mapping + + # Make sure that the qb[2] was allocated at backend_id 0 + assert backend.received_commands[0].gate == Allocate + assert backend.received_commands[0].qubits[0][0].id == 0 + assert backend.received_commands[0].tags == [LogicalQubitIDTag(2)] + + +def test_qubit_placement_multiple_two_qubit_gates(grid33_graph_mapper): + grid33_graph_mapper[0].set_add_qubits_to_mapping( + graphm._add_qubits_to_mapping) + mapper, backend = deepcopy(grid33_graph_mapper) + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + gates = [ + Command(None, X, qubits=([qb[1]], ), controls=[qb[0]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[2]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[4]]), + ] + + all_cmds = list(itertools.chain(allocate_cmds, gates)) + mapper, backend = deepcopy(grid33_graph_mapper) + mapper.receive(all_cmds + [cmd_flush]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert mapping[1] == 4 + assert sorted([mapping[0], mapping[2], mapping[3], + mapping[4]]) == [1, 3, 5, 7] + assert sorted([mapping[5], mapping[6], mapping[7], + mapping[8]]) == [0, 2, 6, 8] + + all_cmds = list(itertools.chain(allocate_cmds[:5], gates)) + mapper, backend = deepcopy(grid33_graph_mapper) + mapper.receive(all_cmds + [cmd_flush]) + + gates = [ + Command(None, X, qubits=([qb[5]], ), controls=[qb[6]]), + Command(None, X, qubits=([qb[5]], ), controls=[qb[7]]), + ] + all_cmds = list(itertools.chain(allocate_cmds[5:], gates)) + mapper.receive(all_cmds + [cmd_flush]) + assert mapper.num_mappings == 2 + + +def test_send_possible_commands(simple_graph, simple_mapper): + mapper, backend = simple_mapper + mapper.current_mapping = dict(enumerate(range(len(simple_graph)))) + + neighbours = set() + for node in simple_graph: + for other in simple_graph[node]: + neighbours.add(frozenset((node, other))) + + neighbours = [tuple(s) for s in neighbours] + + for qb0_id, qb1_id in neighbours: + qb0 = WeakQubitRef(engine=None, idx=qb0_id) + qb1 = WeakQubitRef(engine=None, idx=qb1_id) + cmd1 = Command(None, X, qubits=([qb0], ), controls=[qb1]) + cmd2 = Command(None, X, qubits=([qb1], ), controls=[qb0]) + mapper._stored_commands += [cmd1, cmd2] + mapper._send_possible_commands() + assert len(mapper._stored_commands) == 0 + + for qb0_id, qb1_id in itertools.permutations(range(8), 2): + if ((qb0_id, qb1_id) not in neighbours + and (qb1_id, qb0_id) not in neighbours): + qb0 = WeakQubitRef(engine=None, idx=qb0_id) + qb1 = WeakQubitRef(engine=None, idx=qb1_id) + cmd = Command(None, X, qubits=([qb0], ), controls=[qb1]) + mapper._stored_commands.clear() + mapper._stored_commands += [cmd] + mapper._send_possible_commands() + assert len(mapper._stored_commands) == 1 + + +def test_send_possible_commands_allocate(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + cmd0 = Command( + engine=None, gate=Allocate, qubits=([qb0], ), controls=[], tags=[]) + mapper._stored_commands += [cmd0] + 
mapper._currently_allocated_ids = set([10]) + # not in mapping: + mapper.current_mapping = dict() + assert len(backend.received_commands) == 0 + mapper._send_possible_commands() + assert len(backend.received_commands) == 0 + assert mapper._stored_commands == [cmd0] + # in mapping: + mapper.current_mapping = {0: 3} + mapper._send_possible_commands() + assert len(mapper._stored_commands) == 0 + # Only self._run() sends Allocate gates + mapped0 = WeakQubitRef(engine=None, idx=3) + received_cmd = Command( + engine=mapper, + gate=Allocate, + qubits=([mapped0], ), + controls=[], + tags=[LogicalQubitIDTag(0)]) + assert backend.received_commands[0] == received_cmd + assert mapper._currently_allocated_ids == set([10, 0]) + + +def test_send_possible_commands_allocation_no_active_qubits( + grid22_graph_mapper): + mapper, backend = grid22_graph_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + qb3 = WeakQubitRef(engine=None, idx=3) + qb4 = WeakQubitRef(engine=None, idx=4) + + cmd_list = [ + Command(engine=None, gate=Allocate, qubits=([qb0], )), + Command(engine=None, gate=Allocate, qubits=([qb1], )), + Command(engine=None, gate=Allocate, qubits=([qb2], )), + Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb2]), + Command(engine=None, gate=X, qubits=([qb1], ), controls=[qb2]), + Command(engine=None, gate=Allocate, qubits=([qb3], )), + Command(engine=None, gate=X, qubits=([qb3], )), + Command(engine=None, gate=Deallocate, qubits=([qb3], )), + Command(engine=None, gate=Deallocate, qubits=([qb2], )), + Command(engine=None, gate=Deallocate, qubits=([qb1], )), + Command(engine=None, gate=Deallocate, qubits=([qb0], )), + Command(engine=None, gate=Allocate, qubits=([qb4], )), + ] + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper._stored_commands += cmd_list + [cmd_flush] + + mapper._run() + assert len(mapper._stored_commands) == 8 + # NB: after swap, can actually send Deallocate to qb0 + assert mapper._stored_commands[:6] == cmd_list[4:10] + assert mapper._stored_commands[6] == cmd_list[11] + + +def test_send_possible_commands_allocation_no_active_qubits( + grid22_graph_mapper): + mapper, backend = grid22_graph_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + qb3 = WeakQubitRef(engine=None, idx=3) + qb4 = WeakQubitRef(engine=None, idx=4) + + cmd_list = [ + Command(engine=None, gate=Allocate, qubits=([qb0], )), + Command(engine=None, gate=Allocate, qubits=([qb1], )), + Command(engine=None, gate=Allocate, qubits=([qb2], )), + Command(engine=None, gate=Allocate, qubits=([qb3], )), + Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb2]), + ] + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper._stored_commands += cmd_list + [cmd_flush] + + mapper._run() + assert mapper.num_mappings == 1 + assert len(mapper._stored_commands) == 1 + assert mapper._stored_commands[0] == cmd_flush + + cmd_list = [ + Command(engine=None, gate=X, qubits=([qb2], ), controls=[qb3]), + Command(engine=None, gate=Deallocate, qubits=([qb3], )), + Command(engine=None, gate=Deallocate, qubits=([qb2], )), + Command(engine=None, gate=Deallocate, qubits=([qb1], )), + Command(engine=None, gate=Deallocate, qubits=([qb0], )), + Command(engine=None, gate=Allocate, qubits=([qb4], )), + ] + 
mapper._stored_commands = cmd_list + [cmd_flush] + mapper._run() + assert mapper.num_mappings == 1 + assert len(mapper._stored_commands) == 2 + assert mapper._stored_commands[0] == cmd_list[-1] + + +def test_send_possible_commands_deallocate(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + cmd0 = Command( + engine=None, gate=Deallocate, qubits=([qb0], ), controls=[], tags=[]) + mapper._stored_commands = [cmd0] + mapper.current_mapping = dict() + mapper._currently_allocated_ids = set([10]) + # not yet allocated: + mapper._send_possible_commands() + assert len(backend.received_commands) == 0 + assert mapper._stored_commands == [cmd0] + # allocated: + mapper.current_mapping = {0: 3} + mapper._currently_allocated_ids.add(0) + mapper._send_possible_commands() + assert len(backend.received_commands) == 1 + assert len(mapper._stored_commands) == 0 + assert mapper.current_mapping == dict() + assert mapper._currently_allocated_ids == set([10]) + + +def test_send_possible_commands_no_initial_mapping(simple_mapper): + mapper, backend = simple_mapper + + assert mapper._current_mapping == {} + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=-1) + + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], ), controls=[]) + cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], ), controls=[]) + cmd2 = Command(None, X, qubits=([qb0], ), controls=[qb1]) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb2], )) + all_cmds = [cmd0, cmd1, cmd2, cmd_flush] + mapper.receive(all_cmds) + + assert mapper._current_mapping + assert len(mapper._stored_commands) == 0 + + +def test_send_possible_commands_keep_remaining_gates(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + cmd0 = Command( + engine=None, gate=Allocate, qubits=([qb0], ), controls=[], tags=[]) + cmd1 = Command( + engine=None, gate=Deallocate, qubits=([qb0], ), controls=[], tags=[]) + cmd2 = Command( + engine=None, gate=Allocate, qubits=([qb1], ), controls=[], tags=[]) + + mapper._stored_commands = [cmd0, cmd1, cmd2] + mapper.current_mapping = {0: 0} + mapper._send_possible_commands() + assert mapper._stored_commands == [cmd2] + + +def test_send_possible_commands_one_inactive_qubit(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + cmd0 = Command( + engine=None, gate=Allocate, qubits=([qb0], ), controls=[], tags=[]) + cmd1 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb1]) + mapper._stored_commands = [cmd0, cmd1] + mapper.current_mapping = {0: 0} + mapper._send_possible_commands() + assert mapper._stored_commands == [cmd1] + + +def test_run_and_receive(simple_graph, simple_mapper): + mapper, backend = simple_mapper + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + gates = [ + Command(None, X, qubits=([qb[0]], ), controls=[qb[1]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[2]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[5]]), + Command(None, X, qubits=([qb[2]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[5]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[3]], ), controls=[qb[4]]), + Command(None, X, qubits=([qb[3]], ), controls=[qb[6]]), + Command(None, X, qubits=([qb[4]], ), controls=[qb[6]]), + ] + deallocate_cmds = [ + Command(engine=None, gate=Deallocate, qubits=([qb[1]], )) + ] + + 
allocated_qubits_ref = set([0, 2, 3, 4, 5, 6]) + + all_cmds = list(itertools.chain(allocate_cmds, gates, deallocate_cmds)) + mapper.receive(all_cmds) + assert mapper._stored_commands == all_cmds + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + mapper.receive([cmd_flush]) + assert mapper._stored_commands == [] + assert len(backend.received_commands) == len(all_cmds) + 1 + assert mapper._currently_allocated_ids == allocated_qubits_ref + + mapping = dict(enumerate(range(len(simple_graph)))) + del mapping[1] + assert mapper.current_mapping == mapping + + cmd9 = Command(None, X, qubits=([qb[0]], ), controls=[qb[6]]) + mapper.receive([cmd9, cmd_flush]) + assert mapper._currently_allocated_ids == allocated_qubits_ref + for idx in allocated_qubits_ref: + assert idx in mapper.current_mapping + assert mapper._stored_commands == [] + assert len(mapper.current_mapping) == 6 + assert mapper.num_mappings == 1 + + +def test_send_two_qubit_gate_before_swap(simple_mapper): + qb, all_cmds = allocate_all_qubits_cmd(simple_mapper[0]) + + all_cmds.insert(3, None) + all_cmds.insert(5, Command(None, X, qubits=([qb[2]], ), controls=[qb[3]])) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + all_cmds.append( + Command(engine=None, gate=FlushGate(), qubits=([qb_flush], ))) + + for cmd in [ + Command(None, X, qubits=([qb[0]], ), controls=[qb[2]]), + Command(None, X, qubits=([qb[2]], ), controls=[qb[0]]) + ]: + mapper, backend = deepcopy(simple_mapper) + mapper.enable_caching = False + + all_cmds[3] = cmd + + mapper._stored_commands.clear() + mapper._stored_commands += all_cmds + mapper._run() + assert mapper.num_mappings == 1 + if mapper.current_mapping[2] == 2: + # qb[2] has not moved, all_cmds[5] is possible + assert mapper._stored_commands == all_cmds[-4:] + assert mapper.current_mapping == { + 0: 1, + 1: 0, + 2: 2, + 3: 3, + } + else: + # qb[2] moved, all_cmds[5] not possible + assert mapper._stored_commands == [all_cmds[5]] + all_cmds[-4:] + assert mapper.current_mapping == { + 0: 0, + 1: 2, + 2: 1, + 3: 3, + } + + +def test_send_two_qubit_gate_before_swap_nonallocated_qubits(simple_mapper): + qb, allocate_cmds = allocate_all_qubits_cmd(simple_mapper[0]) + + all_cmds = [ + allocate_cmds[0], + allocate_cmds[-1], + None, + Command(None, X, qubits=([qb[6]], ), controls=[qb[4]]), + ] + + idx = all_cmds.index(None) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + all_cmds.append( + Command(engine=None, gate=FlushGate(), qubits=([qb_flush], ))) + + for cmd in [ + Command(None, X, qubits=([qb[0]], ), controls=[qb[6]]), + Command(None, X, qubits=([qb[6]], ), controls=[qb[0]]) + ]: + mapper, backend = deepcopy(simple_mapper) + mapper.current_mapping = dict(enumerate(range(len(qb)))) + mapper.enable_caching = False + + all_cmds[idx] = cmd + + mapper._stored_commands = all_cmds + mapper._run() + assert mapper.num_mappings == 1 + + if mapper.current_mapping[4] == 4 and mapper.current_mapping[5] == 5: + if mapper.current_mapping[6] == 3: + # qb[6] is on position 3, all commands are possible + assert mapper._stored_commands == all_cmds[-1:] + assert mapper.current_mapping == {0: 2, 4: 4, 5: 5, 6: 3} + else: + # qb[6] is on position 2, all_cmds[8] is not possible + assert mapper._stored_commands == all_cmds[-2:] + assert mapper.current_mapping == {0: 1, 4: 4, 5: 5, 6: 2} + else: + # Should not happen... 
+ assert False + + +def test_allocate_too_many_qubits(simple_mapper): + mapper, backend = simple_mapper + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + qb.append(WeakQubitRef(engine=None, idx=len(qb))) + allocate_cmds.append( + Command(engine=None, gate=Allocate, qubits=([qb[-1]], ))) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + with pytest.raises(RuntimeError): + mapper.receive(allocate_cmds + [cmd_flush]) + + +def test_send_possible_commands_reallocate_backend_id(grid22_graph_mapper): + mapper, backend = grid22_graph_mapper + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + qb3 = WeakQubitRef(engine=None, idx=3) + qb4 = WeakQubitRef(engine=None, idx=4) + all_cmds = [ + Command(engine=None, gate=Allocate, qubits=([qb0], )), + Command(engine=None, gate=Allocate, qubits=([qb1], )), + Command(engine=None, gate=Allocate, qubits=([qb2], )), + Command(engine=None, gate=Allocate, qubits=([qb3], )), + Command(engine=None, gate=X, qubits=([qb1], )), + Command(engine=None, gate=Deallocate, qubits=([qb1], )), + Command(engine=None, gate=Allocate, qubits=([qb4], )), + Command(engine=None, gate=X, qubits=([qb4], )), + ] + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + mapper.receive(all_cmds + [cmd_flush]) + assert mapper.current_mapping == {0: 0, 2: 2, 3: 3, 4: 1} + assert len(mapper._stored_commands) == 0 + assert len(backend.received_commands) == 9 + + +def test_correct_stats(simple_mapper): + mapper, backend = simple_mapper + + # Should test stats for twice same mapping but depends on heuristic + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) + cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], )) + cmd2 = Command(engine=None, gate=Allocate, qubits=([qb2], )) + + cmd3 = Command(None, X, qubits=([qb0], ), controls=[qb1]) + cmd4 = Command(None, X, qubits=([qb1], ), controls=[qb2]) + cmd5 = Command(None, X, qubits=([qb0], ), controls=[qb2]) + cmd6 = Command(None, X, qubits=([qb2], ), controls=[qb1]) + cmd7 = Command(None, X, qubits=([qb0], ), controls=[qb1]) + cmd8 = Command(None, X, qubits=([qb1], ), controls=[qb2]) + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + mapper.receive( + [cmd0, cmd1, cmd2, cmd3, cmd4, cmd5, cmd6, cmd7, cmd8, cmd_flush]) + assert mapper.num_mappings == 2 + + +def test_send_possible_cmds_before_new_mapping(simple_mapper): + mapper, backend = simple_mapper + + def dont_call_mapping(): + raise Exception + + mapper._find_paths = dont_call_mapping + + mapper.current_mapping = {0: 1} + qb0 = WeakQubitRef(engine=None, idx=0) + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) + qb2 = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb2], )) + mapper.receive([cmd0, cmd_flush]) + + +def test_logical_id_tags_allocate_and_deallocate(simple_mapper): + mapper, backend = simple_mapper + mapper.current_mapping = {0: 1, 1: 6} + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) + cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], )) + cmd2 = Command(None, X, qubits=([qb0], 
), controls=[qb1]) + cmd3 = Command(engine=None, gate=Deallocate, qubits=([qb0], )) + cmd4 = Command(engine=None, gate=Deallocate, qubits=([qb1], )) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + mapper.receive([cmd0, cmd1, cmd2, cmd_flush]) + assert backend.received_commands[0].gate == Allocate + assert backend.received_commands[0].qubits[0][0].id == 1 + assert backend.received_commands[0].tags == [LogicalQubitIDTag(0)] + assert backend.received_commands[1].gate == Allocate + assert backend.received_commands[1].qubits[0][0].id == 6 + assert backend.received_commands[1].tags == [LogicalQubitIDTag(1)] + for cmd in backend.received_commands[2:]: + if cmd.gate == Allocate: + assert cmd.tags == [] + elif cmd.gate == Deallocate: + assert cmd.tags == [] + mapped_id_for_0 = mapper.current_mapping[0] + mapped_id_for_1 = mapper.current_mapping[1] + mapper.receive([cmd3, cmd4, cmd_flush]) + assert backend.received_commands[-3].gate == Deallocate + assert backend.received_commands[-3].qubits[0][0].id == mapped_id_for_0 + assert backend.received_commands[-3].tags == [LogicalQubitIDTag(0)] + assert backend.received_commands[-2].gate == Deallocate + assert backend.received_commands[-2].qubits[0][0].id == mapped_id_for_1 + assert backend.received_commands[-2].tags == [LogicalQubitIDTag(1)] + + +def test_check_that_local_optimizer_doesnt_merge(simple_graph): + mapper = graphm.GraphMapper(graph=simple_graph) + optimizer = LocalOptimizer(10) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = optimizer + mapper.current_mapping = dict(enumerate(range(len(simple_graph)))) + mapper.current_mapping = {0: 0} + mapper.storage = 1 + optimizer.next_engine = backend + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) + cmd1 = Command(None, X, qubits=([qb0], )) + cmd2 = Command(engine=None, gate=Deallocate, qubits=([qb0], )) + mapper.receive([cmd0, cmd1, cmd2]) + assert len(mapper._stored_commands) == 0 + mapper.current_mapping = {1: 0} + cmd3 = Command(engine=None, gate=Allocate, qubits=([qb1], )) + cmd4 = Command(None, X, qubits=([qb1], )) + cmd5 = Command(engine=None, gate=Deallocate, qubits=([qb1], )) + mapper.receive([cmd3, cmd4, cmd5, cmd_flush]) + assert len(backend.received_commands) == 7 + + +@pytest.mark.parametrize("enable_caching", [False, True]) +def test_3x3_grid_multiple_simultaneous_non_intersecting_paths( + grid33_graph_mapper, enable_caching): + mapper, backend = grid33_graph_mapper + mapper.enable_caching = enable_caching + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + # 0 - 1 - 2 + # | | | + # 3 - 4 - 5 + # | | | + # 6 - 7 - 8 + + cmd0 = Command(None, X, qubits=([qb[0]], ), controls=[qb[6]]) + cmd1 = Command(None, X, qubits=([qb[1]], ), controls=[qb[7]]) + cmd2 = Command(None, X, qubits=([qb[2]], ), controls=[qb[8]]) + cmd3 = Command(None, X, qubits=([qb[2]], ), controls=[qb[8]]) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper.receive(allocate_cmds + [cmd0, cmd1, cmd2, cmd3, cmd_flush]) + assert not mapper._stored_commands + assert mapper.num_mappings == 1 + assert mapper.depth_of_swaps == {1: 1} + assert mapper.current_mapping == { + 0: 0, + 1: 1, + 2: 2, + 3: 
6, + 4: 7, + 5: 8, + 6: 3, + 7: 4, + 8: 5 + } or mapper.current_mapping == { + 0: 3, + 1: 4, + 2: 5, + 3: 0, + 4: 1, + 5: 2, + 6: 6, + 7: 7, + 8: 8 + } + + cmd3 = Command(None, X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd4 = Command(None, X, qubits=([qb[3]], ), controls=[qb[5]]) + cmd5 = Command(None, X, qubits=([qb[6]], ), controls=[qb[8]]) + mapper.receive([cmd3, cmd4, cmd5, cmd_flush]) + + assert not mapper._stored_commands + assert mapper.num_mappings == 2 + assert mapper.depth_of_swaps == {1: 2} + assert mapper.current_mapping == { + 0: 0, + 1: 2, + 2: 1, + 3: 6, + 4: 8, + 5: 7, + 6: 3, + 7: 5, + 8: 4 + } or mapper.current_mapping == { + 0: 4, + 1: 3, + 2: 5, + 3: 1, + 4: 0, + 5: 2, + 6: 7, + 7: 6, + 8: 8 + } + + if enable_caching: + assert mapper.paths.cache._cache + assert mapper.paths.cache.has_path(0, 6) + assert mapper.paths.cache.has_path(1, 7) + assert mapper.paths.cache.has_path(2, 8) + assert mapper.paths.cache.has_path(0, 2) + assert mapper.paths.cache.has_path(3, 5) + assert mapper.paths.cache.has_path(6, 8) + assert not mapper.paths.cache.has_path(0, 1) + assert not mapper.paths.cache.has_path(1, 2) + assert not mapper.paths.cache.has_path(3, 4) + assert not mapper.paths.cache.has_path(4, 5) + assert not mapper.paths.cache.has_path(6, 7) + assert not mapper.paths.cache.has_path(7, 8) + + +@pytest.mark.parametrize("enable_caching", [False, True]) +def test_3x3_grid_multiple_simultaneous_intersecting_paths_impossible( + grid33_graph_mapper, enable_caching): + mapper, backend = grid33_graph_mapper + mapper.enable_caching = enable_caching + + # 0 - 1 - 2 + # | | | + # 3 - 4 - 5 + # | | | + # 6 - 7 - 8 + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + cmd0 = Command(None, X, qubits=([qb[1]], ), controls=[qb[7]]) + cmd1 = Command(None, X, qubits=([qb[3]], ), controls=[qb[5]]) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper.receive(allocate_cmds + [cmd0, cmd1, cmd_flush]) + assert not mapper._stored_commands + assert mapper.num_mappings == 2 + assert mapper.depth_of_swaps == {1: 2} + assert mapper.current_mapping == { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 7, + 5: 4, + 6: 6, + 7: 5, + 8: 8 + } or mapper.current_mapping == { + 0: 0, + 1: 3, + 2: 2, + 3: 4, + 4: 1, + 5: 5, + 6: 6, + 7: 7, + 8: 8 + } + + if enable_caching: + assert mapper.paths.cache._cache + assert mapper.paths.cache.has_path(1, 7) + assert mapper.paths.cache.has_path(3, 5) + + mapper.current_mapping = dict(enumerate(range(len(qb)))) + + cmd2 = Command(None, X, qubits=([qb[7]], ), controls=[qb[1]]) + cmd3 = Command(None, X, qubits=([qb[1]], ), controls=[qb[8]]) + mapper.receive(allocate_cmds + [cmd2, cmd3, cmd_flush]) + assert not mapper._stored_commands + assert mapper.num_mappings == 4 + assert mapper.depth_of_swaps == {1: 4} + + if enable_caching: + assert mapper.paths.cache._cache + assert mapper.paths.cache.has_path(1, 7) + assert mapper.paths.cache.has_path(3, 5) + assert mapper.paths.cache.has_path(1, 8) + + +@pytest.mark.parametrize("enable_caching", [False, True]) +def test_3x3_grid_multiple_simultaneous_intersecting_paths_possible( + grid33_graph_mapper, enable_caching): + mapper, backend = grid33_graph_mapper + mapper.enable_caching = enable_caching + + # 0 - 1 - 2 + # | | | + # 3 - 4 - 5 + # | | | + # 6 - 7 - 8 + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + # NB. 
when generating the swaps for the paths through the graph, the path + # 0 -> 7 needs to be performed *before* the one 3 -> 5 + cmd0 = Command(None, X, qubits=([qb[3]], ), controls=[qb[5]]) + cmd1 = Command(None, X, qubits=([qb[0]], ), controls=[qb[7]]) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper.receive(allocate_cmds + [cmd0, cmd1, cmd_flush]) + + assert not mapper._stored_commands + assert mapper.num_mappings == 1 + assert mapper.depth_of_swaps == {3: 1} + assert mapper.current_mapping == { + 0: 0, + 1: 3, + 2: 2, + 3: 4, + 4: 7, + 5: 5, + 6: 6, + 7: 1, + 8: 8 + } or mapper.current_mapping == { + 0: 0, + 1: 4, + 2: 2, + 3: 3, + 4: 5, + 5: 7, + 6: 6, + 7: 1, + 8: 8 + } + + if enable_caching: + assert mapper.paths.cache._cache + assert mapper.paths.cache.has_path(0, 7) + assert mapper.paths.cache.has_path(3, 5) + + +@pytest.mark.parametrize("enable_caching", [False, True]) +def test_mapper_to_str(simple_graph, enable_caching): + mapper = graphm.GraphMapper( + graph=simple_graph, + enable_caching=enable_caching, + add_qubits_to_mapping="fcfs") + backend = DummyEngine(save_commands=True) + eng = MainEngine(backend, [mapper]) + qureg = eng.allocate_qureg(len(simple_graph)) + + eng.flush() + assert mapper.current_mapping == dict(enumerate(range(len(simple_graph)))) + + H | qureg[0] + X | qureg[2] + + CNOT | (qureg[6], qureg[4]) + CNOT | (qureg[6], qureg[0]) + CNOT | (qureg[6], qureg[1]) + + All(Measure) | qureg + eng.flush() + + str_repr = str(mapper) + assert str_repr.count("Number of mappings: 2") == 1 + assert str_repr.count("1: 1") == 1 + assert str_repr.count("2: 1") == 2 + assert str_repr.count("3: 1") == 1 + assert str_repr.count(" 0 - 6: 1") == 1 + assert str_repr.count(" 0 - 3: 1") == 1 + + sent_gates = [cmd.gate for cmd in backend.received_commands] + assert sent_gates.count(H) == 1 + assert sent_gates.count(X) == 4 + assert sent_gates.count(Measure) == 7 diff --git a/pytest.ini b/pytest.ini index fab634b12..46f7c05ac 100755 --- a/pytest.ini +++ b/pytest.ini @@ -4,3 +4,5 @@ testpaths = projectq filterwarnings = error ignore:the matrix subclass is not the recommended way:PendingDeprecationWarning + ignore:invalid escape sequence:DeprecationWarning + ignore:Using or importing the ABCs from 'collections' instead:DeprecationWarning
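For reference, a minimal usage sketch of the GraphMapper added by this patch, pieced together from the API exercised in the tests above (GraphMapper(graph=..., enable_caching=..., add_qubits_to_mapping="fcfs"), MainEngine, current_mapping, str(mapper)). The 3-node path graph, the Simulator backend and the exact import path are illustrative assumptions, not part of the patch:

    import networkx as nx

    from projectq import MainEngine
    from projectq.backends import Simulator
    from projectq.cengines import GraphMapper
    from projectq.ops import All, CNOT, H, Measure

    # Connectivity graph with integer nodes 0..n-1, as used throughout the tests.
    graph = nx.Graph()
    graph.add_edges_from([(0, 1), (1, 2)])  # 0 - 1 - 2

    mapper = GraphMapper(graph=graph, enable_caching=True,
                         add_qubits_to_mapping="fcfs")
    eng = MainEngine(backend=Simulator(), engine_list=[mapper])

    qureg = eng.allocate_qureg(3)
    H | qureg[0]
    CNOT | (qureg[0], qureg[2])  # non-adjacent on the path graph, needs a swap
    All(Measure) | qureg
    eng.flush()

    print(mapper.current_mapping)  # logical qubit id -> backend id
    print(str(mapper))             # mapping statistics (number of mappings, swap depths, ...)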