Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions cirq-core/cirq/experiments/two_qubit_xeb.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

from __future__ import annotations

import concurrent.futures as cf
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit - can we use the futures module name here for less indirection - like in

from concurrent import futures

import functools
import itertools
from collections.abc import Mapping, Sequence
Expand Down Expand Up @@ -409,7 +410,7 @@ def parallel_xeb_workflow(
random_state: cirq.RANDOM_STATE_OR_SEED_LIKE = None,
ax: plt.Axes | None = None,
pairs: Sequence[tuple[cirq.GridQubit, cirq.GridQubit]] | None = None,
pool: multiprocessing.pool.Pool | None = None,
pool: multiprocessing.pool.Pool | cf.Executor | None = None,
batch_size: int = 9,
tags: Sequence[Any] = (),
**plot_kwargs,
Expand All @@ -428,7 +429,7 @@ def parallel_xeb_workflow(
ax: the plt.Axes to plot the device layout on. If not given,
no plot is created.
pairs: Pairs to use. If not specified, use all pairs between adjacent qubits.
pool: An optional multiprocessing pool.
pool: An optional pool.
batch_size: We call `run_batch` on the sampler, which can speed up execution in certain
environments. The number of (circuit, cycle_depth) tasks to be run in each batch
is given by this number.
Expand Down
11 changes: 6 additions & 5 deletions cirq-core/cirq/experiments/xeb_fitting.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

from __future__ import annotations

import concurrent.futures as cf
import dataclasses
from abc import ABC, abstractmethod
from collections.abc import Iterable, Sequence
Expand Down Expand Up @@ -49,7 +50,7 @@ def benchmark_2q_xeb_fidelities(
circuits: Sequence[cirq.Circuit],
cycle_depths: Sequence[int] | None = None,
param_resolver: cirq.ParamResolverOrSimilarType = None,
pool: multiprocessing.pool.Pool | None = None,
pool: multiprocessing.pool.Pool | cf.Executor | None = None,
) -> pd.DataFrame:
"""Simulate and benchmark two-qubit XEB circuits.

Expand Down Expand Up @@ -451,7 +452,7 @@ def characterize_phased_fsim_parameters_with_xeb(
xatol: float = 1e-3,
fatol: float = 1e-3,
verbose: bool = True,
pool: multiprocessing.pool.Pool | None = None,
pool: multiprocessing.pool.Pool | cf.Executor | None = None,
) -> XEBCharacterizationResult:
"""Run a classical optimization to fit phased fsim parameters to experimental data, and
thereby characterize PhasedFSim-like gates.
Expand All @@ -470,7 +471,7 @@ def characterize_phased_fsim_parameters_with_xeb(
fatol: The `fatol` argument for Nelder-Mead. This is the absolute error for convergence
in the function evaluation.
verbose: Whether to print progress updates.
pool: An optional multiprocessing pool to execute circuit simulations in parallel.
pool: An optional pool to execute circuit simulations in parallel.
"""
(pair,) = sampled_df['pair'].unique()
initial_simplex, names = options.get_initial_simplex_and_names(
Expand Down Expand Up @@ -546,7 +547,7 @@ def characterize_phased_fsim_parameters_with_xeb_by_pair(
initial_simplex_step_size: float = 0.1,
xatol: float = 1e-3,
fatol: float = 1e-3,
pool: multiprocessing.pool.Pool | None = None,
pool: multiprocessing.pool.Pool | cf.Executor | None = None,
) -> XEBCharacterizationResult:
"""Run a classical optimization to fit phased fsim parameters to experimental data, and
thereby characterize PhasedFSim-like gates grouped by pairs.
Expand All @@ -570,7 +571,7 @@ def characterize_phased_fsim_parameters_with_xeb_by_pair(
in the parameters.
fatol: The `fatol` argument for Nelder-Mead. This is the absolute error for convergence
in the function evaluation.
pool: An optional multiprocessing pool to execute pair optimization in parallel. Each
pool: An optional pool to execute pair optimization in parallel. Each
optimization (and the simulations therein) runs serially.
"""
pairs = sampled_df['pair'].unique()
Expand Down
3 changes: 2 additions & 1 deletion cirq-core/cirq/experiments/xeb_simulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

from __future__ import annotations

import concurrent.futures as cf
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any, TYPE_CHECKING
Expand Down Expand Up @@ -85,7 +86,7 @@ def simulate_2q_xeb_circuits(
circuits: Sequence[cirq.Circuit],
cycle_depths: Sequence[int],
param_resolver: cirq.ParamResolverOrSimilarType = None,
pool: multiprocessing.pool.Pool | None = None,
pool: multiprocessing.pool.Pool | cf.Executor | None = None,
simulator: cirq.SimulatesIntermediateState | None = None,
) -> pd.DataFrame:
"""Simulate two-qubit XEB circuits.
Expand Down
18 changes: 9 additions & 9 deletions cirq-core/cirq/experiments/z_phase_calibration.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@

from __future__ import annotations

import multiprocessing
import concurrent.futures as cf
import multiprocessing.pool
from collections.abc import Sequence
from typing import Any, TYPE_CHECKING
Expand Down Expand Up @@ -46,7 +46,7 @@ def z_phase_calibration_workflow(
cycle_depths: Sequence[int] = tuple(np.arange(3, 100, 20)),
random_state: cirq.RANDOM_STATE_OR_SEED_LIKE = None,
atol: float = 1e-3,
num_workers_or_pool: int | multiprocessing.pool.Pool = -1,
num_workers_or_pool: int | multiprocessing.pool.Pool | cf.Executor = -1,
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

AFAICT, the code later needs only the Pool.map or Executor.map functions.

Would it be possible to change this to accept either an int for a number of workers or a parallel-map function?

pairs: Sequence[tuple[cirq.GridQubit, cirq.GridQubit]] | None = None,
tags: Sequence[Any] = (),
) -> tuple[xeb_fitting.XEBCharacterizationResult, pd.DataFrame]:
Expand Down Expand Up @@ -81,7 +81,7 @@ def z_phase_calibration_workflow(
cycle_depths: The cycle depths to use.
random_state: The random state to use.
atol: Absolute tolerance to be used by the minimizer.
num_workers_or_pool: An optional multi-processing pool or number of workers.
num_workers_or_pool: An optional pool or number of workers.
A zero value means no multiprocessing.
A positive integer value will create a pool with the given number of workers.
A negative value will create a pool with the maximum number of workers.
Expand All @@ -92,12 +92,12 @@ def z_phase_calibration_workflow(
- A `pd.DataFrame` comparing the before and after fidelities.
"""

pool: multiprocessing.pool.Pool | None = None
pool: multiprocessing.pool.Pool | cf.Executor | None = None
local_pool = False
if isinstance(num_workers_or_pool, multiprocessing.pool.Pool):
if isinstance(num_workers_or_pool, (multiprocessing.pool.Pool, cf.Executor)):
pool = num_workers_or_pool # pragma: no cover
elif num_workers_or_pool != 0:
pool = multiprocessing.Pool(num_workers_or_pool if num_workers_or_pool > 0 else None)
pool = cf.ThreadPoolExecutor(num_workers_or_pool if num_workers_or_pool > 0 else None)
Copy link
Collaborator

@pavoljuhas pavoljuhas Mar 11, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ThreadPoolExecutor is subject to GIL. Unless the mapped function spends a lot of time in numpy calls or waiting for IO, the execution would be the same as in a serial call or worse due to thread-switching overhead.

I made a quick test with a many-term sums computed in series or in parallel with multiprocessing.Pool.map vs ThreadPoolExecutor.map. The ThreadPoolExecutor took about 2.5 times longer than a serial evaluation.

example timing code
def partial_sum(start_end: tuple[int, int]) -> float:
    """Sum the terms (-1)**i / i for i in range(start, end, 3).

    `start_end` packs the (start, end) range bounds into one argument so the
    function can serve as the unit of work for a parallel `map` call.
    """
    begin, end = start_end
    return sum((-1.0) ** i / i for i in range(begin, end, 3))


def tedious_sum(terms_count: int, mapfunc) -> float:
    """Total the series up to `terms_count` by fanning three `partial_sum`
    chunks (offsets 1, 2, 3) through `mapfunc` — e.g. the builtin `map`,
    `multiprocessing.Pool.map`, or `concurrent.futures.Executor.map`.
    """
    chunks = ((offset, terms_count) for offset in (1, 2, 3))
    return sum(mapfunc(partial_sum, chunks))

# %timeit tedious_sum(10_000_000, map)
# 2.93 s ± 51.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)

# pool = multiprocessing.Pool(3)
# %timeit tedious_sum(10_000_000, pool.map)
# 1.01 s ± 25.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)

# tpx = concurrent.futures.ThreadPoolExecutor(3)
# %timeit tedious_sum(10_000_000, tpx.map)
# 8.46 s ± 1.11 s per loop (mean ± std. dev. of 7 runs, 1 loop each)
 

Can you make a quick comparison of the z_phase_calibration_workflow duration with ThreadPoolExecutor compared to a serial run?

If the durations are comparable, I'd suggest making serial evaluation the default.

local_pool = True

fids_df_0, circuits, sampled_df = parallel_xeb_workflow(
Expand Down Expand Up @@ -143,8 +143,8 @@ def z_phase_calibration_workflow(
)

if local_pool:
assert isinstance(pool, multiprocessing.pool.Pool)
pool.close()
assert isinstance(pool, cf.Executor)
pool.shutdown()
Comment on lines +146 to +147
Copy link
Collaborator

@pavoljuhas pavoljuhas Mar 11, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Consider wrapping the local pool in a contextlib.ExitStack context instead.
I think there is a risk otherwise for the pool to stay around if function aborts on exception.

return result, before_after


Expand All @@ -159,7 +159,7 @@ def calibrate_z_phases(
cycle_depths: Sequence[int] = tuple(np.arange(3, 100, 20)),
random_state: cirq.RANDOM_STATE_OR_SEED_LIKE = None,
atol: float = 1e-3,
num_workers_or_pool: int | multiprocessing.pool.Pool = -1,
num_workers_or_pool: int | multiprocessing.pool.Pool | cf.Executor = -1,
pairs: Sequence[tuple[cirq.GridQubit, cirq.GridQubit]] | None = None,
tags: Sequence[Any] = (),
) -> dict[tuple[cirq.Qid, cirq.Qid], cirq.PhasedFSimGate]:
Expand Down
28 changes: 28 additions & 0 deletions cirq-core/cirq/experiments/z_phase_calibration_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,34 @@ def test_calibrate_z_phases_workflow_no_options(pool, angles, error) -> None:
assert 'theta' not in params


@pytest.mark.parametrize(['angles', 'error'], _create_tests(n=3))
def test_calibrate_z_phases_workflow_no_options_no_pool(angles, error) -> None:
    """Run the z-phase calibration workflow with an integer worker count.

    Exercises the path where `num_workers_or_pool` is a positive int, so the
    workflow creates (and later shuts down) its own local executor instead of
    receiving a caller-supplied pool.
    """
    ideal_gate = cirq.PhasedFSimGate(**{name: value for name, value in zip(_ANGLES, angles)})
    noisy_gate = cirq.PhasedFSimGate(
        **{name: value + delta for name, value, delta in zip(_ANGLES, angles, error)}
    )

    test_sampler = _TestSimulator(ideal_gate, noisy_gate, seed=_SEED)
    pair = cirq.q(0, 0), cirq.q(0, 1)
    result, _ = z_phase_calibration_workflow(
        test_sampler,
        pair,
        ideal_gate,
        options=None,
        n_repetitions=1,
        n_combinations=1,
        n_circuits=1,
        cycle_depths=(1, 2),
        random_state=_SEED,
        num_workers_or_pool=2,
    )

    # With default (None) options only the z-phase angles are characterized;
    # theta and phi must be absent from the fitted parameters.
    for fitted in result.final_params.values():
        assert 'zeta' in fitted
        assert 'chi' in fitted
        assert 'gamma' in fitted
        assert 'phi' not in fitted
        assert 'theta' not in fitted


def test_plot_z_phase_calibration_result() -> None:
df = pd.DataFrame()
qs = cirq.q(0, 0), cirq.q(0, 1), cirq.q(0, 2)
Expand Down
Loading