Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support tensornet trajectory simulation for unitary mixture noise channels #2520

Merged
merged 25 commits into from
Feb 6, 2025
Merged
Show file tree
Hide file tree
Changes from 19 commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
eeaf2ef
Initial work on adding support for cutensornetStateApplyUnitaryChannel
1tnguyen Dec 3, 2024
4914555
Enable some noise test cases on tensornet
1tnguyen Dec 4, 2024
a3980c2
Fix a copy-and-paste error
1tnguyen Dec 4, 2024
d01763b
support unitary mixture channel detection and enable more tests
1tnguyen Dec 5, 2024
2851826
MPS trajectory: we need to compute the MPS factorization for each tra…
1tnguyen Dec 6, 2024
79265d4
Merge branch 'main' into tnguyen/tensornet-trajectory
1tnguyen Jan 20, 2025
fdcbb06
Split cutensornetStateFinalizeMPS and (cutensornetStatePrepare + cute…
1tnguyen Jan 20, 2025
1207435
Add trajectories to observe
1tnguyen Jan 22, 2025
aef8b63
Handle unitary channels in all code paths
1tnguyen Jan 22, 2025
be2d38e
Code format
1tnguyen Jan 22, 2025
2be6d46
Reduce test time
1tnguyen Jan 22, 2025
bbc1d13
Update cutensornet version requirement
1tnguyen Jan 22, 2025
9201956
Add sampler cache for MPS trajectory
1tnguyen Jan 23, 2025
e27be87
Add cache workspace mem
1tnguyen Jan 24, 2025
255430c
Merge branch 'main' into tnguyen/tensornet-trajectory
1tnguyen Jan 29, 2025
0e53b7c
Add trajectory support to non-path-reuse path merging from main
1tnguyen Jan 29, 2025
1a5eef6
Update some of the cutensornet DEPRECATED enums
1tnguyen Jan 29, 2025
a6a2040
Make number of hyper samples configurable
1tnguyen Jan 29, 2025
67a970c
Docs update
1tnguyen Jan 29, 2025
8720fef
CR: Correct Pauli Y matrix
1tnguyen Jan 30, 2025
7542c74
CR: refactor SimulatorTensorNetBase::applyNoiseChannel
1tnguyen Jan 30, 2025
d0fc6e7
CR: code refactor in MPS implementation
1tnguyen Jan 30, 2025
a7e964b
Add an exact output state vec check for tensornet to check matrix data
1tnguyen Jan 30, 2025
80741ec
CR: Add a code comment for a helper function
1tnguyen Feb 3, 2025
2c23e78
Merge branch 'main' into tnguyen/tensornet-trajectory
1tnguyen Feb 6, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions docs/sphinx/using/backends/simulators.rst
Original file line number Diff line number Diff line change
Expand Up @@ -483,6 +483,7 @@ Specific aspects of the simulation can be configured by setting the following of
* **`OMP_NUM_THREADS=X`**: To enable CPU parallelization, set X to `NUMBER_OF_CORES_PER_NODE/NUMBER_OF_GPUS_PER_NODE`.
* **`CUDAQ_TENSORNET_CONTROLLED_RANK=X`**: Specify the number of controlled qubits whereby the full tensor body of the controlled gate is expanded. If the number of controlled qubits is greater than this value, the gate is applied as a controlled tensor operator to the tensor network state. Default value is 1.
* **`CUDAQ_TENSORNET_OBSERVE_CONTRACT_PATH_REUSE=X`**: Set this environment variable to `TRUE` (`ON`) or `FALSE` (`OFF`) to enable or disable contraction path reuse when computing expectation values. Default is `OFF`.
* **`CUDAQ_TENSORNET_NUM_HYPER_SAMPLES=X`**: Specify the number of hyper samples used in the tensor network contraction path finder. Default value is 8 if not specified.

.. note::

Expand Down
4 changes: 2 additions & 2 deletions runtime/nvqir/cutensornet/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -60,8 +60,8 @@ set(CUTENSORNET_PATCH ${CMAKE_MATCH_1})

set(CUTENSORNET_VERSION ${CUTENSORNET_MAJOR}.${CUTENSORNET_MINOR}.${CUTENSORNET_PATCH})
message(STATUS "Found cutensornet version: ${CUTENSORNET_VERSION}")
# We need cutensornet v2.5.0+
if (${CUTENSORNET_VERSION} VERSION_GREATER_EQUAL "2.5")
# We need cutensornet v2.6.0+ (cutensornetStateApplyUnitaryChannel)
if (${CUTENSORNET_VERSION} VERSION_GREATER_EQUAL "2.6")
set (BASE_TENSOR_BACKEND_SRS
simulator_cutensornet.cpp
tensornet_spin_op.cpp
Expand Down
202 changes: 195 additions & 7 deletions runtime/nvqir/cutensornet/simulator_cutensornet.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,194 @@ void SimulatorTensorNetBase::applyGate(const GateApplicationTask &task) {
}
}

// Check whether a matrix is a scaled unitary, i.e., K = sqrt(p) * U for some
// probability p and unitary U.
// The matrix is square, given in row-major order as a flat vector.
// Returns the scale factor p if the matrix is a scaled unitary (within
// tolerance eps), std::nullopt otherwise (including the all-zero matrix).
template <typename T>
std::optional<double> isScaledUnitary(const std::vector<std::complex<T>> &mat,
                                      double eps) {
  typedef Eigen::Matrix<std::complex<T>, Eigen::Dynamic, Eigen::Dynamic,
                        Eigen::RowMajor>
      RowMajorMatTy;
  // A d x d matrix stores d^2 entries, hence dim = sqrt(size).
  // (Note: log2(size) only coincides with d for d == 2 or d == 4 and would
  // mis-size the map for 3+ qubit Kraus operators.)
  const int dim = std::lround(std::sqrt(static_cast<double>(mat.size())));
  Eigen::Map<const RowMajorMatTy> kMat(mat.data(), dim, dim);
  if (kMat.isZero())
    return std::nullopt;
  // Check that (K_dag * K) is a scaled identity matrix
  // i.e., the K matrix is a scaled unitary.
  auto kdK = kMat.adjoint() * kMat;
  if (!kdK.isDiagonal())
    return std::nullopt;
  // First diagonal element is the candidate scale factor p; it must be a
  // (near-)real, non-zero value and K_dag * K / p must be the identity.
  std::complex<T> val = kdK(0, 0);
  if (std::abs(val) > eps && std::abs(val.imag()) < eps) {
    auto scaledKdK = (std::complex<T>{1.0} / val) * kdK;
    if (scaledKdK.isIdentity())
      return val.real();
  }
  return std::nullopt;
}

// Attempt to decompose a list of Kraus operators into a unitary mixture:
// K_i = sqrt(p_i) * U_i, with unitaries U_i and probabilities p_i summing to 1
// (trace preservation).
// Returns the pair (probabilities, unitaries) on success; std::nullopt if any
// operator is not a scaled unitary or the probabilities do not sum to one
// (within tolerance tol).
static std::optional<std::pair<std::vector<double>,
                               std::vector<std::vector<std::complex<double>>>>>
computeUnitaryMixture(
    const std::vector<std::vector<std::complex<double>>> &krausOps,
    double tol = 1e-6) {
  std::vector<double> probs;
  std::vector<std::vector<std::complex<double>>> mats;
  // Recover the unitary U = K / sqrt(p) from a scaled-unitary Kraus operator
  // K = sqrt(p) * U. Note: K_dag * K = p * I, so isScaledUnitary returns p;
  // the scale on K itself is sqrt(p), hence the sqrt here.
  const auto scaleMat = [](const std::vector<std::complex<double>> &mat,
                           double scaleFactor) {
    std::vector<std::complex<double>> scaledMat = mat;
    for (auto &x : scaledMat)
      x /= std::sqrt(scaleFactor);
    return scaledMat;
  };
  for (const auto &op : krausOps) {
    const auto scaledFactor = isScaledUnitary(op, tol);
    if (!scaledFactor.has_value())
      return std::nullopt;
    probs.emplace_back(scaledFactor.value());
    mats.emplace_back(scaleMat(op, scaledFactor.value()));
  }

  // Trace preservation requires the probabilities to sum to one.
  if (std::abs(1.0 - std::reduce(probs.begin(), probs.end())) > tol)
    return std::nullopt;

  return std::make_pair(probs, mats);
}

/// @brief Apply a noise channel attached to a gate application.
/// Trajectory simulation supports only unitary-mixture channels: each Kraus
/// operator must be a scaled unitary and the probabilities must sum to one.
/// Well-known channels (depolarization, bit-flip, phase-flip) are mapped
/// directly onto Pauli mixtures; unknown channels are checked numerically.
/// @param gateName Name of the gate the channel is attached to.
/// @param controls Control qubit indices of the gate.
/// @param targets Target qubit indices of the gate.
/// @param params Gate parameters (used for channel lookup).
/// @throws std::runtime_error on malformed channel parameters or when the
/// channel cannot be represented as a unitary mixture.
void SimulatorTensorNetBase::applyNoiseChannel(
    const std::string_view gateName, const std::vector<std::size_t> &controls,
    const std::vector<std::size_t> &targets,
    const std::vector<double> &params) {
  // Do nothing if no execution context
  if (!executionContext)
    return;

  // Do nothing if no noise model
  if (!executionContext->noiseModel)
    return;

  // Get the name as a string
  std::string gName(gateName);
  // Qubit operands: controls first, then targets (same ordering as the gate
  // application itself).
  std::vector<int32_t> qubits{controls.begin(), controls.end()};
  qubits.insert(qubits.end(), targets.begin(), targets.end());

  // Get the Kraus channels specified for this gate and qubits
  auto krausChannels = executionContext->noiseModel->get_channels(
      gName, targets, controls, params);

  // If none, do nothing
  if (krausChannels.empty())
    return;

  cudaq::info(
      "[SimulatorTensorNetBase] Applying {} kraus channels on qubits: {}",
      krausChannels.size(), qubits);

  // Single-qubit Pauli matrices in row-major order.
  const std::vector<std::complex<double>> matPauliI = {
      {1.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {1.0, 0.0}};
  const std::vector<std::complex<double>> matPauliX{
      {0.0, 0.0}, {1.0, 0.0}, {1.0, 0.0}, {0.0, 0.0}};
  // Y = [[0, -i], [i, 0]]
  const std::vector<std::complex<double>> matPauliY{
      {0.0, 0.0}, {0.0, -1.0}, {0.0, 1.0}, {0.0, 0.0}};
  const std::vector<std::complex<double>> matPauliZ{
      {1.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {-1.0, 0.0}};

  // Return the device-memory buffer for the matrix under the given cache key,
  // uploading and caching it on first use.
  const auto getOrCacheMat = [&](const std::string &key,
                                 const std::vector<std::complex<double>> &mat) {
    const auto iter = m_gateDeviceMemCache.find(key);

    if (iter == m_gateDeviceMemCache.end()) {
      void *dMem = allocateGateMatrix(mat);
      m_gateDeviceMemCache[key] = dMem;
      return dMem;
    }
    return iter->second;
  };

  for (auto &krausChannel : krausChannels) {
    switch (krausChannel.noise_type) {
    case cudaq::noise_model_type::depolarization_channel: {
      if (krausChannel.parameters.size() != 1)
        throw std::runtime_error(
            fmt::format("Invalid parameters for a depolarization channel. "
                        "Expecting 1 parameter, got {}.",
                        krausChannel.parameters.size()));
      const std::vector<void *> channelMats{getOrCacheMat("PauliI", matPauliI),
                                            getOrCacheMat("PauliX", matPauliX),
                                            getOrCacheMat("PauliY", matPauliY),
                                            getOrCacheMat("PauliZ", matPauliZ)};
      const double p = krausChannel.parameters[0];
      // Identity with probability 1-p; X, Y, Z each with probability p/3.
      const std::vector<double> probabilities = {1 - p, p / 3., p / 3., p / 3.};
      m_state->applyUnitaryChannel(qubits, channelMats, probabilities);
      break;
    }
    case cudaq::noise_model_type::bit_flip_channel: {
      if (krausChannel.parameters.size() != 1)
        throw std::runtime_error(
            fmt::format("Invalid parameters for a bit-flip channel. "
                        "Expecting 1 parameter, got {}.",
                        krausChannel.parameters.size()));

      const std::vector<void *> channelMats{getOrCacheMat("PauliI", matPauliI),
                                            getOrCacheMat("PauliX", matPauliX)};
      const double p = krausChannel.parameters[0];
      const std::vector<double> probabilities = {1 - p, p};
      m_state->applyUnitaryChannel(qubits, channelMats, probabilities);
      break;
    }
    case cudaq::noise_model_type::phase_flip_channel: {
      if (krausChannel.parameters.size() != 1)
        throw std::runtime_error(
            fmt::format("Invalid parameters for a phase-flip channel. "
                        "Expecting 1 parameter, got {}.",
                        krausChannel.parameters.size()));

      const std::vector<void *> channelMats{getOrCacheMat("PauliI", matPauliI),
                                            getOrCacheMat("PauliZ", matPauliZ)};
      const double p = krausChannel.parameters[0];
      const std::vector<double> probabilities = {1 - p, p};
      m_state->applyUnitaryChannel(qubits, channelMats, probabilities);
      break;
    }
    case cudaq::noise_model_type::amplitude_damping_channel: {
      if (krausChannel.parameters.size() != 1)
        throw std::runtime_error(
            fmt::format("Invalid parameters for an amplitude damping channel. "
                        "Expecting 1 parameter, got {}.",
                        krausChannel.parameters.size()));
      // Amplitude damping is not a unitary mixture for any non-zero damping
      // rate; only the trivial (p == 0) case can be accepted (as a no-op).
      if (krausChannel.parameters[0] != 0.0)
        throw std::runtime_error(
            "Non-unitary noise channels are not supported.");
      break;
    }
    case cudaq::noise_model_type::unknown: {
      // Generic channel: accept it only if its Kraus operators numerically
      // form a unitary mixture.
      std::vector<std::vector<std::complex<double>>> mats;
      for (const auto &op : krausChannel.get_ops())
        mats.emplace_back(op.data);
      auto asUnitaryMixture = computeUnitaryMixture(mats);
      if (asUnitaryMixture.has_value()) {
        auto &[probabilities, unitaries] = asUnitaryMixture.value();
        std::vector<void *> channelMats;
        for (const auto &mat : unitaries)
          channelMats.emplace_back(getOrCacheMat(
              "ScaledUnitary_" + std::to_string(vecComplexHash(mat)), mat));
        m_state->applyUnitaryChannel(qubits, channelMats, probabilities);
      } else {
        throw std::runtime_error(
            "Non-unitary noise channels are not supported.");
      }
      break;
    }
    default:
      throw std::runtime_error(
          "Unsupported noise model type: " +
          std::to_string(static_cast<int>(krausChannel.noise_type)));
    }
  }
}

/// @brief Reset the state of a given qubit to zero
void SimulatorTensorNetBase::resetQubit(const std::size_t qubitIdx) {
flushGateQueue();
Expand Down Expand Up @@ -225,10 +413,7 @@ cudaq::ExecutionResult
SimulatorTensorNetBase::sample(const std::vector<std::size_t> &measuredBits,
const int shots) {
LOG_API_TIME();
std::vector<int32_t> measuredBitIds;
std::transform(measuredBits.begin(), measuredBits.end(),
std::back_inserter(measuredBitIds),
[](std::size_t idx) { return static_cast<int32_t>(idx); });
std::vector<int32_t> measuredBitIds(measuredBits.begin(), measuredBits.end());
if (shots < 1) {
cudaq::spin_op::spin_op_term allZTerm(2 * m_state->getNumQubits(), 0);
for (const auto &m : measuredBits)
Expand All @@ -239,7 +424,8 @@ SimulatorTensorNetBase::sample(const std::vector<std::size_t> &measuredBits,
}

prepareQubitTensorState();
const auto samples = m_state->sample(measuredBitIds, shots);
const auto samples =
m_state->sample(measuredBitIds, shots, requireCacheWorkspace());
cudaq::ExecutionResult counts(samples);
double expVal = 0.0;
// Compute the expectation value from the counts
Expand Down Expand Up @@ -286,7 +472,8 @@ SimulatorTensorNetBase::observe(const cudaq::spin_op &ham) {
// cutensornetNetworkOperator_t and compute the expectation value.
TensorNetworkSpinOp spinOp(ham, m_cutnHandle);
std::complex<double> expVal =
m_state->computeExpVal(spinOp.getNetworkOperator());
m_state->computeExpVal(spinOp.getNetworkOperator(),
this->executionContext->numberTrajectories);
expVal += spinOp.getIdentityTermOffset();
return cudaq::observe_result(expVal.real(), ham,
cudaq::sample_result(cudaq::ExecutionResult(
Expand Down Expand Up @@ -316,7 +503,8 @@ SimulatorTensorNetBase::observe(const cudaq::spin_op &ham) {
});

// Compute the expectation value for all terms
const auto termExpVals = m_state->computeExpVals(terms);
const auto termExpVals = m_state->computeExpVals(
terms, this->executionContext->numberTrajectories);
std::complex<double> expVal = 0.0;
// Construct per-term data in the final observe_result
std::vector<cudaq::ExecutionResult> results;
Expand Down
10 changes: 10 additions & 0 deletions runtime/nvqir/cutensornet/simulator_cutensornet.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,12 @@ class SimulatorTensorNetBase : public nvqir::CircuitSimulatorBase<double> {
/// @brief Apply quantum gate
void applyGate(const GateApplicationTask &task) override;

/// @brief Apply a noise channel
void applyNoiseChannel(const std::string_view gateName,
const std::vector<std::size_t> &controls,
const std::vector<std::size_t> &targets,
const std::vector<double> &params) override;

// Override base calculateStateDim (we don't instantiate full state vector in
// the tensornet backend). When the user want to retrieve the state vector, we
// check if it is feasible to do so.
Expand Down Expand Up @@ -88,6 +94,10 @@ class SimulatorTensorNetBase : public nvqir::CircuitSimulatorBase<double> {
/// @brief Query if direct expectation value calculation is enabled
virtual bool canHandleObserve() override;

/// @brief Return true if this simulator can use cache workspace (e.g., for
/// intermediate tensors)
virtual bool requireCacheWorkspace() const = 0;

protected:
cutensornetHandle_t m_cutnHandle;
std::unique_ptr<TensorNetState> m_state;
Expand Down
Loading
Loading