// tensor4all_quanticstransform/flip.rs
//! Flip operator: f(x) = g(2^R - x)
//!
//! This transformation maps x -> 2^R - x in quantics representation.

use anyhow::Result;
use num_complex::Complex64;
use num_traits::{One, Zero};
use tensor4all_simplett::{types::tensor3_zeros, Tensor3Ops, TensorTrain};

use crate::common::{
    embed_single_var_mpo, tensortrain_to_linear_operator,
    tensortrain_to_linear_operator_asymmetric, BoundaryCondition, QuanticsOperator,
};
14
15/// Create a flip operator: f(x) = g(2^R - x)
16///
17/// This MPO transforms a function g(x) to f(x) = g(2^R - x) for x = 0, 1, ..., 2^R - 1.
18///
19/// # Arguments
20/// * `r` - Number of bits (sites)
21/// * `bc` - Boundary condition (affects behavior at x=0)
22///
23/// # Returns
24/// LinearOperator representing the flip transformation
25///
26/// # Examples
27///
28/// ```
29/// use tensor4all_quanticstransform::{flip_operator, BoundaryCondition};
30///
31/// // Create a flip operator for 4-bit (2^4 = 16 points) quantics representation
32/// let op = flip_operator(4, BoundaryCondition::Periodic).unwrap();
33///
34/// // The operator has one MPO tensor per bit
35/// assert_eq!(op.mpo.node_count(), 4);
36/// ```
37pub fn flip_operator(r: usize, bc: BoundaryCondition) -> Result<QuanticsOperator> {
38 if r == 0 {
39 return Err(anyhow::anyhow!("Number of sites must be positive"));
40 }
41 if r == 1 {
42 return Err(anyhow::anyhow!(
43 "MPO with one tensor is not supported for flip operator"
44 ));
45 }
46
47 let mpo = flip_mpo(r, bc)?;
48 let site_dims = vec![2; r];
49 tensortrain_to_linear_operator(&mpo, &site_dims)
50}
51
52/// Create a flip operator for one variable in a multi-variable system.
53///
54/// Acts as flip on `target_var` and identity on all other variables.
55/// The resulting operator works on interleaved quantics encoding where each
56/// site has local dimension `2^nvariables`.
57///
58/// # Arguments
59/// * `r` - Number of bits (sites). Must be at least 2.
60/// * `bc` - Boundary condition
61/// * `nvariables` - Total number of variables (must be at least 2)
62/// * `target_var` - Which variable to flip (0-indexed, must be < nvariables)
63///
64/// # Examples
65///
66/// ```
67/// use tensor4all_quanticstransform::{flip_operator_multivar, BoundaryCondition};
68///
69/// // Flip only the x-variable of a 2-variable function f(x, y)
70/// let op = flip_operator_multivar(4, BoundaryCondition::Periodic, 2, 0).unwrap();
71/// assert_eq!(op.mpo.node_count(), 4);
72/// ```
73pub fn flip_operator_multivar(
74 r: usize,
75 bc: BoundaryCondition,
76 nvariables: usize,
77 target_var: usize,
78) -> Result<QuanticsOperator> {
79 if r == 0 {
80 return Err(anyhow::anyhow!("Number of sites must be positive"));
81 }
82 if r == 1 {
83 return Err(anyhow::anyhow!(
84 "MPO with one tensor is not supported for flip operator"
85 ));
86 }
87
88 let mpo = flip_mpo(r, bc)?;
89 let embedded = embed_single_var_mpo(&mpo, nvariables, target_var)?;
90 let dim_multi = 1 << nvariables;
91 let dims = vec![dim_multi; r];
92 tensortrain_to_linear_operator_asymmetric(&embedded, &dims, &dims)
93}
94
95/// Create the flip MPO as a TensorTrain.
96///
97/// The flip operation computes 2^R - x using two's complement-like arithmetic.
98///
99/// Uses big-endian convention: site 0 = MSB, site R-1 = LSB.
100/// This matches Julia Quantics.jl's convention.
101///
102/// This is implemented using carry propagation where:
103/// - carry_in values: [-1, 0] (we start with +1 to compute 2^R - x = ~x + 1)
104/// - For each bit: out = -(a - 1) + carry_in
105/// - carry_out = out < 0 ? -1 : 0
106/// - result_bit = out mod 2
107///
108/// Carry propagates from LSB to MSB, so in big-endian convention:
109/// - Site 0 (MSB): has carry input from site 1, applies boundary condition
110/// - Site R-1 (LSB): initial carry = 0, has carry output to site R-2
111#[allow(clippy::needless_range_loop)]
112fn flip_mpo(r: usize, bc: BoundaryCondition) -> Result<TensorTrain<Complex64>> {
113 let single_tensor = single_tensor_flip();
114
115 let mut tensors = Vec::with_capacity(r);
116
117 // Create link indices with dimension 2 (for carry states)
118 // Carry states: index 0 = carry -1, index 1 = carry 0
119 //
120 // In big-endian convention with TensorTrain (left-to-right contraction):
121 // - Site 0 (MSB): applies BC, has cout going right (to site 1)
122 // - Site R-1 (LSB): initial carry cin=1 (carry=0), receives cin from left
123 //
124 // Carry propagates LSB → MSB, but in TT we contract left → right.
125 // So we store: left_bond = cout (to previous site), right_bond = cin (from next site)
126 // But this is reversed from standard. Instead, we flip the tensor storage:
127 // t[left, s, right] where left = cout going left, right = cin from right.
128
129 for n in 0..r {
130 if n == 0 {
131 // First tensor (MSB): apply boundary condition on left, receive cin from right
132 //
133 // Note: Julia's flipop only supports bc=1 (periodic) or bc=-1 (antisymmetric).
134 // For Open BC, we zero out the flip(0) case by setting bc_val = 0.
135 let bc_val = match bc {
136 BoundaryCondition::Periodic => Complex64::one(),
137 BoundaryCondition::Open => Complex64::zero(),
138 };
139
140 // Shape (1, 4, 2): left=1 (BC applied), site=4, right=2 (cin from site 1)
141 let mut t = tensor3_zeros(1, 4, 2);
142 for cin in 0..2 {
143 for a in 0..2 {
144 for b in 0..2 {
145 let mut sum = Complex64::zero();
146 for cout in 0..2 {
147 // cout=0 (carry=-1) gets weight 1.0, cout=1 (carry=0) gets weight bc
148 let bc_weight = if cout == 0 { Complex64::one() } else { bc_val };
149 sum += single_tensor[cin][cout][a][b] * bc_weight;
150 }
151 let s = a * 2 + b;
152 t.set3(0, s, cin, sum);
153 }
154 }
155 }
156 tensors.push(t);
157 } else if n == r - 1 {
158 // Last tensor (LSB): select initial carry state cin=1 (carry=0), send cout to left
159 // Shape (2, 4, 1): left=2 (cout to site R-2), site=4, right=1 (initial cin)
160 let mut t = tensor3_zeros(2, 4, 1);
161 for cout in 0..2 {
162 for a in 0..2 {
163 for b in 0..2 {
164 let val = single_tensor[1][cout][a][b]; // cin=1 (carry=0) is fixed
165 let s = a * 2 + b;
166 t.set3(cout, s, 0, val);
167 }
168 }
169 }
170 tensors.push(t);
171 } else {
172 // Middle tensors: receive cin from right, send cout to left
173 // Shape (2, 4, 2): left=2 (cout to left), site=4, right=2 (cin from right)
174 let mut t = tensor3_zeros(2, 4, 2);
175 for cin in 0..2 {
176 for cout in 0..2 {
177 for a in 0..2 {
178 for b in 0..2 {
179 let val = single_tensor[cin][cout][a][b];
180 let s = a * 2 + b;
181 t.set3(cout, s, cin, val);
182 }
183 }
184 }
185 }
186 tensors.push(t);
187 }
188 }
189
190 TensorTrain::new(tensors).map_err(|e| anyhow::anyhow!("Failed to create flip MPO: {}", e))
191}
192
193/// Create the single-site tensor for flip operation.
194///
195/// Returns a 4D tensor [cin][cout][a][b] where:
196/// - cin: input carry state (0 = carry -1, 1 = carry 0)
197/// - cout: output carry state
198/// - a: corresponds to s' (output site index in ITensor convention)
199/// - b: corresponds to s (input site index in ITensor convention)
200///
201/// The flip computes: out = -a + cval[cin]
202/// where a is the input bit value and cval = [-1, 0] for cin = [0, 1]
203///
204/// Note: In the Julia Quantics.jl code:
205/// - The loop variable `a` represents the input bit (0 or 1)
206/// - The computed `b` represents the output bit
207/// - The tensor is stored as tensor[cin, cout, a, b]
208/// - The ITensor is created as ITensor(t, (link_l, link_r, s', s))
209/// - This means a -> s' (output index) and b -> s (input index)
210///
211/// In TensorTrain MPO format, the combined site index is s = s' * 2 + s
212/// where s' is the output bit and s is the input bit.
213#[allow(clippy::needless_range_loop)]
214fn single_tensor_flip() -> [[[[Complex64; 2]; 2]; 2]; 2] {
215 let cval = [-1i32, 0i32];
216 let mut tensor = [[[[Complex64::zero(); 2]; 2]; 2]; 2];
217
218 for icin in 0..2 {
219 for a in 0..2 {
220 // a is the input bit value (0 or 1)
221 // Formula: out = -a + cval[icin]
222 let out = -(a as i32) + cval[icin];
223 let icout = if out < 0 { 0 } else { 1 };
224 let b = out.rem_euclid(2) as usize; // b is the output bit (0 or 1)
225 // Store as tensor[cin][cout][a][b] matching Julia exactly
226 tensor[icin][icout][a][b] = Complex64::one();
227 }
228 }
229
230 tensor
231}
232
// Unit tests live in the sibling `tests` module file; compiled only for test builds.
#[cfg(test)]
mod tests;