problemreductions/rules/
ilp_qubo.rs

1//! Reduction from binary ILP to QUBO.
2//!
3//! Binary ILP: optimize c^T x s.t. Ax {<=,>=,=} b, x ∈ {0,1}^n.
4//!
5//! Formulation (following qubogen):
6//! 1. Normalize constraints to Ax = b by adding slack variables
7//! 2. QUBO = -diag(c + 2·P·b·A) + P·A^T·A
8//!
9//! For Minimize sense, c is negated (convert to maximization).
//! Slack variables: ceil(log2(slack_range + 1)) bits per inequality constraint.
11
12use crate::models::algebraic::{Comparison, ObjectiveSense, ILP, QUBO};
13use crate::reduction;
14use crate::rules::traits::{ReduceTo, ReductionResult};
15
/// Result of reducing binary ILP to QUBO.
#[derive(Debug, Clone)]
pub struct ReductionILPToQUBO {
    // The produced QUBO over n original variables plus the binary-encoded
    // slack variables appended for inequality constraints.
    target: QUBO<f64>,
    // Number of variables in the source ILP; used to truncate a QUBO
    // solution back to the original variables (slack bits are discarded).
    num_original_vars: usize,
}
22
23impl ReductionResult for ReductionILPToQUBO {
24    type Source = ILP<bool>;
25    type Target = QUBO<f64>;
26
27    fn target_problem(&self) -> &Self::Target {
28        &self.target
29    }
30
31    /// Extract only the original variables (discard slack).
32    fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> {
33        target_solution[..self.num_original_vars].to_vec()
34    }
35}
36
37#[reduction(
38    overhead = { num_vars = "num_vars + num_constraints * num_vars" }
39)]
40impl ReduceTo<QUBO<f64>> for ILP<bool> {
41    type Result = ReductionILPToQUBO;
42
43    fn reduce_to(&self) -> Self::Result {
44        let n = self.num_vars;
45
46        // All variables are binary by type — no runtime check needed.
47
48        // Build dense constraint matrix A and rhs vector b
49        // Also compute slack sizes for inequality constraints
50        let num_constraints = self.constraints.len();
51        let mut a_dense = vec![vec![0.0; n]; num_constraints];
52        let mut b_vec = vec![0.0; num_constraints];
53        let mut slack_sizes = vec![0usize; num_constraints];
54
55        for (k, constraint) in self.constraints.iter().enumerate() {
56            for &(var, coef) in &constraint.terms {
57                a_dense[k][var] += coef;
58            }
59            b_vec[k] = constraint.rhs;
60
61            // Compute slack variable count: ceil(log2(slack_range + 1)) bits
62            // to represent integer values 0..slack_range with binary encoding.
63            // For binary variables, min_lhs = Σ min(0, a_i), max_lhs = Σ max(0, a_i).
64            match constraint.cmp {
65                Comparison::Eq => {} // no slack needed
66                Comparison::Le => {
67                    // Ax <= b → Ax + s = b, s ∈ {0, ..., b - min_lhs}
68                    let min_lhs: f64 = a_dense[k].iter().map(|&c| c.min(0.0)).sum();
69                    let slack_range = constraint.rhs - min_lhs;
70                    if slack_range > 0.0 {
71                        slack_sizes[k] = (slack_range + 1.0).log2().ceil() as usize;
72                    }
73                }
74                Comparison::Ge => {
75                    // Ax >= b → Ax - s = b, s ∈ {0, ..., max_lhs - b}
76                    let max_lhs: f64 = a_dense[k].iter().map(|&c| c.max(0.0)).sum();
77                    let slack_range = max_lhs - constraint.rhs;
78                    if slack_range > 0.0 {
79                        slack_sizes[k] = (slack_range + 1.0).log2().ceil() as usize;
80                    }
81                }
82            }
83        }
84
85        let total_slack: usize = slack_sizes.iter().sum();
86        let nq = n + total_slack;
87
88        // Extend A with slack columns
89        let mut a_ext = vec![vec![0.0; nq]; num_constraints];
90        for k in 0..num_constraints {
91            for j in 0..n {
92                a_ext[k][j] = a_dense[k][j];
93            }
94        }
95
96        // Add slack variable columns
97        let mut slack_col = n;
98        for (k, &ns) in slack_sizes.iter().enumerate() {
99            if ns > 0 {
100                let sign = match self.constraints[k].cmp {
101                    Comparison::Le => 1.0,  // Ax + s = b
102                    Comparison::Ge => -1.0, // Ax - s = b
103                    Comparison::Eq => 0.0,
104                };
105                for s in 0..ns {
106                    a_ext[k][slack_col + s] = sign * 2.0_f64.powi(s as i32);
107                }
108                slack_col += ns;
109            }
110        }
111
112        // Build dense cost vector (nq elements)
113        let mut c_vec = vec![0.0; nq];
114        for &(var, coef) in &self.objective {
115            c_vec[var] = coef;
116        }
117
118        // For Minimize sense, negate the cost (formula assumes maximization)
119        if self.sense == ObjectiveSense::Minimize {
120            for c in c_vec.iter_mut() {
121                *c = -*c;
122            }
123        }
124
125        // Penalty: must be large enough to enforce constraints
126        let penalty = 1.0
127            + c_vec.iter().map(|c| c.abs()).sum::<f64>()
128            + b_vec.iter().map(|b| b.abs()).sum::<f64>();
129
130        // QUBO = -diag(c + 2·P·b·A) + P·A^T·A
131        let mut matrix = vec![vec![0.0; nq]; nq];
132
133        // Compute b·A (b_vec dot each column of a_ext)
134        let mut ba = vec![0.0; nq];
135        for (j, ba_j) in ba.iter_mut().enumerate() {
136            for (k, &b_k) in b_vec.iter().enumerate() {
137                *ba_j += b_k * a_ext[k][j];
138            }
139        }
140
141        // Diagonal: -(c_j + 2·P·(b·A)_j)
142        for j in 0..nq {
143            matrix[j][j] = -(c_vec[j] + 2.0 * penalty * ba[j]);
144        }
145
146        // A^T·A contribution (upper-triangular convention)
147        // Diagonal: P · Σ_k a_{ki}²
148        // Off-diagonal (i<j): 2·P · Σ_k a_{ki}·a_{kj}
149        for row in &a_ext {
150            for (i, row_i) in matrix.iter_mut().enumerate() {
151                if row[i].abs() < 1e-15 {
152                    continue;
153                }
154                // Diagonal
155                row_i[i] += penalty * row[i] * row[i];
156                // Off-diagonal
157                for j in (i + 1)..nq {
158                    row_i[j] += 2.0 * penalty * row[i] * row[j];
159                }
160            }
161        }
162
163        ReductionILPToQUBO {
164            target: QUBO::from_matrix(matrix),
165            num_original_vars: n,
166        }
167    }
168}
169
// Unit tests are kept out-of-line, mirroring this module's path under
// `unit_tests/`.
#[cfg(test)]
#[path = "../unit_tests/rules/ilp_qubo.rs"]
mod tests;