// problemreductions/rules/ilp_qubo.rs

use crate::models::algebraic::{Comparison, ObjectiveSense, ILP, QUBO};
use crate::reduction;
use crate::rules::traits::{ReduceTo, ReductionResult};

/// Result of reducing a 0-1 integer linear program (ILP) to a QUBO instance.
///
/// Holds the produced QUBO together with the variable count of the source
/// ILP, so a QUBO assignment (original variables followed by slack bits)
/// can be truncated back to an ILP assignment.
#[derive(Debug, Clone)]
pub struct ReductionILPToQUBO {
    // The QUBO instance produced by the reduction.
    target: QUBO<f64>,
    // Number of variables in the source ILP; QUBO variables at or beyond
    // this index are slack bits introduced for inequality constraints.
    num_original_vars: usize,
}
22
23impl ReductionResult for ReductionILPToQUBO {
24 type Source = ILP<bool>;
25 type Target = QUBO<f64>;
26
27 fn target_problem(&self) -> &Self::Target {
28 &self.target
29 }
30
31 fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> {
33 target_solution[..self.num_original_vars].to_vec()
34 }
35}
36
37#[reduction(
38 overhead = { num_vars = "num_vars + num_constraints * num_vars" }
39)]
40impl ReduceTo<QUBO<f64>> for ILP<bool> {
41 type Result = ReductionILPToQUBO;
42
43 fn reduce_to(&self) -> Self::Result {
44 let n = self.num_vars;
45
46 let num_constraints = self.constraints.len();
51 let mut a_dense = vec![vec![0.0; n]; num_constraints];
52 let mut b_vec = vec![0.0; num_constraints];
53 let mut slack_sizes = vec![0usize; num_constraints];
54
55 for (k, constraint) in self.constraints.iter().enumerate() {
56 for &(var, coef) in &constraint.terms {
57 a_dense[k][var] += coef;
58 }
59 b_vec[k] = constraint.rhs;
60
61 match constraint.cmp {
65 Comparison::Eq => {} Comparison::Le => {
67 let min_lhs: f64 = a_dense[k].iter().map(|&c| c.min(0.0)).sum();
69 let slack_range = constraint.rhs - min_lhs;
70 if slack_range > 0.0 {
71 slack_sizes[k] = (slack_range + 1.0).log2().ceil() as usize;
72 }
73 }
74 Comparison::Ge => {
75 let max_lhs: f64 = a_dense[k].iter().map(|&c| c.max(0.0)).sum();
77 let slack_range = max_lhs - constraint.rhs;
78 if slack_range > 0.0 {
79 slack_sizes[k] = (slack_range + 1.0).log2().ceil() as usize;
80 }
81 }
82 }
83 }
84
85 let total_slack: usize = slack_sizes.iter().sum();
86 let nq = n + total_slack;
87
88 let mut a_ext = vec![vec![0.0; nq]; num_constraints];
90 for k in 0..num_constraints {
91 for j in 0..n {
92 a_ext[k][j] = a_dense[k][j];
93 }
94 }
95
96 let mut slack_col = n;
98 for (k, &ns) in slack_sizes.iter().enumerate() {
99 if ns > 0 {
100 let sign = match self.constraints[k].cmp {
101 Comparison::Le => 1.0, Comparison::Ge => -1.0, Comparison::Eq => 0.0,
104 };
105 for s in 0..ns {
106 a_ext[k][slack_col + s] = sign * 2.0_f64.powi(s as i32);
107 }
108 slack_col += ns;
109 }
110 }
111
112 let mut c_vec = vec![0.0; nq];
114 for &(var, coef) in &self.objective {
115 c_vec[var] = coef;
116 }
117
118 if self.sense == ObjectiveSense::Minimize {
120 for c in c_vec.iter_mut() {
121 *c = -*c;
122 }
123 }
124
125 let penalty = 1.0
127 + c_vec.iter().map(|c| c.abs()).sum::<f64>()
128 + b_vec.iter().map(|b| b.abs()).sum::<f64>();
129
130 let mut matrix = vec![vec![0.0; nq]; nq];
132
133 let mut ba = vec![0.0; nq];
135 for (j, ba_j) in ba.iter_mut().enumerate() {
136 for (k, &b_k) in b_vec.iter().enumerate() {
137 *ba_j += b_k * a_ext[k][j];
138 }
139 }
140
141 for j in 0..nq {
143 matrix[j][j] = -(c_vec[j] + 2.0 * penalty * ba[j]);
144 }
145
146 for row in &a_ext {
150 for (i, row_i) in matrix.iter_mut().enumerate() {
151 if row[i].abs() < 1e-15 {
152 continue;
153 }
154 row_i[i] += penalty * row[i] * row[i];
156 for j in (i + 1)..nq {
158 row_i[j] += 2.0 * penalty * row[i] * row[j];
159 }
160 }
161 }
162
163 ReductionILPToQUBO {
164 target: QUBO::from_matrix(matrix),
165 num_original_vars: n,
166 }
167 }
168}
169
// Unit tests live out-of-line, mirroring this module's path under unit_tests/.
#[cfg(test)]
#[path = "../unit_tests/rules/ilp_qubo.rs"]
mod tests;