// problemreductions/rules/minimummatrixcover_ilp.rs
//! Reduction from MinimumMatrixCover to ILP (Integer Linear Programming).
//!
//! Uses McCormick linearization to convert the quadratic sign-assignment
//! objective into a linear program with binary variables.
//!
//! Binary variables x_i ∈ {0,1} where f(i) = 2x_i - 1.
//! For i<j, auxiliary variables y_{ij} linearize x_i·x_j via:
//! y_{ij} ≤ x_i, y_{ij} ≤ x_j, y_{ij} ≥ x_i + x_j - 1

use crate::models::algebraic::MinimumMatrixCover;
use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP};
use crate::reduction;
use crate::rules::traits::{ReduceTo, ReductionResult};

/// Result of reducing MinimumMatrixCover to ILP.
///
/// Holds the constructed ILP together with the source problem's row count,
/// which is needed to project an ILP assignment back onto the source's
/// sign variables.
#[derive(Debug, Clone)]
pub struct ReductionMinimumMatrixCoverToILP {
    // The ILP instance produced by `reduce_to`.
    target: ILP<bool>,
    // Number of matrix rows = number of sign variables x_0..x_{n-1}.
    // ILP variable indices `n..` are the McCormick auxiliaries y_{ij}.
    n: usize,
}
21
22impl ReductionResult for ReductionMinimumMatrixCoverToILP {
23 type Source = MinimumMatrixCover;
24 type Target = ILP<bool>;
25
26 fn target_problem(&self) -> &ILP<bool> {
27 &self.target
28 }
29
30 fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> {
31 // First n variables are the sign variables x_0,...,x_{n-1}
32 target_solution[..self.n].to_vec()
33 }
34}
35
/// Map pair (i,j) with i<j to its auxiliary-variable index in the ILP.
///
/// Layout: indices `0..n` are the sign variables x_0..x_{n-1}; the
/// upper-triangle pairs follow in row-major order:
/// (0,1), (0,2), …, (0,n-1), (1,2), …, (n-2,n-1).
///
/// The offset into the upper triangle, Σ_{k=0}^{i-1} (n-1-k), is computed
/// in closed form as i·(2n-i-1)/2 instead of summing (O(1) vs O(i)).
/// The factors i and (2n-i-1) sum to the odd number 2n-1, so exactly one
/// of them is even and the division by 2 is exact.
fn y_index(n: usize, i: usize, j: usize) -> usize {
    debug_assert!(i < j);
    debug_assert!(j < n);
    let offset = i * (2 * n - i - 1) / 2;
    n + offset + (j - i - 1)
}
43
44#[reduction(
45 overhead = {
46 num_vars = "num_rows + num_rows * (num_rows - 1) / 2",
47 num_constraints = "3 * num_rows * (num_rows - 1) / 2",
48 }
49)]
50impl ReduceTo<ILP<bool>> for MinimumMatrixCover {
51 type Result = ReductionMinimumMatrixCoverToILP;
52
53 fn reduce_to(&self) -> Self::Result {
54 let n = self.num_rows();
55 let num_pairs = n * (n.saturating_sub(1)) / 2;
56 let num_vars = n + num_pairs;
57
58 // Build constraints: 3 per pair (i,j) with i<j
59 let mut constraints = Vec::with_capacity(3 * num_pairs);
60 for i in 0..n {
61 for j in (i + 1)..n {
62 let y = y_index(n, i, j);
63
64 // y_{ij} ≤ x_i → y_{ij} - x_i ≤ 0
65 constraints.push(LinearConstraint::le(vec![(y, 1.0), (i, -1.0)], 0.0));
66
67 // y_{ij} ≤ x_j → y_{ij} - x_j ≤ 0
68 constraints.push(LinearConstraint::le(vec![(y, 1.0), (j, -1.0)], 0.0));
69
70 // y_{ij} ≥ x_i + x_j - 1 → -y_{ij} + x_i + x_j ≤ 1
71 constraints.push(LinearConstraint::le(
72 vec![(y, -1.0), (i, 1.0), (j, 1.0)],
73 1.0,
74 ));
75 }
76 }
77
78 // Build objective coefficients.
79 // f(i)·f(j) = (2x_i-1)(2x_j-1) = 4x_ix_j - 2x_i - 2x_j + 1
80 //
81 // For i≠j (using y_{min(i,j),max(i,j)} for x_i·x_j):
82 // a_ij · f(i)·f(j) = a_ij · (4·y_{..} - 2·x_i - 2·x_j + 1)
83 //
84 // For diagonal (i=j): f(i)² = 1, so a_ii contributes a_ii (constant).
85 //
86 // Objective = Σ_{i≠j} a_ij·(4·y - 2·x_i - 2·x_j + 1) + Σ_i a_ii
87 // = Σ_{i<j} 4·(a_ij+a_ji)·y_{ij}
88 // + Σ_k [-2·(Σ_{j≠k} (a_kj + a_jk))]·x_k
89 // + constant
90 //
91 // The constant doesn't affect which x minimizes the objective.
92 // But we can still include it as an ILP constant offset... however
93 // ILP only has linear terms. Since extract_solution maps back to source
94 // and source.evaluate() computes the correct value, we just need the
95 // ILP to find the right optimum assignment. The constant is irrelevant.
96
97 let matrix = self.matrix();
98 let mut obj_coeffs = vec![0.0f64; num_vars];
99
100 // y_{ij} coefficients: 4·(a_ij + a_ji) for each i<j
101 for (i, row_i) in matrix.iter().enumerate() {
102 for j in (i + 1)..n {
103 let y = y_index(n, i, j);
104 obj_coeffs[y] = 4.0 * (row_i[j] + matrix[j][i]) as f64;
105 }
106 }
107
108 // x_k coefficients: -2·Σ_{j≠k} (a_kj + a_jk)
109 for (k, row_k) in matrix.iter().enumerate() {
110 let sum: i64 = (0..n)
111 .filter(|&j| j != k)
112 .map(|j| row_k[j] + matrix[j][k])
113 .sum();
114 obj_coeffs[k] = -2.0 * sum as f64;
115 }
116
117 let objective: Vec<(usize, f64)> = obj_coeffs
118 .into_iter()
119 .enumerate()
120 .filter(|&(_, c)| c != 0.0)
121 .collect();
122
123 let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize);
124
125 ReductionMinimumMatrixCoverToILP { target, n }
126 }
127}
128
#[cfg(feature = "example-db")]
pub(crate) fn canonical_rule_example_specs() -> Vec<crate::example_db::specs::RuleExampleSpec> {
    use crate::export::SolutionPair;

    let spec = crate::example_db::specs::RuleExampleSpec {
        id: "minimum_matrix_cover_to_ilp",
        build: || {
            // Small 2×2 instance used as the canonical rule example.
            let source = MinimumMatrixCover::new(vec![vec![0, 3], vec![2, 0]]);
            // Enumerating all four sign configurations:
            //   [0,1] → f=(-1,+1): 0·1 + 3·(-1) + 2·(-1) + 0·1 = -5
            //   [1,0] → f=(+1,-1): 0·1 + 3·(-1) + 2·(-1) + 0·1 = -5
            //   [0,0] → f=(-1,-1): 0 + 3 + 2 + 0 =  5
            //   [1,1] → f=(+1,+1): 0 + 3 + 2 + 0 =  5
            // so [0,1] (or [1,0]) is optimal with value -5.
            // Witness for source config [0,1]: x_0=0, x_1=1, and the
            // McCormick auxiliary y_{01} = x_0·x_1 = 0.
            let witness = SolutionPair {
                source_config: vec![0, 1],
                target_config: vec![0, 1, 0],
            };
            crate::example_db::specs::rule_example_with_witness::<_, ILP<bool>>(source, witness)
        },
    };
    vec![spec]
}
154
// Unit tests live out-of-line; the `path` attribute points this module at
// the shared unit-test tree.
#[cfg(test)]
#[path = "../unit_tests/rules/minimummatrixcover_ilp.rs"]
mod tests;