// problemreductions/rules/preemptivescheduling_ilp.rs

//! Reduction from PreemptiveScheduling to ILP<i32>.
//!
//! Time-indexed formulation with an auxiliary integer makespan variable:
//! - Variables: binary x_{t,u} for t in 0..n, u in 0..D_max (task t processed at slot u),
//!   plus integer M (the makespan), indexed at position n*D_max.
//! - Variable index for x_{t,u}: t * D_max + u.
//! - Variable index for M: n * D_max.
//! - Constraints:
//!   1. Work: Σ_u x_{t,u} = l(t) for each task t
//!   2. Capacity: Σ_t x_{t,u} ≤ m for each time slot u
//!   3. Precedence: for each (pred, succ) and each slot u,
//!      `l(pred) * x_{succ,u} ≤ Σ_{v=0}^{u-1} x_{pred,v}`
//!      This ensures succ can only be active at slot u if pred has already
//!      completed all l(pred) units of work in slots 0..u-1.
//!   4. Makespan lower bound: M ≥ (u+1) when x_{t,u}=1:
//!      `M - (u+1)*x_{t,u} ≥ 0` for all t,u
//!   5. Binary bounds: x_{t,u} ≤ 1 for each t,u
//!      (since ILP<i32> uses non-negative integer domain)
//! - Objective: Minimize M.
//!
//! Note: ILP<i32> treats all variables as non-negative integers. Binary constraints
//! on x_{t,u} are enforced by x_{t,u} ≤ 1.

24use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP};
25use crate::models::misc::PreemptiveScheduling;
26use crate::reduction;
27use crate::rules::traits::{ReduceTo, ReductionResult};
28
29/// Result of reducing PreemptiveScheduling to ILP<i32>.
30///
31/// Variable layout:
32/// - x_{t,u} at index t * D_max + u for t in 0..n, u in 0..D_max (n*D_max vars)
33/// - M at index n * D_max (1 integer var)
34///
35/// Total: n * D_max + 1 variables.
36#[derive(Debug, Clone)]
37pub struct ReductionPSToILP {
38 target: ILP<i32>,
39 num_tasks: usize,
40 d_max: usize,
41}
42
43impl ReductionResult for ReductionPSToILP {
44 type Source = PreemptiveScheduling;
45 type Target = ILP<i32>;
46
47 fn target_problem(&self) -> &ILP<i32> {
48 &self.target
49 }
50
51 /// Extract schedule from ILP solution.
52 ///
53 /// Returns a binary config of length n * D_max: `config[t * D_max + u] = x_{t,u}`.
54 fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> {
55 let nd = self.num_tasks * self.d_max;
56 target_solution[..nd.min(target_solution.len())].to_vec()
57 }
58}
59
60#[reduction(
61 overhead = {
62 num_vars = "num_tasks * d_max + 1",
63 num_constraints = "num_tasks + d_max + num_precedences * d_max + 2 * num_tasks * d_max",
64 }
65)]
66impl ReduceTo<ILP<i32>> for PreemptiveScheduling {
67 type Result = ReductionPSToILP;
68
69 fn reduce_to(&self) -> Self::Result {
70 let n = self.num_tasks();
71 let m = self.num_processors();
72 let d = self.d_max();
73 let num_task_vars = n * d;
74 let m_var = num_task_vars; // index of the makespan variable M
75 let num_vars = num_task_vars + 1;
76
77 let x = |t: usize, u: usize| t * d + u;
78
79 let mut constraints = Vec::new();
80
81 // 1. Work constraints: Σ_u x_{t,u} = l(t) for each task t
82 for t in 0..n {
83 let terms: Vec<(usize, f64)> = (0..d).map(|u| (x(t, u), 1.0)).collect();
84 constraints.push(LinearConstraint::eq(terms, self.lengths()[t] as f64));
85 }
86
87 // 2. Capacity constraints: Σ_t x_{t,u} ≤ m for each time slot u
88 for u in 0..d {
89 let terms: Vec<(usize, f64)> = (0..n).map(|t| (x(t, u), 1.0)).collect();
90 constraints.push(LinearConstraint::le(terms, m as f64));
91 }
92
93 // 3. Precedence constraints: for each (pred, succ) and each slot u:
94 // l(pred) * x_{succ,u} ≤ Σ_{v=0}^{u-1} x_{pred,v}
95 // i.e. l(pred) * x_{succ,u} - Σ_{v=0}^{u-1} x_{pred,v} ≤ 0
96 //
97 // Interpretation: succ can only be active at slot u once pred has
98 // accumulated all l(pred) units of work in strictly earlier slots.
99 for &(pred, succ) in self.precedences() {
100 let l_pred = self.lengths()[pred] as f64;
101 for u in 0..d {
102 // Σ_{v=0}^{u-1} x_{pred,v} - l(pred)*x_{succ,u} ≥ 0
103 // i.e. l(pred)*x_{succ,u} - Σ_{v<u} x_{pred,v} ≤ 0
104 let mut terms: Vec<(usize, f64)> = Vec::new();
105 // Cumulative pred work up to u-1
106 for v in 0..u {
107 terms.push((x(pred, v), -1.0));
108 }
109 terms.push((x(succ, u), l_pred));
110 constraints.push(LinearConstraint::le(terms, 0.0));
111 }
112 }
113
114 // 4. Makespan lower bound: M - (u+1)*x_{t,u} ≥ 0 for all t,u
115 for t in 0..n {
116 for u in 0..d {
117 constraints.push(LinearConstraint::ge(
118 vec![(m_var, 1.0), (x(t, u), -((u + 1) as f64))],
119 0.0,
120 ));
121 }
122 }
123
124 // 5. Binary upper bound: x_{t,u} ≤ 1 for all t,u
125 for t in 0..n {
126 for u in 0..d {
127 constraints.push(LinearConstraint::le(vec![(x(t, u), 1.0)], 1.0));
128 }
129 }
130
131 // Objective: minimize M
132 let objective = vec![(m_var, 1.0)];
133
134 ReductionPSToILP {
135 target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize),
136 num_tasks: n,
137 d_max: d,
138 }
139 }
140}
141
/// Canonical example(s) of this rule for the example database.
#[cfg(feature = "example-db")]
pub(crate) fn canonical_rule_example_specs() -> Vec<crate::example_db::specs::RuleExampleSpec> {
    vec![crate::example_db::specs::RuleExampleSpec {
        id: "preemptivescheduling_to_ilp",
        build: || {
            // 3 tasks, lengths [2,1,2], 2 processors, precedence (0,2)
            let source = PreemptiveScheduling::new(vec![2, 1, 2], 2, vec![(0, 2)]);
            crate::example_db::specs::rule_example_via_ilp::<_, i32>(source)
        },
    }]
}

// Unit tests live out-of-line under unit_tests/, mirrored via #[path].
#[cfg(test)]
#[path = "../unit_tests/rules/preemptivescheduling_ilp.rs"]
mod tests;