// Source: problemreductions/models/misc/preemptive_scheduling.rs
//! Preemptive Scheduling problem implementation.
//!
//! A classical NP-hard scheduling problem (Garey & Johnson A5 SS6) where
//! variable-length tasks may be split across non-contiguous time slots on
//! `m` identical processors, subject to precedence constraints.
//! The goal is to minimize the makespan (latest completion time).
use crate::registry::{FieldInfo, ProblemSchemaEntry};
use crate::traits::Problem;
use crate::types::Min;
use serde::{Deserialize, Serialize};
// Register this problem's schema with the global registry at link time (via
// the `inventory` crate) so tooling can discover it by name and introspect
// its fields without instantiating the type.
inventory::submit! {
    ProblemSchemaEntry {
        name: "PreemptiveScheduling",
        display_name: "Preemptive Scheduling",
        aliases: &[],
        dimensions: &[],
        module_path: module_path!(),
        description: "Minimize makespan for preemptive parallel-processor scheduling with precedence constraints",
        fields: &[
            FieldInfo { name: "lengths", type_name: "Vec<usize>", description: "Processing length l(t) for each task" },
            FieldInfo { name: "num_processors", type_name: "usize", description: "Number of identical processors m" },
            FieldInfo { name: "precedences", type_name: "Vec<(usize, usize)>", description: "Precedence pairs (pred, succ) — pred must finish before succ starts" },
        ],
    }
}
28
/// The Preemptive Scheduling problem.
///
/// Given `n` tasks with processing lengths `l(0), ..., l(n-1)`, `m` identical
/// processors, and a set of precedence constraints, find a preemptive schedule
/// that minimizes the makespan.
///
/// Tasks may be interrupted and resumed at later time slots (preemption).
/// A configuration is a binary vector of length `n × D_max` where
/// `D_max = sum of all lengths` is the worst-case makespan.
///
/// `config[t * D_max + u] = 1` means task `t` is processed at time slot `u`.
///
/// A valid schedule satisfies:
/// - Each task `t` is active in exactly `l(t)` time slots.
/// - At most `m` tasks are active at any time slot.
/// - For each precedence `(pred, succ)`, the last active slot of `pred` is
///   strictly less than the first active slot of `succ`.
///
/// The makespan is `max_t (last active slot of t + 1)`.
///
/// # Example
///
/// ```
/// use problemreductions::models::misc::PreemptiveScheduling;
/// use problemreductions::Problem;
///
/// let problem = PreemptiveScheduling::new(vec![2, 1], 2, vec![]);
/// // D_max = 3, config length = 2 * 3 = 6
/// // task 0 active at slots 0,1; task 1 active at slot 0
/// let config = vec![1, 1, 0, 1, 0, 0];
/// assert_eq!(problem.evaluate(&config), problemreductions::types::Min(Some(2)));
/// ```
// `Serialize` is derived, but `Deserialize` is hand-written below so that the
// struct invariants are re-validated on the way in.
#[derive(Debug, Clone, Serialize)]
pub struct PreemptiveScheduling {
    /// Processing length for each task (validated positive).
    lengths: Vec<usize>,
    /// Number of identical processors (validated positive).
    num_processors: usize,
    /// Precedence constraints: (pred, succ) means pred must finish before succ starts.
    precedences: Vec<(usize, usize)>,
}
70
/// Unvalidated mirror of [`PreemptiveScheduling`] used as the raw
/// deserialization target; the manual `Deserialize` impl below converts it
/// via `TryFrom`, which re-runs the constructor's invariant checks.
#[derive(Deserialize)]
struct PreemptiveSchedulingSerde {
    lengths: Vec<usize>,
    num_processors: usize,
    precedences: Vec<(usize, usize)>,
}
77
78impl PreemptiveScheduling {
79    fn validate(
80        lengths: &[usize],
81        num_processors: usize,
82        precedences: &[(usize, usize)],
83    ) -> Result<(), String> {
84        if lengths.contains(&0) {
85            return Err("task lengths must be positive".to_string());
86        }
87        if num_processors == 0 {
88            return Err("num_processors must be positive".to_string());
89        }
90        let n = lengths.len();
91        for &(pred, succ) in precedences {
92            if pred >= n || succ >= n {
93                return Err(format!(
94                    "precedence index out of range: ({pred}, {succ}) but num_tasks = {n}"
95                ));
96            }
97        }
98        Ok(())
99    }
100
101    /// Create a new Preemptive Scheduling instance.
102    ///
103    /// # Arguments
104    /// * `lengths` - Processing length `l(t)` for each task (must be positive)
105    /// * `num_processors` - Number of identical processors `m` (must be positive)
106    /// * `precedences` - Pairs `(pred, succ)`: task `pred` must finish before task `succ` starts
107    ///
108    /// # Panics
109    ///
110    /// Panics if any length is zero, `num_processors` is zero, or any precedence
111    /// index is out of range.
112    pub fn new(
113        lengths: Vec<usize>,
114        num_processors: usize,
115        precedences: Vec<(usize, usize)>,
116    ) -> Self {
117        Self::validate(&lengths, num_processors, &precedences)
118            .unwrap_or_else(|err| panic!("{err}"));
119        Self {
120            lengths,
121            num_processors,
122            precedences,
123        }
124    }
125
126    /// Get the number of tasks.
127    pub fn num_tasks(&self) -> usize {
128        self.lengths.len()
129    }
130
131    /// Get the number of processors.
132    pub fn num_processors(&self) -> usize {
133        self.num_processors
134    }
135
136    /// Get the number of precedence constraints.
137    pub fn num_precedences(&self) -> usize {
138        self.precedences.len()
139    }
140
141    /// Get the processing lengths.
142    pub fn lengths(&self) -> &[usize] {
143        &self.lengths
144    }
145
146    /// Get the precedence constraints.
147    pub fn precedences(&self) -> &[(usize, usize)] {
148        &self.precedences
149    }
150
151    /// Compute `D_max = sum of all task lengths` (worst-case makespan).
152    pub fn d_max(&self) -> usize {
153        self.lengths.iter().sum()
154    }
155}
156
157impl TryFrom<PreemptiveSchedulingSerde> for PreemptiveScheduling {
158    type Error = String;
159
160    fn try_from(value: PreemptiveSchedulingSerde) -> Result<Self, Self::Error> {
161        Self::validate(&value.lengths, value.num_processors, &value.precedences)?;
162        Ok(Self {
163            lengths: value.lengths,
164            num_processors: value.num_processors,
165            precedences: value.precedences,
166        })
167    }
168}
169
170impl<'de> Deserialize<'de> for PreemptiveScheduling {
171    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
172    where
173        D: serde::Deserializer<'de>,
174    {
175        let value = PreemptiveSchedulingSerde::deserialize(deserializer)?;
176        Self::try_from(value).map_err(serde::de::Error::custom)
177    }
178}
179
180impl Problem for PreemptiveScheduling {
181    const NAME: &'static str = "PreemptiveScheduling";
182    type Value = Min<usize>;
183
184    fn variant() -> Vec<(&'static str, &'static str)> {
185        crate::variant_params![]
186    }
187
188    fn dims(&self) -> Vec<usize> {
189        let d = self.d_max();
190        vec![2; self.num_tasks() * d]
191    }
192
193    fn evaluate(&self, config: &[usize]) -> Min<usize> {
194        let n = self.num_tasks();
195        let d = self.d_max();
196
197        // Check config length
198        if config.len() != n * d {
199            return Min(None);
200        }
201
202        // Check each slot is binary
203        if config.iter().any(|&v| v > 1) {
204            return Min(None);
205        }
206
207        // Check each task t is active in exactly l(t) slots
208        for t in 0..n {
209            let active: usize = config[t * d..(t + 1) * d].iter().sum();
210            if active != self.lengths[t] {
211                return Min(None);
212            }
213        }
214
215        // Check processor capacity at each time slot
216        for u in 0..d {
217            let active_count: usize = (0..n).filter(|&t| config[t * d + u] == 1).count();
218            if active_count > self.num_processors {
219                return Min(None);
220            }
221        }
222
223        // Check precedence constraints:
224        // last active slot of pred < first active slot of succ
225        for &(pred, succ) in &self.precedences {
226            let last_pred = (0..d).rev().find(|&u| config[pred * d + u] == 1);
227            let first_succ = (0..d).find(|&u| config[succ * d + u] == 1);
228            if let (Some(lp), Some(fs)) = (last_pred, first_succ) {
229                if lp >= fs {
230                    return Min(None);
231                }
232            }
233        }
234
235        // Compute makespan: max over all t of (last active slot + 1)
236        let makespan = (0..n)
237            .filter_map(|t| (0..d).rev().find(|&u| config[t * d + u] == 1))
238            .map(|last| last + 1)
239            .max()
240            .unwrap_or(0);
241
242        Min(Some(makespan))
243    }
244}
245
// NOTE(review): the configuration space has 2^(num_tasks * D_max) points,
// where D_max = sum of task lengths (see `dims`). Confirm whether the size
// expression "num_tasks * num_tasks" below is intentional shorthand or a
// typo for the D_max-based exponent.
crate::declare_variants! {
    default PreemptiveScheduling => "2^(num_tasks * num_tasks)",
}
249
#[cfg(feature = "example-db")]
pub(crate) fn canonical_model_example_specs() -> Vec<crate::example_db::specs::ModelExampleSpec> {
    // Instance: 5 tasks with lengths [2, 1, 3, 2, 1] on 2 processors, with
    // precedences 0 -> 2 and 1 -> 3. D_max = 2 + 1 + 3 + 2 + 1 = 9, so the
    // configuration vector has 5 * 9 = 45 binary entries.
    //
    // A makespan-5 schedule (optimal: total work 9 on 2 processors needs at
    // least ceil(9 / 2) = 5 slots):
    //   slot 0: t0, t1
    //   slot 1: t0, t4
    //   slot 2: t2, t3
    //   slot 3: t2, t3
    //   slot 4: t2
    // Both precedences hold: t0 ends at slot 1 < t2's first slot 2, and
    // t1 ends at slot 0 < t3's first slot 2.
    let d_max = 9;
    // Active time slots per task, indexed by task id.
    let active_slots: [&[usize]; 5] = [&[0, 1], &[0], &[2, 3, 4], &[2, 3], &[1]];
    let mut config = vec![0usize; 5 * d_max];
    for (task, slots) in active_slots.iter().enumerate() {
        for &slot in *slots {
            config[task * d_max + slot] = 1;
        }
    }
    vec![crate::example_db::specs::ModelExampleSpec {
        id: "preemptive_scheduling",
        instance: Box::new(PreemptiveScheduling::new(
            vec![2, 1, 3, 2, 1],
            2,
            vec![(0, 2), (1, 3)],
        )),
        optimal_config: config,
        optimal_value: serde_json::json!(5),
    }]
}
292
// Unit tests live out-of-tree under `unit_tests/`, mirroring this module's
// path; `#[path]` wires them in for test builds only.
#[cfg(test)]
#[path = "../../unit_tests/models/misc/preemptive_scheduling.rs"]
mod tests;