Deterministic parallelized snaps (#6527)

* initial pass

Signed-off-by: Jess Frazelle <github@jessfraz.com>

updates, changes, more updates, serde variant name, fixes, some sorting, sort the edges, cleanups, add bs-to-kcl

Signed-off-by: Jess Frazelle <github@jessfraz.com>

* fixes

Signed-off-by: Jess Frazelle <github@jessfraz.com>

* updates

Signed-off-by: Jess Frazelle <github@jessfraz.com>

* fixes

Signed-off-by: Jess Frazelle <github@jessfraz.com>

* fixes

Signed-off-by: Jess Frazelle <github@jessfraz.com>

* updates

Signed-off-by: Jess Frazelle <github@jessfraz.com>

---------

Signed-off-by: Jess Frazelle <github@jessfraz.com>
Jess Frazelle authored 2025-04-29 06:38:52 -07:00, committed by GitHub
parent a173a82d59
commit 77e3efde9a
302 changed files with 233255 additions and 244172 deletions
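
In short, this PR makes the parallelized snapshot tests deterministic: artifact commands, artifacts, operations, and source ranges gain orderings (mostly by module ID and source range, with per-variant tie-breaks such as Artifact::rank()), and the test harness sorts everything before rendering snapshots. Below is a minimal standalone sketch of that pattern; the SrcRange and Node types are invented for illustration and are not the kcl-lib types.

// Minimal, self-contained sketch of the determinism approach: results that
// arrive in a nondeterministic (parallel) order get a stable sort key
// (a per-kind rank, then module id and source span) and are sorted before
// the snapshot is rendered. Illustrative types only, not the kcl-lib ones.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct SrcRange {
    module_id: u32,
    start: usize,
    end: usize,
}

#[derive(Debug)]
enum Node {
    Plane(SrcRange),
    Path(SrcRange),
    Segment(SrcRange),
}

impl Node {
    // A fixed rank breaks ties between different kinds, mirroring Artifact::rank().
    fn rank(&self) -> u8 {
        match self {
            Node::Plane(_) => 0,
            Node::Path(_) => 1,
            Node::Segment(_) => 2,
        }
    }
    fn range(&self) -> SrcRange {
        match self {
            Node::Plane(r) | Node::Path(r) | Node::Segment(r) => *r,
        }
    }
}

fn main() {
    // Pretend these arrived out of order from parallel execution.
    let mut nodes = vec![
        Node::Segment(SrcRange { module_id: 0, start: 40, end: 60 }),
        Node::Plane(SrcRange { module_id: 1, start: 0, end: 10 }),
        Node::Plane(SrcRange { module_id: 0, start: 0, end: 10 }),
    ];
    nodes.sort_by(|a, b| a.rank().cmp(&b.rank()).then_with(|| a.range().cmp(&b.range())));
    // The "snapshot" (here just Debug output) no longer depends on arrival order.
    println!("{nodes:#?}");
}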

View File

@ -38,7 +38,27 @@ pub struct ArtifactCommand {
pub command: ModelingCmd,
}
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, Hash, ts_rs::TS, JsonSchema)]
impl PartialOrd for ArtifactCommand {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
// Order by the source range.
let range = self.range.cmp(&other.range);
if range != std::cmp::Ordering::Equal {
return Some(range);
}
#[cfg(test)]
{
// If the ranges are equal, order by the serde variant.
Some(
crate::variant_name::variant_name(&self.command)
.cmp(&crate::variant_name::variant_name(&other.command)),
)
}
#[cfg(not(test))]
self.cmd_id.partial_cmp(&other.cmd_id)
}
}
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, Ord, PartialOrd, Hash, ts_rs::TS, JsonSchema)]
#[ts(export_to = "Artifact.ts")]
pub struct ArtifactId(Uuid);
@ -194,7 +214,7 @@ pub struct Sweep {
pub code_ref: CodeRef,
}
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, ts_rs::TS)]
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord, ts_rs::TS)]
#[ts(export_to = "Artifact.ts")]
#[serde(rename_all = "camelCase")]
pub enum SweepSubType {
@ -245,6 +265,8 @@ pub struct Wall {
/// This is for the sketch-on-face plane, not for the wall itself. Traverse
/// to the extrude and/or segment to get the wall's code_ref.
pub face_code_ref: CodeRef,
/// The ID of the command that produced the data for this wall.
pub cmd_id: uuid::Uuid,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, ts_rs::TS)]
@ -263,7 +285,7 @@ pub struct Cap {
pub face_code_ref: CodeRef,
}
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, ts_rs::TS)]
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Ord, PartialOrd, Eq, ts_rs::TS)]
#[ts(export_to = "Artifact.ts")]
#[serde(rename_all = "camelCase")]
pub enum CapSubType {
@ -278,12 +300,13 @@ pub struct SweepEdge {
pub id: ArtifactId,
pub sub_type: SweepEdgeSubType,
pub seg_id: ArtifactId,
pub cmd_id: uuid::Uuid,
pub sweep_id: ArtifactId,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub common_surface_ids: Vec<ArtifactId>,
}
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, ts_rs::TS)]
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Ord, PartialOrd, Eq, ts_rs::TS)]
#[ts(export_to = "Artifact.ts")]
#[serde(rename_all = "camelCase")]
pub enum SweepEdgeSubType {
@ -305,7 +328,7 @@ pub struct EdgeCut {
pub code_ref: CodeRef,
}
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, ts_rs::TS)]
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, PartialOrd, Ord, Eq, ts_rs::TS)]
#[ts(export_to = "Artifact.ts")]
#[serde(rename_all = "camelCase")]
pub enum EdgeCutSubType {
@ -362,6 +385,122 @@ pub enum Artifact {
Helix(Helix),
}
impl Artifact {
pub(crate) fn rank(&self) -> u8 {
match self {
Artifact::Plane(_) => 0,
Artifact::StartSketchOnPlane(_) => 1,
Artifact::StartSketchOnFace(_) => 2,
Artifact::Path(_) => 3,
Artifact::Segment(_) => 4,
Artifact::Solid2d(_) => 5,
Artifact::Sweep(_) => 6,
Artifact::CompositeSolid(_) => 7,
Artifact::Wall(_) => 8,
Artifact::Cap(Cap { sub_type, .. }) if *sub_type == CapSubType::Start => 9,
Artifact::Cap(Cap { sub_type, .. }) if *sub_type == CapSubType::End => 10,
Artifact::Cap(_) => 11,
Artifact::SweepEdge(SweepEdge { sub_type, .. }) if *sub_type == SweepEdgeSubType::Adjacent => 12,
Artifact::SweepEdge(SweepEdge { sub_type, .. }) if *sub_type == SweepEdgeSubType::Opposite => 13,
Artifact::SweepEdge(_) => 14,
Artifact::EdgeCut(_) => 15,
Artifact::EdgeCutEdge(_) => 16,
Artifact::Helix(_) => 17,
}
}
}
impl PartialOrd for Artifact {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
// Compare like variants field by field (source range, parent IDs, sub-type),
// and fall back to rank() when the variants differ.
match (self, other) {
(Artifact::SweepEdge(a), Artifact::SweepEdge(b)) => {
if a.sub_type != b.sub_type {
return Some(a.sub_type.cmp(&b.sub_type));
}
if a.sweep_id != b.sweep_id {
return Some(a.sweep_id.cmp(&b.sweep_id));
}
if a.cmd_id != b.cmd_id {
return Some(a.cmd_id.cmp(&b.cmd_id));
}
Some(a.id.cmp(&b.id))
}
(Artifact::Sweep(a), Artifact::Sweep(b)) => {
if a.code_ref.range != b.code_ref.range {
return Some(a.code_ref.range.cmp(&b.code_ref.range));
}
Some(a.id.cmp(&b.id))
}
// Sort the planes by their code_ref range.
(Artifact::Plane(a), Artifact::Plane(b)) => {
if a.code_ref.range != b.code_ref.range {
return Some(a.code_ref.range.cmp(&b.code_ref.range));
}
Some(a.id.cmp(&b.id))
}
// Sort the paths by their code_ref range.
(Artifact::Path(a), Artifact::Path(b)) => {
if a.code_ref.range != b.code_ref.range {
return Some(a.code_ref.range.cmp(&b.code_ref.range));
}
Some(a.id.cmp(&b.id))
}
// Sort the segments by their code_ref range.
(Artifact::Segment(a), Artifact::Segment(b)) => {
if a.code_ref.range != b.code_ref.range {
return Some(a.code_ref.range.cmp(&b.code_ref.range));
}
Some(a.id.cmp(&b.id))
}
// Sort the solid2ds by their path ID, then by their ID.
(Artifact::Solid2d(a), Artifact::Solid2d(b)) => {
if a.path_id != b.path_id {
return Some(a.path_id.cmp(&b.path_id));
}
Some(a.id.cmp(&b.id))
}
// Sort the walls by their sweep ID, command ID, face code_ref range, then segment ID.
(Artifact::Wall(a), Artifact::Wall(b)) => {
if a.sweep_id != b.sweep_id {
return Some(a.sweep_id.cmp(&b.sweep_id));
}
if a.cmd_id != b.cmd_id {
return Some(a.cmd_id.cmp(&b.cmd_id));
}
if a.face_code_ref.range != b.face_code_ref.range {
return Some(a.face_code_ref.range.cmp(&b.face_code_ref.range));
}
if a.seg_id != b.seg_id {
return Some(a.seg_id.cmp(&b.seg_id));
}
Some(a.id.cmp(&b.id))
}
// Sort the caps by their sub-type, sweep ID, then face code_ref range.
(Artifact::Cap(a), Artifact::Cap(b)) => {
if a.sub_type != b.sub_type {
return Some(a.sub_type.cmp(&b.sub_type));
}
if a.sweep_id != b.sweep_id {
return Some(a.sweep_id.cmp(&b.sweep_id));
}
if a.face_code_ref.range != b.face_code_ref.range {
return Some(a.face_code_ref.range.cmp(&b.face_code_ref.range));
}
Some(a.id.cmp(&b.id))
}
(Artifact::CompositeSolid(a), Artifact::CompositeSolid(b)) => Some(a.id.cmp(&b.id)),
(Artifact::StartSketchOnFace(a), Artifact::StartSketchOnFace(b)) => Some(a.id.cmp(&b.id)),
(Artifact::StartSketchOnPlane(a), Artifact::StartSketchOnPlane(b)) => Some(a.id.cmp(&b.id)),
// Otherwise fall back to the fixed rank: planes first, then start-sketch-on-plane/face,
// paths, segments, solid2ds, sweeps, composite solids, walls, caps, sweep edges,
// edge cuts, edge cut edges, and finally helices.
_ => Some(self.rank().cmp(&other.rank())),
}
}
}
impl Artifact {
pub(crate) fn id(&self) -> ArtifactId {
match self {
@ -533,6 +672,13 @@ impl ArtifactGraph {
pub fn len(&self) -> usize {
self.map.len()
}
/// Used to make the mermaid tests deterministic.
#[cfg(test)]
pub(crate) fn sort(&mut self) {
self.map
.sort_by(|_ak, av, _bk, bv| av.partial_cmp(bv).unwrap_or(std::cmp::Ordering::Equal));
}
}
pub(super) fn build_artifact_graph(
@ -709,6 +855,7 @@ fn artifacts_to_update(
sweep_id: wall.sweep_id,
path_ids: wall.path_ids.clone(),
face_code_ref: wall.face_code_ref.clone(),
cmd_id: artifact_command.cmd_id,
})]);
}
Some(Artifact::Cap(cap)) => {
@ -769,6 +916,7 @@ fn artifacts_to_update(
sweep_id: wall.sweep_id,
path_ids: vec![id],
face_code_ref: wall.face_code_ref.clone(),
cmd_id: artifact_command.cmd_id,
}));
}
if let Some(Artifact::Cap(cap)) = plane {
@ -933,6 +1081,7 @@ fn artifacts_to_update(
range: sketch_on_face_source_range,
path_to_node: Vec::new(),
},
cmd_id: artifact_command.cmd_id,
}));
let mut new_seg = seg.clone();
new_seg.surface_id = Some(face_id);
@ -1044,6 +1193,7 @@ fn artifacts_to_update(
id: response_edge_id,
sub_type,
seg_id: edge_id,
cmd_id: artifact_command.cmd_id,
sweep_id: sweep.id,
common_surface_ids: Vec::new(),
}));

View File

@ -194,6 +194,7 @@ impl ArtifactGraph {
let mut next_id = 1_u32;
let mut stable_id_map = FnvHashMap::default();
for id in self.map.keys() {
stable_id_map.insert(*id, next_id);
next_id = next_id.checked_add(1).unwrap();
@ -452,6 +453,7 @@ impl ArtifactGraph {
}
// Output the edges.
edges.par_sort_by(|ak, _, bk, _| (if ak.0 == bk.0 { ak.1.cmp(&bk.1) } else { ak.0.cmp(&bk.0) }));
for ((source_id, target_id), edge) in edges {
let extra = match edge.kind {
// Extra length. This is needed to make the graph layout more

View File

@ -49,6 +49,33 @@ pub enum Operation {
GroupEnd,
}
/// Ordering for operations in the timeline. We use this for multi-threaded
/// snapshotting, so that the output is deterministic.
impl PartialOrd for Operation {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(match (self, other) {
(Self::StdLibCall { source_range: a, .. }, Self::StdLibCall { source_range: b, .. }) => a.cmp(b),
(Self::StdLibCall { source_range: a, .. }, Self::KclStdLibCall { source_range: b, .. }) => a.cmp(b),
(Self::StdLibCall { source_range: a, .. }, Self::GroupBegin { source_range: b, .. }) => a.cmp(b),
(Self::StdLibCall { .. }, Self::GroupEnd) => std::cmp::Ordering::Less,
(Self::KclStdLibCall { source_range: a, .. }, Self::KclStdLibCall { source_range: b, .. }) => a.cmp(b),
(Self::KclStdLibCall { source_range: a, .. }, Self::StdLibCall { source_range: b, .. }) => a.cmp(b),
(Self::KclStdLibCall { source_range: a, .. }, Self::GroupBegin { source_range: b, .. }) => a.cmp(b),
(Self::KclStdLibCall { .. }, Self::GroupEnd) => std::cmp::Ordering::Less,
(Self::GroupBegin { source_range: a, .. }, Self::GroupBegin { source_range: b, .. }) => a.cmp(b),
(Self::GroupBegin { source_range: a, .. }, Self::StdLibCall { source_range: b, .. }) => a.cmp(b),
(Self::GroupBegin { source_range: a, .. }, Self::KclStdLibCall { source_range: b, .. }) => a.cmp(b),
(Self::GroupBegin { .. }, Self::GroupEnd) => std::cmp::Ordering::Less,
(Self::GroupEnd, Self::StdLibCall { .. }) => std::cmp::Ordering::Greater,
(Self::GroupEnd, Self::KclStdLibCall { .. }) => std::cmp::Ordering::Greater,
(Self::GroupEnd, Self::GroupBegin { .. }) => std::cmp::Ordering::Greater,
(Self::GroupEnd, Self::GroupEnd) => std::cmp::Ordering::Equal,
})
}
}
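
The pairwise match above reduces to: compare by source range whenever both operations carry one, and push GroupEnd markers after everything else. A self-contained sketch of that pattern, with an invented Event enum (not the kcl-lib Operation type):

// Illustrative sketch: order timeline events by source range, with "end"
// markers always sorting last. Event and its fields are invented here.
use std::cmp::Ordering;

#[derive(Debug)]
enum Event {
    Call { source_range: (u32, usize, usize) }, // (module_id, start, end)
    GroupEnd,
}

impl Event {
    fn cmp_for_snapshot(&self, other: &Self) -> Ordering {
        match (self, other) {
            (Event::Call { source_range: a }, Event::Call { source_range: b }) => a.cmp(b),
            (Event::Call { .. }, Event::GroupEnd) => Ordering::Less,
            (Event::GroupEnd, Event::Call { .. }) => Ordering::Greater,
            (Event::GroupEnd, Event::GroupEnd) => Ordering::Equal,
        }
    }
}

fn main() {
    let mut events = vec![
        Event::GroupEnd,
        Event::Call { source_range: (1, 0, 4) },
        Event::Call { source_range: (0, 10, 20) },
    ];
    events.sort_by(|a, b| a.cmp_for_snapshot(b));
    // Module 0 call, then module 1 call, then the GroupEnd marker.
    println!("{events:?}");
}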
impl Operation {
/// If the variant is `StdLibCall`, set the `is_error` field.
pub(crate) fn set_std_lib_call_is_error(&mut self, is_err: bool) {

View File

@ -717,37 +717,9 @@ impl ExecutorContext {
self.run_concurrent(program, exec_state, false).await
}
/// Perform the execution of a program.
///
/// You can optionally pass in some initialization memory for partial
/// execution.
///
/// To access non-fatal errors and warnings, extract them from the `ExecState`.
pub async fn run_single_threaded(
&self,
program: &crate::Program,
exec_state: &mut ExecState,
) -> Result<(EnvironmentRef, Option<ModelingSessionData>), KclErrorWithOutputs> {
exec_state.add_root_module_contents(program);
#[cfg(test)]
{
exec_state.single_threaded = true;
}
self.eval_prelude(exec_state, SourceRange::synthetic())
.await
.map_err(KclErrorWithOutputs::no_outputs)?;
self.inner_run(program, exec_state, false).await
}
/// Perform the execution of a program using an (experimental!) concurrent
/// Perform the execution of a program using a concurrent
/// execution model. This has the same signature as [Self::run].
///
/// For now -- do not use this unless you're willing to accept some
/// breakage.
///
/// You can optionally pass in some initialization memory for partial
/// execution.
///

View File

@ -32,9 +32,6 @@ pub struct ExecState {
pub(super) global: GlobalState,
pub(super) mod_local: ModuleState,
pub(super) exec_context: Option<super::ExecutorContext>,
/// If we should not parallelize execution.
#[cfg(test)]
pub single_threaded: bool,
}
pub type ModuleInfoMap = IndexMap<ModuleId, ModuleInfo>;
@ -96,8 +93,6 @@ impl ExecState {
global: GlobalState::new(&exec_context.settings),
mod_local: ModuleState::new(None, ProgramMemory::new(), Default::default()),
exec_context: Some(exec_context.clone()),
#[cfg(test)]
single_threaded: false,
}
}
@ -108,8 +103,6 @@ impl ExecState {
global,
mod_local: ModuleState::new(None, ProgramMemory::new(), Default::default()),
exec_context: Some(exec_context.clone()),
#[cfg(test)]
single_threaded: false,
};
}

View File

@ -76,6 +76,8 @@ pub mod std;
pub mod test_server;
mod thread;
mod unparser;
#[cfg(test)]
mod variant_name;
pub mod walk;
#[cfg(target_arch = "wasm32")]
mod wasm;

View File

@ -14,7 +14,9 @@ use crate::{
};
/// Identifier of a source file. Uses a u32 to keep the size small.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize, ts_rs::TS, JsonSchema)]
#[derive(
Debug, Default, Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Hash, Deserialize, Serialize, ts_rs::TS, JsonSchema,
)]
#[ts(export)]
pub struct ModuleId(u32);

View File

@ -1,5 +1,3 @@
#[cfg(feature = "artifact-graph")]
use std::collections::HashMap;
use std::{
panic::{catch_unwind, AssertUnwindSafe},
path::{Path, PathBuf},
@ -53,8 +51,11 @@ where
settings.set_snapshot_path(Path::new("..").join(&test.output_dir));
settings.set_prepend_module_to_snapshot(false);
settings.set_description(format!("{operation} {}.kcl", &test.name));
// Sorting maps makes them easier to diff.
settings.set_sort_maps(true);
// We don't do it on the flowchart
if operation != "Artifact graph flowchart" {
// Sorting maps makes them easier to diff.
settings.set_sort_maps(true);
}
// Replace UUIDs with the string "[uuid]", because otherwise the tests would constantly
// be changing the UUID. This is a stopgap measure until we make the engine more deterministic.
settings.add_filter(
@ -157,12 +158,9 @@ async fn execute_test(test: &Test, render_to_png: bool, export_step: bool) {
let ast = crate::Program::parse_no_errs(&input).unwrap();
// Run the program.
let exec_res = crate::test_server::execute_and_snapshot_ast_single_threaded(
ast,
Some(test.input_dir.join(&test.entry_point)),
export_step,
)
.await;
let exec_res =
crate::test_server::execute_and_snapshot_ast(ast, Some(test.input_dir.join(&test.entry_point)), export_step)
.await;
match exec_res {
Ok((exec_state, env_ref, png, step)) => {
let fail_path = test.output_dir.join("execution_error.snap");
@ -259,35 +257,23 @@ fn assert_common_snapshots(
artifact_commands: Vec<ArtifactCommand>,
artifact_graph: ArtifactGraph,
) {
let operations = {
// Make the operations deterministic by sorting them by their module ID,
// then by their range.
let mut operations = operations.clone();
operations.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
operations
};
let artifact_commands = {
// Due to our newfound concurrency, we're going to mess with the
// artifact_commands a bit -- we're going to maintain the order,
// but only for a given module ID. This means the artifact_commands
// is no longer meaningful, but it is deterministic and will hopefully
// catch meaningful changes in behavior.
// We sort by the source range, like we do for the operations.
let mut artifact_commands_map = artifact_commands
.into_iter()
.map(|v| (v.range.module_id().as_usize(), v))
.fold(
HashMap::<usize, Vec<ArtifactCommand>>::new(),
|mut map, (module_id, el)| {
let mut v = map.remove(&module_id).unwrap_or_default();
v.push(el);
map.insert(module_id, v);
map
},
);
let mut artifact_commands_keys = artifact_commands_map.keys().cloned().collect::<Vec<_>>();
artifact_commands_keys.sort();
let artifact_commands: Vec<ArtifactCommand> = artifact_commands_keys
.iter()
.flat_map(|idx| artifact_commands_map.remove(idx).unwrap())
.collect();
assert_eq!(0, artifact_commands_map.len());
let mut artifact_commands = artifact_commands.clone();
artifact_commands.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
artifact_commands
};
@ -316,6 +302,10 @@ fn assert_common_snapshots(
}));
let result3 = catch_unwind(AssertUnwindSafe(|| {
assert_snapshot(test, "Artifact graph flowchart", || {
let mut artifact_graph = artifact_graph.clone();
// Sort the map by artifact where we can.
artifact_graph.sort();
let flowchart = artifact_graph
.to_mermaid_flowchart()
.unwrap_or_else(|e| format!("Failed to convert artifact graph to flowchart: {e}"));

View File

@ -21,6 +21,27 @@ impl From<[usize; 3]> for SourceRange {
}
}
impl Ord for SourceRange {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
// Sort by module id first, then by start and end.
let module_id_cmp = self.module_id().cmp(&other.module_id());
if module_id_cmp != std::cmp::Ordering::Equal {
return module_id_cmp;
}
let start_cmp = self.start().cmp(&other.start());
if start_cmp != std::cmp::Ordering::Equal {
return start_cmp;
}
self.end().cmp(&other.end())
}
}
impl PartialOrd for SourceRange {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
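
With Ord in place, source ranges can be sorted directly or used as sort keys elsewhere. A small usage sketch, assuming the `[usize; 3]` array passed to From is laid out as `[start, end, module_id]` (an assumption; adjust if the actual layout differs):

// Hedged usage sketch: sorting source ranges yields module-then-position order.
let mut ranges = vec![
    SourceRange::from([10, 20, 1]),
    SourceRange::from([0, 5, 1]),
    SourceRange::from([30, 40, 0]),
];
ranges.sort();
// Expected order: (module 0, 30..40), (module 1, 0..5), (module 1, 10..20).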
impl From<&SourceRange> for miette::SourceSpan {
fn from(source_range: &SourceRange) -> Self {
let length = source_range.end() - source_range.start();

View File

@ -337,14 +337,18 @@ pub(crate) async fn do_post_extrude<'a>(
let next_adjacent_edge_uuid = exec_state.next_uuid();
let get_all_edge_faces_opposite_uuid = exec_state.next_uuid();
let get_all_edge_faces_next_uuid = exec_state.next_uuid();
#[cfg(test)]
let single_threaded = exec_state.single_threaded;
#[cfg(all(not(test), not(target_arch = "wasm32")))]
#[cfg(any(not(test), not(feature = "artifact-graph"), not(target_arch = "wasm32")))]
#[allow(unused_variables)]
let single_threaded = false;
// When running in vitest, we need to run this in a single thread,
// because its worker threads are not reliable.
#[cfg(all(target_arch = "wasm32", not(test)))]
#[cfg(target_arch = "wasm32")]
let single_threaded = crate::wasm::vitest::running_in_vitest();
// If we are running in a test, for the artifact graph to be deterministic (and
// not fail when, say, a fillet runs concurrently), we need to make sure that the
// async tasks are done before we return.
#[cfg(all(test, feature = "artifact-graph", not(target_arch = "wasm32")))]
let single_threaded = true;
// Get faces for original edge
// Since this one is batched we can just run it.

View File

@ -21,7 +21,7 @@ pub struct RequestBody {
pub async fn execute_and_snapshot(code: &str, current_file: Option<PathBuf>) -> Result<image::DynamicImage, ExecError> {
let ctx = new_context(true, current_file).await?;
let program = Program::parse_no_errs(code).map_err(KclErrorWithOutputs::no_outputs)?;
let res = do_execute_and_snapshot(&ctx, program, false)
let res = do_execute_and_snapshot(&ctx, program)
.await
.map(|(_, _, snap)| snap)
.map_err(|err| err.error);
@ -32,13 +32,13 @@ pub async fn execute_and_snapshot(code: &str, current_file: Option<PathBuf>) ->
/// Executes a kcl program and takes a snapshot of the result.
/// This returns the bytes of the snapshot.
#[cfg(test)]
pub async fn execute_and_snapshot_ast_single_threaded(
pub async fn execute_and_snapshot_ast(
ast: Program,
current_file: Option<PathBuf>,
with_export_step: bool,
) -> Result<(ExecState, EnvironmentRef, image::DynamicImage, Option<Vec<u8>>), ExecErrorWithState> {
let ctx = new_context(true, current_file).await?;
let (exec_state, env, img) = match do_execute_and_snapshot(&ctx, ast, true).await {
let (exec_state, env, img) = match do_execute_and_snapshot(&ctx, ast).await {
Ok((exec_state, env_ref, img)) => (exec_state, env_ref, img),
Err(err) => {
// If there was an error executing the program, return it.
@ -73,7 +73,7 @@ pub async fn execute_and_snapshot_no_auth(
) -> Result<(image::DynamicImage, EnvironmentRef), ExecError> {
let ctx = new_context(false, current_file).await?;
let program = Program::parse_no_errs(code).map_err(KclErrorWithOutputs::no_outputs)?;
let res = do_execute_and_snapshot(&ctx, program, false)
let res = do_execute_and_snapshot(&ctx, program)
.await
.map(|(_, env_ref, snap)| (snap, env_ref))
.map_err(|err| err.error);
@ -84,15 +84,12 @@ pub async fn execute_and_snapshot_no_auth(
async fn do_execute_and_snapshot(
ctx: &ExecutorContext,
program: Program,
single_threaded: bool,
) -> Result<(ExecState, EnvironmentRef, image::DynamicImage), ExecErrorWithState> {
let mut exec_state = ExecState::new(ctx);
let result = if single_threaded {
ctx.run_single_threaded(&program, &mut exec_state).await
} else {
ctx.run(&program, &mut exec_state).await
}
.map_err(|err| ExecErrorWithState::new(err.into(), exec_state.clone()))?;
let result = ctx
.run(&program, &mut exec_state)
.await
.map_err(|err| ExecErrorWithState::new(err.into(), exec_state.clone()))?;
for e in exec_state.errors() {
if e.severity.is_err() {
return Err(ExecErrorWithState::new(

View File

@ -0,0 +1,15 @@
use serde::Serialize;
use serde_json::Value;
/// Extract the variant tag of any enum that uses Serde.
#[allow(dead_code)]
pub fn variant_name<T: Serialize>(v: &T) -> String {
// 1. Serialize to JSON Value.
match serde_json::to_value(v).unwrap() {
// internally-tagged: {"type": "Foo", ...}
Value::Object(ref map) if map.get("type").is_some() => map["type"].as_str().unwrap().to_string(),
// externally-tagged: {"Foo": {...}}
Value::Object(map) => map.keys().next().unwrap().as_str().to_string(),
_ => panic!("untagged enum or unsupported representation"),
}
}
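
A hypothetical test for variant_name, exercising the two serde representations the function understands (internally tagged and externally tagged); the Shape and Segment enums are invented for illustration:

#[cfg(test)]
mod tests {
    use super::variant_name;
    use serde::Serialize;

    #[derive(Serialize)]
    #[serde(tag = "type")] // internally tagged: {"type": "Circle", ...}
    enum Shape {
        Circle { radius: f64 },
    }

    #[derive(Serialize)] // externally tagged (serde default): {"Line": {...}}
    enum Segment {
        Line { length: f64 },
    }

    #[test]
    fn picks_the_variant_tag() {
        assert_eq!(variant_name(&Shape::Circle { radius: 1.0 }), "Circle");
        assert_eq!(variant_name(&Segment::Line { length: 2.0 }), "Line");
    }
}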