Deterministic artifact graph - bring back the clockwork universe (#7483)

* Change to use deterministic artifact graph

* Update output to use the new order

* Fix to clear everything when scene is cleared

* Fix lots

* Update artifact graph output for the last time

* Delete unused sorting code

* Remove unneeded cfg

* Fix to preserve top-level artifacts when there's an error

* Update output after error fix

* Add better doc comments

* Remove duplicate global operations

* Update comments

* Update ignored tests that were flaky

* Update graph for new samples after rebase

* Fix test assertion message
Jonathan Tran
2025-06-16 13:55:24 -04:00
committed by GitHub
parent d6278cf075
commit aae34cf1e5
197 changed files with 79222 additions and 69896 deletions
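The diff below moves artifact-command collection off the engine connection and into per-module execution state. For orientation, here is a minimal, self-contained sketch of that idea. The types are simplified stand-ins modeled on the ModuleArtifactState and process_commands shown in the state.rs hunk, not the real kcl-lib definitions: each module records its own commands while it executes, and the graph builder drains those buffers in a fixed module order instead of draining a single Vec<ArtifactCommand> shared behind the EngineManager.

// Illustrative sketch only (not the actual kcl-lib implementation): simplified
// stand-ins for ArtifactCommand, ModuleArtifactState, and process_commands,
// modeled on the diff below, showing why per-module buffers give a stable order.
#[derive(Debug, Clone)]
struct ArtifactCommand {
    cmd_id: u64,
    command: String,
}

#[derive(Debug, Default)]
struct ModuleArtifactState {
    // Commands recorded while this module executed, in execution order.
    unprocessed_commands: Vec<ArtifactCommand>,
    // Commands that have already been handed to the graph builder.
    commands: Vec<ArtifactCommand>,
}

impl ModuleArtifactState {
    // Drain the unprocessed commands, remember them as processed, and return a
    // copy for the graph builder. Because the buffer is emptied, a cached module
    // contributes its commands only once across incremental runs.
    fn process_commands(&mut self) -> Vec<ArtifactCommand> {
        let unprocessed = std::mem::take(&mut self.unprocessed_commands);
        self.commands.extend(unprocessed.clone());
        unprocessed
    }
}

fn main() {
    // Modules are visited in a fixed order (imported modules, then the root
    // module), so concatenating their buffers is deterministic, unlike draining
    // one shared engine-side Vec that was filled as batches were flushed and
    // async commands fired.
    let mut lib = ModuleArtifactState::default();
    let mut root = ModuleArtifactState::default();
    lib.unprocessed_commands.push(ArtifactCommand { cmd_id: 1, command: "start_path".into() });
    root.unprocessed_commands.push(ArtifactCommand { cmd_id: 2, command: "extend_path".into() });
    root.unprocessed_commands.push(ArtifactCommand { cmd_id: 3, command: "extrude".into() });

    let mut graph_input = Vec::new();
    for module in [&mut lib, &mut root] {
        graph_input.extend(module.process_commands());
    }
    assert_eq!(graph_input.iter().map(|c| c.cmd_id).collect::<Vec<_>>(), vec![1, 2, 3]);
    // Already-processed commands stay with their owning module.
    assert_eq!(lib.commands.len() + root.commands.len(), graph_input.len());
    println!("{} commands in deterministic order, first = {}", graph_input.len(), graph_input[0].command);
}

Draining with std::mem::take, rather than cloning and leaving the buffer in place, matches the intent of the process_commands change in state.rs: with execution caching, a module that is not re-run does not re-contribute its commands to the next graph build.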

View File

@ -18,8 +18,6 @@ use tokio::sync::{mpsc, oneshot, RwLock};
use tokio_tungstenite::tungstenite::Message as WsMsg;
use uuid::Uuid;
#[cfg(feature = "artifact-graph")]
use crate::execution::ArtifactCommand;
use crate::{
engine::{AsyncTasks, EngineManager, EngineStats},
errors::{KclError, KclErrorDetails},
@ -45,8 +43,6 @@ pub struct EngineConnection {
socket_health: Arc<RwLock<SocketHealth>>,
batch: Arc<RwLock<Vec<(WebSocketRequest, SourceRange)>>>,
batch_end: Arc<RwLock<IndexMap<uuid::Uuid, (WebSocketRequest, SourceRange)>>>,
#[cfg(feature = "artifact-graph")]
artifact_commands: Arc<RwLock<Vec<ArtifactCommand>>>,
ids_of_async_commands: Arc<RwLock<IndexMap<Uuid, SourceRange>>>,
/// The default planes for the scene.
@ -378,8 +374,6 @@ impl EngineConnection {
socket_health,
batch: Arc::new(RwLock::new(Vec::new())),
batch_end: Arc::new(RwLock::new(IndexMap::new())),
#[cfg(feature = "artifact-graph")]
artifact_commands: Arc::new(RwLock::new(Vec::new())),
ids_of_async_commands,
default_planes: Default::default(),
session_data,
@ -404,11 +398,6 @@ impl EngineManager for EngineConnection {
self.responses.responses.clone()
}
#[cfg(feature = "artifact-graph")]
fn artifact_commands(&self) -> Arc<RwLock<Vec<ArtifactCommand>>> {
self.artifact_commands.clone()
}
fn ids_of_async_commands(&self) -> Arc<RwLock<IndexMap<Uuid, SourceRange>>> {
self.ids_of_async_commands.clone()
}

View File

@ -16,8 +16,6 @@ use kittycad_modeling_cmds::{self as kcmc, websocket::ModelingCmdReq, ImportFile
use tokio::sync::RwLock;
use uuid::Uuid;
#[cfg(feature = "artifact-graph")]
use crate::execution::ArtifactCommand;
use crate::{
engine::{AsyncTasks, EngineStats},
errors::KclError,
@ -30,8 +28,6 @@ use crate::{
pub struct EngineConnection {
batch: Arc<RwLock<Vec<(WebSocketRequest, SourceRange)>>>,
batch_end: Arc<RwLock<IndexMap<uuid::Uuid, (WebSocketRequest, SourceRange)>>>,
#[cfg(feature = "artifact-graph")]
artifact_commands: Arc<RwLock<Vec<ArtifactCommand>>>,
ids_of_async_commands: Arc<RwLock<IndexMap<Uuid, SourceRange>>>,
responses: Arc<RwLock<IndexMap<Uuid, WebSocketResponse>>>,
/// The default planes for the scene.
@ -45,8 +41,6 @@ impl EngineConnection {
Ok(EngineConnection {
batch: Arc::new(RwLock::new(Vec::new())),
batch_end: Arc::new(RwLock::new(IndexMap::new())),
#[cfg(feature = "artifact-graph")]
artifact_commands: Arc::new(RwLock::new(Vec::new())),
ids_of_async_commands: Arc::new(RwLock::new(IndexMap::new())),
responses: Arc::new(RwLock::new(IndexMap::new())),
default_planes: Default::default(),
@ -74,11 +68,6 @@ impl crate::engine::EngineManager for EngineConnection {
&self.stats
}
#[cfg(feature = "artifact-graph")]
fn artifact_commands(&self) -> Arc<RwLock<Vec<ArtifactCommand>>> {
self.artifact_commands.clone()
}
fn ids_of_async_commands(&self) -> Arc<RwLock<IndexMap<Uuid, SourceRange>>> {
self.ids_of_async_commands.clone()
}

View File

@ -13,7 +13,7 @@ use wasm_bindgen::prelude::*;
use crate::{
engine::{AsyncTasks, EngineStats},
errors::{KclError, KclErrorDetails},
execution::{ArtifactCommand, DefaultPlanes, IdGenerator},
execution::{DefaultPlanes, IdGenerator},
SourceRange,
};
@ -56,7 +56,6 @@ pub struct EngineConnection {
response_context: Arc<ResponseContext>,
batch: Arc<RwLock<Vec<(WebSocketRequest, SourceRange)>>>,
batch_end: Arc<RwLock<IndexMap<uuid::Uuid, (WebSocketRequest, SourceRange)>>>,
artifact_commands: Arc<RwLock<Vec<ArtifactCommand>>>,
ids_of_async_commands: Arc<RwLock<IndexMap<Uuid, SourceRange>>>,
/// The default planes for the scene.
default_planes: Arc<RwLock<Option<DefaultPlanes>>>,
@ -129,7 +128,6 @@ impl EngineConnection {
batch: Arc::new(RwLock::new(Vec::new())),
batch_end: Arc::new(RwLock::new(IndexMap::new())),
response_context,
artifact_commands: Arc::new(RwLock::new(Vec::new())),
ids_of_async_commands: Arc::new(RwLock::new(IndexMap::new())),
default_planes: Default::default(),
stats: Default::default(),
@ -277,10 +275,6 @@ impl crate::engine::EngineManager for EngineConnection {
&self.stats
}
fn artifact_commands(&self) -> Arc<RwLock<Vec<ArtifactCommand>>> {
self.artifact_commands.clone()
}
fn ids_of_async_commands(&self) -> Arc<RwLock<IndexMap<Uuid, SourceRange>>> {
self.ids_of_async_commands.clone()
}

View File

@ -19,8 +19,6 @@ use std::{
pub use async_tasks::AsyncTasks;
use indexmap::IndexMap;
#[cfg(feature = "artifact-graph")]
use kcmc::id::ModelingCmdId;
use kcmc::{
each_cmd as mcmd,
length_unit::LengthUnit,
@ -39,8 +37,6 @@ use serde::{Deserialize, Serialize};
use tokio::sync::RwLock;
use uuid::Uuid;
#[cfg(feature = "artifact-graph")]
use crate::execution::ArtifactCommand;
use crate::{
errors::{KclError, KclErrorDetails},
execution::{types::UnitLen, DefaultPlanes, IdGenerator, PlaneInfo, Point3d},
@ -113,10 +109,6 @@ pub trait EngineManager: std::fmt::Debug + Send + Sync + 'static {
/// Get the command responses from the engine.
fn responses(&self) -> Arc<RwLock<IndexMap<Uuid, WebSocketResponse>>>;
/// Get the artifact commands that have accumulated so far.
#[cfg(feature = "artifact-graph")]
fn artifact_commands(&self) -> Arc<RwLock<Vec<ArtifactCommand>>>;
/// Get the ids of the async commands we are waiting for.
fn ids_of_async_commands(&self) -> Arc<RwLock<IndexMap<Uuid, SourceRange>>>;
@ -133,18 +125,6 @@ pub trait EngineManager: std::fmt::Debug + Send + Sync + 'static {
std::mem::take(&mut *self.batch_end().write().await)
}
/// Clear all artifact commands that have accumulated so far.
#[cfg(feature = "artifact-graph")]
async fn clear_artifact_commands(&self) {
self.artifact_commands().write().await.clear();
}
/// Take the artifact commands that have accumulated so far and clear them.
#[cfg(feature = "artifact-graph")]
async fn take_artifact_commands(&self) -> Vec<ArtifactCommand> {
std::mem::take(&mut *self.artifact_commands().write().await)
}
/// Take the ids of async commands that have accumulated so far and clear them.
async fn take_ids_of_async_commands(&self) -> IndexMap<Uuid, SourceRange> {
std::mem::take(&mut *self.ids_of_async_commands().write().await)
@ -237,11 +217,6 @@ pub trait EngineManager: std::fmt::Debug + Send + Sync + 'static {
// Otherwise the hooks below won't work.
self.flush_batch(false, source_range).await?;
// Ensure artifact commands are cleared so that we don't accumulate them
// across runs.
#[cfg(feature = "artifact-graph")]
self.clear_artifact_commands().await;
// Do the after clear scene hook.
self.clear_scene_post_hook(id_generator, source_range).await?;
@ -341,28 +316,6 @@ pub trait EngineManager: std::fmt::Debug + Send + Sync + 'static {
Ok(())
}
#[cfg(feature = "artifact-graph")]
async fn handle_artifact_command(
&self,
cmd: &ModelingCmd,
cmd_id: ModelingCmdId,
id_to_source_range: &HashMap<Uuid, SourceRange>,
) -> Result<(), KclError> {
let cmd_id = *cmd_id.as_ref();
let range = id_to_source_range
.get(&cmd_id)
.copied()
.ok_or_else(|| KclError::internal(format!("Failed to get source range for command ID: {:?}", cmd_id)))?;
// Add artifact command.
self.artifact_commands().write().await.push(ArtifactCommand {
cmd_id,
range,
command: cmd.clone(),
});
Ok(())
}
/// Re-run the command to apply the settings.
async fn reapply_settings(
&self,
@ -481,11 +434,6 @@ pub trait EngineManager: std::fmt::Debug + Send + Sync + 'static {
// Add the command ID to the list of async commands.
self.ids_of_async_commands().write().await.insert(id, source_range);
// Add to artifact commands.
#[cfg(feature = "artifact-graph")]
self.handle_artifact_command(cmd, id.into(), &HashMap::from([(id, source_range)]))
.await?;
// Fire off the command now, but don't wait for the response, we don't care about it.
self.inner_fire_modeling_cmd(
id,
@ -555,24 +503,6 @@ pub trait EngineManager: std::fmt::Debug + Send + Sync + 'static {
}
}
// Do the artifact commands.
#[cfg(feature = "artifact-graph")]
for (req, _) in orig_requests.iter() {
match &req {
WebSocketRequest::ModelingCmdBatchReq(ModelingBatch { requests, .. }) => {
for request in requests {
self.handle_artifact_command(&request.cmd, request.cmd_id, &id_to_source_range)
.await?;
}
}
WebSocketRequest::ModelingCmdReq(request) => {
self.handle_artifact_command(&request.cmd, request.cmd_id, &id_to_source_range)
.await?;
}
_ => {}
}
}
self.stats().batches_sent.fetch_add(1, Ordering::Relaxed);
// We pop off the responses to cleanup our mappings.

View File

@ -165,7 +165,7 @@ pub struct Sweep {
pub code_ref: CodeRef,
}
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq, PartialOrd, Ord, ts_rs::TS)]
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq, ts_rs::TS)]
#[ts(export_to = "Artifact.ts")]
#[serde(rename_all = "camelCase")]
pub enum SweepSubType {
@ -239,7 +239,7 @@ pub struct Cap {
pub cmd_id: uuid::Uuid,
}
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Ord, PartialOrd, Eq, ts_rs::TS)]
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq, ts_rs::TS)]
#[ts(export_to = "Artifact.ts")]
#[serde(rename_all = "camelCase")]
pub enum CapSubType {
@ -263,7 +263,7 @@ pub struct SweepEdge {
pub common_surface_ids: Vec<ArtifactId>,
}
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Ord, PartialOrd, Eq, ts_rs::TS)]
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq, ts_rs::TS)]
#[ts(export_to = "Artifact.ts")]
#[serde(rename_all = "camelCase")]
pub enum SweepEdgeSubType {
@ -285,7 +285,7 @@ pub struct EdgeCut {
pub code_ref: CodeRef,
}
#[derive(Debug, Clone, Copy, Serialize, PartialEq, PartialOrd, Ord, Eq, ts_rs::TS)]
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq, ts_rs::TS)]
#[ts(export_to = "Artifact.ts")]
#[serde(rename_all = "camelCase")]
pub enum EdgeCutSubType {
@ -342,135 +342,6 @@ pub enum Artifact {
Helix(Helix),
}
impl Artifact {
pub(crate) fn rank(&self) -> u8 {
match self {
Artifact::Plane(_) => 0,
Artifact::StartSketchOnPlane(_) => 1,
Artifact::StartSketchOnFace(_) => 2,
Artifact::Path(_) => 3,
Artifact::Segment(_) => 4,
Artifact::Solid2d(_) => 5,
Artifact::Sweep(_) => 6,
Artifact::CompositeSolid(_) => 7,
Artifact::Wall(_) => 8,
Artifact::Cap(Cap { sub_type, .. }) if *sub_type == CapSubType::Start => 9,
Artifact::Cap(Cap { sub_type, .. }) if *sub_type == CapSubType::End => 10,
Artifact::Cap(_) => 11,
Artifact::SweepEdge(SweepEdge { sub_type, .. }) if *sub_type == SweepEdgeSubType::Adjacent => 12,
Artifact::SweepEdge(SweepEdge { sub_type, .. }) if *sub_type == SweepEdgeSubType::Opposite => 13,
Artifact::SweepEdge(_) => 14,
Artifact::EdgeCut(_) => 15,
Artifact::EdgeCutEdge(_) => 16,
Artifact::Helix(_) => 17,
}
}
}
impl PartialOrd for Artifact {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
// The only thing we want to sort is if we have two sweep edges, we want
// to sort them by the sub_type.
match (self, other) {
(Artifact::SweepEdge(a), Artifact::SweepEdge(b)) => {
if a.sub_type != b.sub_type {
return Some(a.sub_type.cmp(&b.sub_type));
}
if a.sweep_id != b.sweep_id {
return Some(a.sweep_id.cmp(&b.sweep_id));
}
if a.cmd_id != b.cmd_id {
return Some(a.cmd_id.cmp(&b.cmd_id));
}
if a.index != b.index {
return Some(a.index.cmp(&b.index));
}
Some(a.id.cmp(&b.id))
}
(Artifact::EdgeCut(a), Artifact::EdgeCut(b)) => {
if a.code_ref.range != b.code_ref.range {
return Some(a.code_ref.range.cmp(&b.code_ref.range));
}
Some(a.id.cmp(&b.id))
}
(Artifact::EdgeCutEdge(a), Artifact::EdgeCutEdge(b)) => Some(a.edge_cut_id.cmp(&b.edge_cut_id)),
(Artifact::Sweep(a), Artifact::Sweep(b)) => {
if a.code_ref.range != b.code_ref.range {
return Some(a.code_ref.range.cmp(&b.code_ref.range));
}
Some(a.id.cmp(&b.id))
}
// Sort the planes by their code_ref range.
(Artifact::Plane(a), Artifact::Plane(b)) => {
if a.code_ref.range != b.code_ref.range {
return Some(a.code_ref.range.cmp(&b.code_ref.range));
}
Some(a.id.cmp(&b.id))
}
// Sort the paths by their code_ref range.
(Artifact::Path(a), Artifact::Path(b)) => {
if a.code_ref.range != b.code_ref.range {
return Some(a.code_ref.range.cmp(&b.code_ref.range));
}
Some(a.id.cmp(&b.id))
}
// Sort the segments by their code_ref range.
(Artifact::Segment(a), Artifact::Segment(b)) => {
if a.code_ref.range != b.code_ref.range {
return Some(a.code_ref.range.cmp(&b.code_ref.range));
}
Some(a.id.cmp(&b.id))
}
// Sort the solid2d by their id.
(Artifact::Solid2d(a), Artifact::Solid2d(b)) => {
if a.path_id != b.path_id {
return Some(a.path_id.cmp(&b.path_id));
}
Some(a.id.cmp(&b.id))
}
// Sort the walls by their code_ref range.
(Artifact::Wall(a), Artifact::Wall(b)) => {
if a.sweep_id != b.sweep_id {
return Some(a.sweep_id.cmp(&b.sweep_id));
}
if a.cmd_id != b.cmd_id {
return Some(a.cmd_id.cmp(&b.cmd_id));
}
if a.face_code_ref.range != b.face_code_ref.range {
return Some(a.face_code_ref.range.cmp(&b.face_code_ref.range));
}
if a.seg_id != b.seg_id {
return Some(a.seg_id.cmp(&b.seg_id));
}
Some(a.id.cmp(&b.id))
}
// Sort the caps by their code_ref range.
(Artifact::Cap(a), Artifact::Cap(b)) => {
if a.sub_type != b.sub_type {
return Some(a.sub_type.cmp(&b.sub_type));
}
if a.cmd_id != b.cmd_id {
return Some(a.cmd_id.cmp(&b.cmd_id));
}
if a.sweep_id != b.sweep_id {
return Some(a.sweep_id.cmp(&b.sweep_id));
}
if a.face_code_ref.range != b.face_code_ref.range {
return Some(a.face_code_ref.range.cmp(&b.face_code_ref.range));
}
Some(a.id.cmp(&b.id))
}
(Artifact::CompositeSolid(a), Artifact::CompositeSolid(b)) => Some(a.id.cmp(&b.id)),
(Artifact::StartSketchOnFace(a), Artifact::StartSketchOnFace(b)) => Some(a.id.cmp(&b.id)),
(Artifact::StartSketchOnPlane(a), Artifact::StartSketchOnPlane(b)) => Some(a.id.cmp(&b.id)),
// Planes are first, then paths, then segments, then solids2ds, then sweeps, then
// walls, then caps, then sweep edges, then edge cuts, then edge cut edges, then
// helixes.
_ => Some(self.rank().cmp(&other.rank())),
}
}
}
impl Artifact {
pub(crate) fn id(&self) -> ArtifactId {
match self {
@ -673,17 +544,15 @@ impl ArtifactGraph {
self.map.values()
}
pub fn clear(&mut self) {
self.map.clear();
self.item_count = 0;
}
/// Consume the artifact graph and return the map of artifacts.
fn into_map(self) -> IndexMap<ArtifactId, Artifact> {
self.map
}
/// Used to make the mermaid tests deterministic.
#[cfg(test)]
pub(crate) fn sort(&mut self) {
self.map
.sort_by(|_ak, av, _bk, bv| av.partial_cmp(bv).unwrap_or(std::cmp::Ordering::Equal));
}
}
/// Build the artifact graph from the artifact commands and the responses. The

View File

@ -109,7 +109,7 @@ impl GlobalState {
variables: self.main.exec_state.variables(self.main.result_env),
filenames: self.exec_state.filenames(),
#[cfg(feature = "artifact-graph")]
operations: self.exec_state.artifacts.operations,
operations: self.exec_state.root_module_artifacts.operations,
#[cfg(feature = "artifact-graph")]
artifact_graph: self.exec_state.artifacts.graph,
errors: self.exec_state.errors,

View File

@ -84,7 +84,10 @@ impl ExecutorContext {
preserve_mem: bool,
module_id: ModuleId,
path: &ModulePath,
) -> Result<(Option<KclValue>, EnvironmentRef, Vec<String>, ModuleArtifactState), KclError> {
) -> Result<
(Option<KclValue>, EnvironmentRef, Vec<String>, ModuleArtifactState),
(KclError, Option<ModuleArtifactState>),
> {
crate::log::log(format!("enter module {path} {}", exec_state.stack()));
let mut local_state = ModuleState::new(path.clone(), exec_state.stack().memory.clone(), Some(module_id));
@ -94,7 +97,8 @@ impl ExecutorContext {
let no_prelude = self
.handle_annotations(program.inner_attrs.iter(), crate::execution::BodyType::Root, exec_state)
.await?;
.await
.map_err(|err| (err, None))?;
if !preserve_mem {
exec_state.mut_stack().push_new_root_env(!no_prelude);
@ -113,12 +117,14 @@ impl ExecutorContext {
std::mem::swap(&mut exec_state.mod_local, &mut local_state);
local_state.artifacts
} else {
Default::default()
std::mem::take(&mut exec_state.mod_local.artifacts)
};
crate::log::log(format!("leave {path}"));
result.map(|result| (result, env_ref, local_state.module_exports, module_artifacts))
result
.map_err(|err| (err, Some(module_artifacts.clone())))
.map(|result| (result, env_ref, local_state.module_exports, module_artifacts))
}
/// Execute an AST's program.
@ -630,7 +636,9 @@ impl ExecutorContext {
.await;
exec_state.global.mod_loader.leave_module(path);
result.map_err(|err| {
// TODO: ModuleArtifactState is getting dropped here when there's an
// error. Should we propagate it for non-root modules?
result.map_err(|(err, _)| {
if let KclError::ImportCycle { .. } = err {
// It was an import cycle. Keep the original message.
err.override_source_ranges(vec![source_range])

View File

@ -519,6 +519,12 @@ impl ExecutorContext {
exec_state: &mut ExecState,
source_range: crate::execution::SourceRange,
) -> Result<(), KclError> {
// Ensure artifacts are cleared so that we don't accumulate them across
// runs.
exec_state.mod_local.artifacts.clear();
exec_state.global.root_module_artifacts.clear();
exec_state.global.artifacts.clear();
self.engine
.clear_scene(&mut exec_state.mod_local.id_generator, source_range)
.await
@ -650,8 +656,8 @@ impl ExecutorContext {
let (new_universe, new_universe_map) =
self.get_universe(&program, &mut new_exec_state).await?;
let clear_scene = new_universe.keys().any(|key| {
let id = new_universe[key].1;
let clear_scene = new_universe.values().any(|value| {
let id = value.1;
match (
cached_state.exec_state.get_source(id),
new_exec_state.global.get_source(id),
@ -965,11 +971,10 @@ impl ExecutorContext {
// Since we haven't technically started executing the root module yet,
// the operations corresponding to the imports will be missing unless we
// track them here.
#[cfg(all(test, feature = "artifact-graph"))]
exec_state
.global
.root_module_artifacts
.extend(exec_state.mod_local.artifacts.clone());
.extend(std::mem::take(&mut exec_state.mod_local.artifacts));
self.inner_run(program, exec_state, preserve_mem).await
}
@ -1114,7 +1119,7 @@ impl ExecutorContext {
// Because of execution caching, we may start with operations from a
// previous run.
#[cfg(feature = "artifact-graph")]
let start_op = exec_state.global.artifacts.operations.len();
let start_op = exec_state.global.root_module_artifacts.operations.len();
self.eval_prelude(exec_state, SourceRange::from(program).start_as_range())
.await?;
@ -1127,32 +1132,39 @@ impl ExecutorContext {
ModuleId::default(),
&ModulePath::Main,
)
.await;
#[cfg(all(test, feature = "artifact-graph"))]
let exec_result = exec_result.map(|(_, env_ref, _, module_artifacts)| {
exec_state.global.root_module_artifacts.extend(module_artifacts);
env_ref
});
#[cfg(not(all(test, feature = "artifact-graph")))]
let exec_result = exec_result.map(|(_, env_ref, _, _)| env_ref);
.await
.map(|(_, env_ref, _, module_artifacts)| {
// We need to extend because it may already have operations from
// imports.
exec_state.global.root_module_artifacts.extend(module_artifacts);
env_ref
})
.map_err(|(err, module_artifacts)| {
if let Some(module_artifacts) = module_artifacts {
// We need to extend because it may already have operations
// from imports.
exec_state.global.root_module_artifacts.extend(module_artifacts);
}
err
});
#[cfg(feature = "artifact-graph")]
{
// Fill in NodePath for operations.
let cached_body_items = exec_state.global.artifacts.cached_body_items();
for op in exec_state.global.artifacts.operations.iter_mut().skip(start_op) {
for op in exec_state
.global
.root_module_artifacts
.operations
.iter_mut()
.skip(start_op)
{
op.fill_node_paths(program, cached_body_items);
}
#[cfg(test)]
{
for op in exec_state.global.root_module_artifacts.operations.iter_mut() {
op.fill_node_paths(program, cached_body_items);
}
for module in exec_state.global.module_infos.values_mut() {
if let ModuleRepr::Kcl(_, Some((_, _, _, module_artifacts))) = &mut module.repr {
for op in &mut module_artifacts.operations {
op.fill_node_paths(program, cached_body_items);
}
for module in exec_state.global.module_infos.values_mut() {
if let ModuleRepr::Kcl(_, Some((_, _, _, module_artifacts))) = &mut module.repr {
for op in &mut module_artifacts.operations {
op.fill_node_paths(program, cached_body_items);
}
}
}
@ -1177,7 +1189,7 @@ impl ExecutorContext {
async fn eval_prelude(&self, exec_state: &mut ExecState, source_range: SourceRange) -> Result<(), KclError> {
if exec_state.stack().memory.requires_std() {
#[cfg(feature = "artifact-graph")]
let initial_ops = exec_state.global.artifacts.operations.len();
let initial_ops = exec_state.mod_local.artifacts.operations.len();
let path = vec!["std".to_owned(), "prelude".to_owned()];
let resolved_path = ModulePath::from_std_import_path(&path)?;
@ -1194,7 +1206,7 @@ impl ExecutorContext {
// TODO: Should we also clear them out of each module so that they
// don't appear in test output?
#[cfg(feature = "artifact-graph")]
exec_state.global.artifacts.operations.truncate(initial_ops);
exec_state.mod_local.artifacts.operations.truncate(initial_ops);
}
Ok(())
@ -2274,6 +2286,39 @@ w = f() + f()
ctx2.close().await;
}
#[cfg(feature = "artifact-graph")]
#[tokio::test(flavor = "multi_thread")]
async fn sim_sketch_mode_real_mock_real() {
let ctx = ExecutorContext::new_with_default_client().await.unwrap();
let code = r#"sketch001 = startSketchOn(XY)
profile001 = startProfile(sketch001, at = [0, 0])
|> line(end = [10, 0])
|> line(end = [0, 10])
|> line(end = [-10, 0])
|> line(end = [0, -10])
|> close()
"#;
let program = crate::Program::parse_no_errs(code).unwrap();
let result = ctx.run_with_caching(program).await.unwrap();
assert_eq!(result.operations.len(), 1);
let mock_ctx = ExecutorContext::new_mock(None).await;
let mock_program = crate::Program::parse_no_errs(code).unwrap();
let mock_result = mock_ctx.run_mock(mock_program, true).await.unwrap();
assert_eq!(mock_result.operations.len(), 0);
let code2 = code.to_owned()
+ r#"
extrude001 = extrude(profile001, length = 10)
"#;
let program2 = crate::Program::parse_no_errs(&code2).unwrap();
let result = ctx.run_with_caching(program2).await.unwrap();
assert_eq!(result.operations.len(), 2);
ctx.close().await;
mock_ctx.close().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn read_tag_version() {
let ast = r#"fn bar(@t) {

View File

@ -2,10 +2,6 @@ use std::sync::Arc;
use anyhow::Result;
use indexmap::IndexMap;
#[cfg(feature = "artifact-graph")]
use kcmc::websocket::WebSocketResponse;
#[cfg(feature = "artifact-graph")]
use kittycad_modeling_cmds as kcmc;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
@ -50,31 +46,20 @@ pub(super) struct GlobalState {
pub mod_loader: ModuleLoader,
/// Errors and warnings.
pub errors: Vec<CompilationError>,
#[cfg_attr(not(feature = "artifact-graph"), allow(dead_code))]
/// Global artifacts that represent the entire program.
pub artifacts: ArtifactState,
#[cfg_attr(not(all(test, feature = "artifact-graph")), expect(dead_code))]
/// Artifacts for only the root module.
pub root_module_artifacts: ModuleArtifactState,
}
#[cfg(feature = "artifact-graph")]
#[derive(Debug, Clone, Default)]
pub(super) struct ArtifactState {
/// Output map of UUIDs to artifacts.
/// Internal map of UUIDs to exec artifacts. This needs to persist across
/// executions to allow the graph building to refer to cached artifacts.
pub artifacts: IndexMap<ArtifactId, Artifact>,
/// Output commands to allow building the artifact graph by the caller.
/// These are accumulated in the [`ExecutorContext`] but moved here for
/// convenience of the execution cache.
pub commands: Vec<ArtifactCommand>,
/// Responses from the engine for `artifact_commands`. We need to cache
/// this so that we can build the artifact graph. These are accumulated in
/// the [`ExecutorContext`] but moved here for convenience of the execution
/// cache.
pub responses: IndexMap<Uuid, WebSocketResponse>,
/// Output artifact graph.
pub graph: ArtifactGraph,
/// Operations that have been performed in execution order, for display in
/// the Feature Tree.
pub operations: Vec<Operation>,
}
#[cfg(not(feature = "artifact-graph"))]
@ -82,16 +67,23 @@ pub(super) struct ArtifactState {
pub(super) struct ArtifactState {}
/// Artifact state for a single module.
#[cfg(all(test, feature = "artifact-graph"))]
#[cfg(feature = "artifact-graph")]
#[derive(Debug, Clone, Default, PartialEq, Serialize)]
pub struct ModuleArtifactState {
/// Internal map of UUIDs to exec artifacts.
pub artifacts: IndexMap<ArtifactId, Artifact>,
/// Outgoing engine commands that have not yet been processed and integrated
/// into the artifact graph.
#[serde(skip)]
pub unprocessed_commands: Vec<ArtifactCommand>,
/// Outgoing engine commands.
pub commands: Vec<ArtifactCommand>,
/// Operations that have been performed in execution order.
/// Operations that have been performed in execution order, for display in
/// the Feature Tree.
pub operations: Vec<Operation>,
}
#[cfg(not(all(test, feature = "artifact-graph")))]
#[cfg(not(feature = "artifact-graph"))]
#[derive(Debug, Clone, Default, PartialEq, Serialize)]
pub struct ModuleArtifactState {}
@ -114,6 +106,7 @@ pub(super) struct ModuleState {
pub settings: MetaSettings,
pub(super) explicit_length_units: bool,
pub(super) path: ModulePath,
/// Artifacts for only this module.
pub artifacts: ModuleArtifactState,
}
@ -170,7 +163,7 @@ impl ExecState {
variables: self.mod_local.variables(main_ref),
filenames: self.global.filenames(),
#[cfg(feature = "artifact-graph")]
operations: self.global.artifacts.operations,
operations: self.global.root_module_artifacts.operations,
#[cfg(feature = "artifact-graph")]
artifact_graph: self.global.artifacts.graph,
errors: self.global.errors,
@ -210,23 +203,20 @@ impl ExecState {
#[cfg(feature = "artifact-graph")]
pub(crate) fn add_artifact(&mut self, artifact: Artifact) {
let id = artifact.id();
self.global.artifacts.artifacts.insert(id, artifact);
self.mod_local.artifacts.artifacts.insert(id, artifact);
}
pub(crate) fn push_op(&mut self, op: Operation) {
#[cfg(all(test, feature = "artifact-graph"))]
self.mod_local.artifacts.operations.push(op.clone());
#[cfg(feature = "artifact-graph")]
self.global.artifacts.operations.push(op);
self.mod_local.artifacts.operations.push(op.clone());
#[cfg(not(feature = "artifact-graph"))]
drop(op);
}
#[cfg(feature = "artifact-graph")]
pub(crate) fn push_command(&mut self, command: ArtifactCommand) {
#[cfg(all(test, feature = "artifact-graph"))]
self.mod_local.artifacts.commands.push(command);
#[cfg(not(all(test, feature = "artifact-graph")))]
self.mod_local.artifacts.unprocessed_commands.push(command);
#[cfg(not(feature = "artifact-graph"))]
drop(command);
}
@ -282,11 +272,6 @@ impl ExecState {
&self.global.module_infos
}
#[cfg(all(test, feature = "artifact-graph"))]
pub(crate) fn operations(&self) -> &[Operation] {
&self.global.artifacts.operations
}
#[cfg(all(test, feature = "artifact-graph"))]
pub(crate) fn root_module_artifact_state(&self) -> &ModuleArtifactState {
&self.global.root_module_artifacts
@ -344,9 +329,9 @@ impl ExecState {
error,
self.errors().to_vec(),
#[cfg(feature = "artifact-graph")]
self.global.artifacts.operations.clone(),
self.global.root_module_artifacts.operations.clone(),
#[cfg(feature = "artifact-graph")]
self.global.artifacts.commands.clone(),
Default::default(),
#[cfg(feature = "artifact-graph")]
self.global.artifacts.graph.clone(),
module_id_to_module_path,
@ -361,8 +346,30 @@ impl ExecState {
engine: &Arc<Box<dyn EngineManager>>,
program: NodeRef<'_, crate::parsing::ast::types::Program>,
) -> Result<(), KclError> {
let new_commands = engine.take_artifact_commands().await;
let mut new_commands = Vec::new();
let mut new_exec_artifacts = IndexMap::new();
for module in self.global.module_infos.values_mut() {
match &mut module.repr {
ModuleRepr::Kcl(_, Some((_, _, _, module_artifacts)))
| ModuleRepr::Foreign(_, Some((_, module_artifacts))) => {
new_commands.extend(module_artifacts.process_commands());
new_exec_artifacts.extend(module_artifacts.artifacts.clone());
}
ModuleRepr::Root | ModuleRepr::Kcl(_, None) | ModuleRepr::Foreign(_, None) | ModuleRepr::Dummy => {}
}
}
// Take from the module artifacts so that we don't try to process them
// again next time due to execution caching.
new_commands.extend(self.global.root_module_artifacts.process_commands());
// Note: These will get re-processed, but since we're just adding them
// to a map, it's fine.
new_exec_artifacts.extend(self.global.root_module_artifacts.artifacts.clone());
let new_responses = engine.take_responses().await;
// Move the artifacts into ExecState global to simplify cache
// management.
self.global.artifacts.artifacts.extend(new_exec_artifacts);
let initial_graph = self.global.artifacts.graph.clone();
// Build the artifact graph.
@ -373,10 +380,6 @@ impl ExecState {
&mut self.global.artifacts.artifacts,
initial_graph,
);
// Move the artifact commands and responses into ExecState to
// simplify cache management and error creation.
self.global.artifacts.commands.extend(new_commands);
self.global.artifacts.responses.extend(new_responses);
let artifact_graph = graph_result?;
self.global.artifacts.graph = artifact_graph;
@ -433,20 +436,54 @@ impl GlobalState {
}
}
#[cfg(feature = "artifact-graph")]
impl ArtifactState {
#[cfg(feature = "artifact-graph")]
pub fn cached_body_items(&self) -> usize {
self.graph.item_count
}
pub(crate) fn clear(&mut self) {
#[cfg(feature = "artifact-graph")]
{
self.artifacts.clear();
self.graph.clear();
}
}
}
impl ModuleArtifactState {
pub(crate) fn clear(&mut self) {
#[cfg(feature = "artifact-graph")]
{
self.artifacts.clear();
self.unprocessed_commands.clear();
self.commands.clear();
self.operations.clear();
}
}
#[cfg(not(feature = "artifact-graph"))]
pub(crate) fn extend(&mut self, _other: ModuleArtifactState) {}
/// When self is a cached state, extend it with new state.
#[cfg(all(test, feature = "artifact-graph"))]
#[cfg(feature = "artifact-graph")]
pub(crate) fn extend(&mut self, other: ModuleArtifactState) {
self.artifacts.extend(other.artifacts);
self.unprocessed_commands.extend(other.unprocessed_commands);
self.commands.extend(other.commands);
self.operations.extend(other.operations);
}
// Move unprocessed artifact commands so that we don't try to process them
// again next time due to execution caching. Returns a clone of the
// commands that were moved.
#[cfg(feature = "artifact-graph")]
pub(crate) fn process_commands(&mut self) -> Vec<ArtifactCommand> {
let unprocessed = std::mem::take(&mut self.unprocessed_commands);
let new_module_commands = unprocessed.clone();
self.commands.extend(unprocessed);
new_module_commands
}
}
impl ModuleState {

View File

@ -109,12 +109,12 @@ pub use unparser::{recast_dir, walk_dir};
// Rather than make executor public and make lots of it pub(crate), just re-export into a new module.
// Ideally we wouldn't export these things at all, they should only be used for testing.
pub mod exec {
#[cfg(feature = "artifact-graph")]
pub use crate::execution::ArtifactCommand;
pub use crate::execution::{
types::{NumericType, UnitAngle, UnitLen, UnitType},
DefaultPlanes, IdGenerator, KclValue, PlaneType, Sketch,
};
#[cfg(feature = "artifact-graph")]
pub use crate::execution::{ArtifactCommand, Operation};
}
#[cfg(target_arch = "wasm32")]

View File

@ -13,7 +13,7 @@ use crate::{
};
#[cfg(feature = "artifact-graph")]
use crate::{
execution::{ArtifactGraph, Operation},
execution::ArtifactGraph,
modules::{ModulePath, ModuleRepr},
};
@ -281,7 +281,7 @@ async fn execute_test(test: &Test, render_to_png: bool, export_step: bool) {
#[cfg(not(feature = "artifact-graph"))]
drop(module_state);
#[cfg(feature = "artifact-graph")]
assert_artifact_snapshots(test, module_state, outcome.operations, outcome.artifact_graph);
assert_artifact_snapshots(test, module_state, outcome.artifact_graph);
mem_result.unwrap();
}
Err(e) => {
@ -312,21 +312,11 @@ async fn execute_test(test: &Test, render_to_png: bool, export_step: bool) {
#[cfg(feature = "artifact-graph")]
{
let global_operations = if !error.operations.is_empty() {
error.operations
} else if let Some(exec_state) = &e.exec_state {
// Non-fatal compilation errors don't have artifact
// output attached, so we need to get it from
// ExecState.
exec_state.operations().to_vec()
} else {
Vec::new()
};
let module_state = e
.exec_state
.map(|e| e.to_module_state(&test.input_dir))
.unwrap_or_default();
assert_artifact_snapshots(test, module_state, global_operations, error.artifact_graph);
assert_artifact_snapshots(test, module_state, error.artifact_graph);
}
err_result.unwrap();
}
@ -347,7 +337,6 @@ async fn execute_test(test: &Test, render_to_png: bool, export_step: bool) {
fn assert_artifact_snapshots(
test: &Test,
module_state: IndexMap<String, ModuleArtifactState>,
global_operations: Vec<Operation>,
artifact_graph: ArtifactGraph,
) {
let module_operations = module_state
@ -391,22 +380,12 @@ fn assert_artifact_snapshots(
let is_writing = matches!(std::env::var("ZOO_SIM_UPDATE").as_deref(), Ok("always"));
if !test.skip_assert_artifact_graph || is_writing {
assert_snapshot(test, "Artifact graph flowchart", || {
let mut artifact_graph = artifact_graph.clone();
// Sort the map by artifact where we can.
artifact_graph.sort();
let flowchart = artifact_graph
.to_mermaid_flowchart()
.unwrap_or_else(|e| format!("Failed to convert artifact graph to flowchart: {e}"));
// Change the snapshot suffix so that it is rendered as a Markdown file
// in GitHub.
// Ignore the cpu cooler for now because it's being a little bitch.
if test.name != "cpu-cooler"
&& test.name != "subtract_regression08"
&& test.name != "subtract_regression10"
{
insta::assert_binary_snapshot!("artifact_graph_flowchart.md", flowchart.as_bytes().to_owned());
}
insta::assert_binary_snapshot!("artifact_graph_flowchart.md", flowchart.as_bytes().to_owned());
})
}
}));
@ -414,25 +393,6 @@ fn assert_artifact_snapshots(
result1.unwrap();
result2.unwrap();
result3.unwrap();
// The global operations should be a superset of the main module. But it
// won't always be a superset of the operations of all modules.
let repo_root = std::path::Path::new(REPO_ROOT).canonicalize().unwrap();
let root_string: String = test
.entry_point
.canonicalize()
.unwrap_or_else(|_| panic!("Should be able to canonicalize the entry point {:?}", &test.entry_point))
.strip_prefix(&repo_root)
.expect("Repo root dir should be a prefix of the entry point")
.to_string_lossy()
.into_owned();
let main_operations = module_operations
.get(&root_string)
.expect("Main module state not found");
assert!(
global_operations.len() >= main_operations.len(),
"global_operations={global_operations:#?}, main_operations={main_operations:#?}"
);
}
mod cube {