Update api spec (#256)

* YOYO NEW API SPEC!

* I have generated the latest API!

---------

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
zoo-github-actions-auth[bot] authored 2024-08-22 14:11:23 -07:00; committed by GitHub
parent 48de213887
commit d724c8e706
25 changed files with 2431 additions and 1510 deletions

File diff suppressed because it is too large

View File

@ -11,6 +11,7 @@ from ...models.file_mass import FileMass
from ...models.file_surface_area import FileSurfaceArea
from ...models.file_volume import FileVolume
from ...models.text_to_cad import TextToCad
from ...models.text_to_cad_iteration import TextToCadIteration
from ...types import Response
@ -44,6 +45,7 @@ def _parse_response(*, response: httpx.Response) -> Optional[
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
Error,
]
]:
@ -108,6 +110,15 @@ def _parse_response(*, response: httpx.Response) -> Optional[
raise TypeError()
option_text_to_cad = TextToCad(**data)
return option_text_to_cad
except ValueError:
pass
except TypeError:
pass
try:
if not isinstance(data, dict):
raise TypeError()
option_text_to_cad_iteration = TextToCadIteration(**data)
return option_text_to_cad_iteration
except ValueError:
raise
except TypeError:
@ -131,6 +142,7 @@ def _build_response(*, response: httpx.Response) -> Response[
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
Error,
]
]
@ -157,6 +169,7 @@ def sync_detailed(
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
Error,
]
]
@ -187,6 +200,7 @@ def sync(
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
Error,
]
]:
@ -216,6 +230,7 @@ async def asyncio_detailed(
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
Error,
]
]
@ -244,6 +259,7 @@ async def asyncio(
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
Error,
]
]:
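The hunks above widen the async-operation result union with TextToCadIteration. A minimal sketch of narrowing that union on the caller side, assuming get_async_operation is exported from kittycad.api.api_calls; the operation id is a placeholder and the other union members are elided:

from kittycad.api.api_calls import get_async_operation  # assumed module path
from kittycad.client import ClientFromEnv
from kittycad.models import Error, TextToCad, TextToCadIteration

client = ClientFromEnv()
result = get_async_operation.sync(id="<operation-uuid>", client=client)  # placeholder id

if isinstance(result, Error) or result is None:
    raise Exception("Error in response")
elif isinstance(result, TextToCadIteration):
    print(result.code)  # the new variant carries the revised KCL source
elif isinstance(result, TextToCad):
    print(result.id)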

View File

@ -14,7 +14,7 @@ def _get_kwargs(
*,
client: Client,
) -> Dict[str, Any]:
url = "{}/ai/kcl/completions".format(
url = "{}/ml/kcl/completions".format(
client.base_url,
) # noqa: E501
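Since _get_kwargs builds this URL internally, SDK callers of create_kcl_code_completions are unaffected by the rename from /ai/kcl/completions to /ml/kcl/completions; only code hitting the REST path directly must update. A hypothetical raw request against the renamed path, with placeholder base URL, token, and body fields:

import httpx

response = httpx.post(
    "https://api.zoo.dev/ml/kcl/completions",  # renamed from /ai/kcl/completions
    headers={"Authorization": "Bearer <token>"},  # placeholder token
    json={"prompt": "// a mounting plate", "extra": {"language": "kcl"}},
)
print(response.status_code)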

View File

@ -0,0 +1,118 @@
from typing import Any, Dict, Optional, Union
import httpx
from ...client import Client
from ...models.error import Error
from ...models.text_to_cad_iteration import TextToCadIteration
from ...models.text_to_cad_iteration_body import TextToCadIterationBody
from ...types import Response
def _get_kwargs(
body: TextToCadIterationBody,
*,
client: Client,
) -> Dict[str, Any]:
url = "{}/ml/text-to-cad/iteration".format(
client.base_url,
) # noqa: E501
headers: Dict[str, Any] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
return {
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"content": body.model_dump_json(),
}
def _parse_response(
*, response: httpx.Response
) -> Optional[Union[TextToCadIteration, Error]]:
if response.status_code == 201:
response_201 = TextToCadIteration(**response.json())
return response_201
if response.status_code == 400:
response_4XX = Error(**response.json())
return response_4XX
if response.status_code == 500:
response_5XX = Error(**response.json())
return response_5XX
return Error(**response.json())
def _build_response(
*, response: httpx.Response
) -> Response[Optional[Union[TextToCadIteration, Error]]]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
body: TextToCadIterationBody,
*,
client: Client,
) -> Response[Optional[Union[TextToCadIteration, Error]]]:
kwargs = _get_kwargs(
body=body,
client=client,
)
response = httpx.post(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(response=response)
def sync(
body: TextToCadIterationBody,
*,
client: Client,
) -> Optional[Union[TextToCadIteration, Error]]:
"""This operation is performed asynchronously, the `id` of the operation will be returned. You can use the `id` returned from the request to get status information about the async operation from the `/async/operations/{id}` endpoint.""" # noqa: E501
return sync_detailed(
body=body,
client=client,
).parsed
async def asyncio_detailed(
body: TextToCadIterationBody,
*,
client: Client,
) -> Response[Optional[Union[TextToCadIteration, Error]]]:
kwargs = _get_kwargs(
body=body,
client=client,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.post(**kwargs)
return _build_response(response=response)
async def asyncio(
body: TextToCadIterationBody,
*,
client: Client,
) -> Optional[Union[TextToCadIteration, Error]]:
"""This operation is performed asynchronously, the `id` of the operation will be returned. You can use the `id` returned from the request to get status information about the async operation from the `/async/operations/{id}` endpoint.""" # noqa: E501
return (
await asyncio_detailed(
body=body,
client=client,
)
).parsed
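The new module mirrors the sync/asyncio layout of the other ML endpoints. A minimal usage sketch with placeholder source code and prompt; per the docstring, the returned id can then be polled via the /async/operations/{id} endpoint:

from kittycad.api.ml import create_text_to_cad_iteration
from kittycad.client import ClientFromEnv
from kittycad.models import Error, TextToCadIteration
from kittycad.models.source_position import SourcePosition
from kittycad.models.source_range import SourceRange
from kittycad.models.source_range_prompt import SourceRangePrompt
from kittycad.models.text_to_cad_iteration_body import TextToCadIterationBody

client = ClientFromEnv()
result = create_text_to_cad_iteration.sync(
    client=client,
    body=TextToCadIterationBody(
        original_source_code="<original KCL source>",  # placeholder
        source_ranges=[
            SourceRangePrompt(
                prompt="make this fillet larger",  # placeholder prompt
                range=SourceRange(
                    start=SourcePosition(line=1, column=1),
                    end=SourcePosition(line=2, column=10),
                ),
            )
        ],
    ),
)
if isinstance(result, Error) or result is None:
    raise Exception("Error in response")
body: TextToCadIteration = result
print(body.id)  # poll /async/operations/{id} with this id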

View File

@ -3,14 +3,14 @@ from typing import Any, Dict, Optional
import httpx
from ...client import Client
from ...models.ai_feedback import AiFeedback
from ...models.error import Error
from ...models.ml_feedback import MlFeedback
from ...types import Response
def _get_kwargs(
id: str,
feedback: AiFeedback,
feedback: MlFeedback,
*,
client: Client,
) -> Dict[str, Any]:
@ -59,7 +59,7 @@ def _build_response(*, response: httpx.Response) -> Response[Optional[Error]]:
def sync_detailed(
id: str,
feedback: AiFeedback,
feedback: MlFeedback,
*,
client: Client,
) -> Response[Optional[Error]]:
@ -79,7 +79,7 @@ def sync_detailed(
def sync(
id: str,
feedback: AiFeedback,
feedback: MlFeedback,
*,
client: Client,
) -> Optional[Error]:
@ -94,7 +94,7 @@ def sync(
async def asyncio_detailed(
id: str,
feedback: AiFeedback,
feedback: MlFeedback,
*,
client: Client,
) -> Response[Optional[Error]]:
@ -112,7 +112,7 @@ async def asyncio_detailed(
async def asyncio(
id: str,
feedback: AiFeedback,
feedback: MlFeedback,
*,
client: Client,
) -> Optional[Error]:

View File

@ -3,8 +3,8 @@ from typing import Any, Dict, Optional, Union
import httpx
from ...client import Client
from ...models.ai_prompt import AiPrompt
from ...models.error import Error
from ...models.ml_prompt import MlPrompt
from ...types import Response
@ -13,7 +13,7 @@ def _get_kwargs(
*,
client: Client,
) -> Dict[str, Any]:
url = "{}/ai-prompts/{id}".format(
url = "{}/ml-prompts/{id}".format(
client.base_url,
id=id,
) # noqa: E501
@ -29,9 +29,9 @@ def _get_kwargs(
}
def _parse_response(*, response: httpx.Response) -> Optional[Union[AiPrompt, Error]]:
def _parse_response(*, response: httpx.Response) -> Optional[Union[MlPrompt, Error]]:
if response.status_code == 200:
response_200 = AiPrompt(**response.json())
response_200 = MlPrompt(**response.json())
return response_200
if response.status_code == 400:
response_4XX = Error(**response.json())
@ -44,7 +44,7 @@ def _parse_response(*, response: httpx.Response) -> Optional[Union[AiPrompt, Err
def _build_response(
*, response: httpx.Response
) -> Response[Optional[Union[AiPrompt, Error]]]:
) -> Response[Optional[Union[MlPrompt, Error]]]:
return Response(
status_code=response.status_code,
content=response.content,
@ -57,7 +57,7 @@ def sync_detailed(
id: str,
*,
client: Client,
) -> Response[Optional[Union[AiPrompt, Error]]]:
) -> Response[Optional[Union[MlPrompt, Error]]]:
kwargs = _get_kwargs(
id=id,
client=client,
@ -75,7 +75,7 @@ def sync(
id: str,
*,
client: Client,
) -> Optional[Union[AiPrompt, Error]]:
) -> Optional[Union[MlPrompt, Error]]:
"""This endpoint requires authentication by a Zoo employee.""" # noqa: E501
return sync_detailed(
@ -88,7 +88,7 @@ async def asyncio_detailed(
id: str,
*,
client: Client,
) -> Response[Optional[Union[AiPrompt, Error]]]:
) -> Response[Optional[Union[MlPrompt, Error]]]:
kwargs = _get_kwargs(
id=id,
client=client,
@ -104,7 +104,7 @@ async def asyncio(
id: str,
*,
client: Client,
) -> Optional[Union[AiPrompt, Error]]:
) -> Optional[Union[MlPrompt, Error]]:
"""This endpoint requires authentication by a Zoo employee.""" # noqa: E501
return (

View File

@ -3,9 +3,9 @@ from typing import Any, Dict, Optional, Union
import httpx
from ...client import Client
from ...models.ai_prompt_results_page import AiPromptResultsPage
from ...models.created_at_sort_mode import CreatedAtSortMode
from ...models.error import Error
from ...models.ml_prompt_results_page import MlPromptResultsPage
from ...types import Response
@ -16,7 +16,7 @@ def _get_kwargs(
limit: Optional[int] = None,
page_token: Optional[str] = None,
) -> Dict[str, Any]:
url = "{}/ai-prompts".format(
url = "{}/ml-prompts".format(
client.base_url,
) # noqa: E501
@ -54,9 +54,9 @@ def _get_kwargs(
def _parse_response(
*, response: httpx.Response
) -> Optional[Union[AiPromptResultsPage, Error]]:
) -> Optional[Union[MlPromptResultsPage, Error]]:
if response.status_code == 200:
response_200 = AiPromptResultsPage(**response.json())
response_200 = MlPromptResultsPage(**response.json())
return response_200
if response.status_code == 400:
response_4XX = Error(**response.json())
@ -69,7 +69,7 @@ def _parse_response(
def _build_response(
*, response: httpx.Response
) -> Response[Optional[Union[AiPromptResultsPage, Error]]]:
) -> Response[Optional[Union[MlPromptResultsPage, Error]]]:
return Response(
status_code=response.status_code,
content=response.content,
@ -84,7 +84,7 @@ def sync_detailed(
client: Client,
limit: Optional[int] = None,
page_token: Optional[str] = None,
) -> Response[Optional[Union[AiPromptResultsPage, Error]]]:
) -> Response[Optional[Union[MlPromptResultsPage, Error]]]:
kwargs = _get_kwargs(
limit=limit,
page_token=page_token,
@ -106,10 +106,10 @@ def sync(
client: Client,
limit: Optional[int] = None,
page_token: Optional[str] = None,
) -> Optional[Union[AiPromptResultsPage, Error]]:
) -> Optional[Union[MlPromptResultsPage, Error]]:
"""For text-to-cad prompts, this will always return the STEP file contents as well as the format the user originally requested.
This endpoint requires authentication by a Zoo employee.
The AI prompts are returned in order of creation, with the most recently created AI prompts first.
The ML prompts are returned in order of creation, with the most recently created ML prompts first.
""" # noqa: E501
return sync_detailed(
@ -126,7 +126,7 @@ async def asyncio_detailed(
client: Client,
limit: Optional[int] = None,
page_token: Optional[str] = None,
) -> Response[Optional[Union[AiPromptResultsPage, Error]]]:
) -> Response[Optional[Union[MlPromptResultsPage, Error]]]:
kwargs = _get_kwargs(
limit=limit,
page_token=page_token,
@ -146,10 +146,10 @@ async def asyncio(
client: Client,
limit: Optional[int] = None,
page_token: Optional[str] = None,
) -> Optional[Union[AiPromptResultsPage, Error]]:
) -> Optional[Union[MlPromptResultsPage, Error]]:
"""For text-to-cad prompts, this will always return the STEP file contents as well as the format the user originally requested.
This endpoint requires authentication by a Zoo employee.
The AI prompts are returned in order of creation, with the most recently created AI prompts first.
The ML prompts are returned in order of creation, with the most recently created ML prompts first.
""" # noqa: E501
return (

View File

@ -54,10 +54,11 @@ from kittycad.api.meta import (
from kittycad.api.ml import (
create_kcl_code_completions,
create_text_to_cad,
create_text_to_cad_iteration,
create_text_to_cad_model_feedback,
get_ai_prompt,
get_ml_prompt,
get_text_to_cad_model_for_user,
list_ai_prompts,
list_ml_prompts,
list_text_to_cad_models_for_user,
)
from kittycad.api.modeling import modeling_commands_ws
@ -154,8 +155,6 @@ from kittycad.api.users import (
from kittycad.client import ClientFromEnv
from kittycad.models import (
AccountProvider,
AiPrompt,
AiPromptResultsPage,
ApiCallQueryGroup,
ApiCallWithPrice,
ApiCallWithPriceResultsPage,
@ -180,6 +179,8 @@ from kittycad.models import (
IpAddrInfo,
KclCodeCompletionResponse,
Metadata,
MlPrompt,
MlPromptResultsPage,
Onboarding,
Org,
OrgMember,
@ -193,6 +194,7 @@ from kittycad.models import (
ServiceAccountResultsPage,
Session,
TextToCad,
TextToCadIteration,
TextToCadResultsPage,
UnitAngleConversion,
UnitAreaConversion,
@ -215,7 +217,6 @@ from kittycad.models import (
ZooProductSubscriptions,
)
from kittycad.models.add_org_member import AddOrgMember
from kittycad.models.ai_feedback import AiFeedback
from kittycad.models.api_call_query_group_by import ApiCallQueryGroupBy
from kittycad.models.api_call_status import ApiCallStatus
from kittycad.models.billing_info import BillingInfo
@ -228,6 +229,7 @@ from kittycad.models.file_import_format import FileImportFormat
from kittycad.models.idp_metadata_source import base64_encoded_xml
from kittycad.models.kcl_code_completion_params import KclCodeCompletionParams
from kittycad.models.kcl_code_completion_request import KclCodeCompletionRequest
from kittycad.models.ml_feedback import MlFeedback
from kittycad.models.modeling_app_event_type import ModelingAppEventType
from kittycad.models.modeling_app_individual_subscription_tier import (
ModelingAppIndividualSubscriptionTier,
@ -242,9 +244,13 @@ from kittycad.models.privacy_settings import PrivacySettings
from kittycad.models.rtc_sdp_type import RtcSdpType
from kittycad.models.rtc_session_description import RtcSessionDescription
from kittycad.models.saml_identity_provider_create import SamlIdentityProviderCreate
from kittycad.models.source_position import SourcePosition
from kittycad.models.source_range import SourceRange
from kittycad.models.source_range_prompt import SourceRangePrompt
from kittycad.models.store_coupon_params import StoreCouponParams
from kittycad.models.subscription_tier_price import per_user
from kittycad.models.text_to_cad_create_body import TextToCadCreateBody
from kittycad.models.text_to_cad_iteration_body import TextToCadIterationBody
from kittycad.models.unit_angle import UnitAngle
from kittycad.models.unit_area import UnitArea
from kittycad.models.unit_current import UnitCurrent
@ -388,193 +394,6 @@ async def test_get_ipinfo_async():
)
@pytest.mark.skip
def test_list_ai_prompts():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[AiPromptResultsPage, Error]] = list_ai_prompts.sync(
client=client,
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
)
if isinstance(result, Error) or result is None:
print(result)
raise Exception("Error in response")
body: AiPromptResultsPage = result
print(body)
# OR if you need more info (e.g. status_code)
response: Response[Optional[Union[AiPromptResultsPage, Error]]] = (
list_ai_prompts.sync_detailed(
client=client,
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
)
)
# OR run async
@pytest.mark.asyncio
@pytest.mark.skip
async def test_list_ai_prompts_async():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[AiPromptResultsPage, Error]] = await list_ai_prompts.asyncio(
client=client,
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
)
# OR run async with more info
response: Response[Optional[Union[AiPromptResultsPage, Error]]] = (
await list_ai_prompts.asyncio_detailed(
client=client,
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
)
)
@pytest.mark.skip
def test_get_ai_prompt():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[AiPrompt, Error]] = get_ai_prompt.sync(
client=client,
id="<uuid>",
)
if isinstance(result, Error) or result is None:
print(result)
raise Exception("Error in response")
body: AiPrompt = result
print(body)
# OR if you need more info (e.g. status_code)
response: Response[Optional[Union[AiPrompt, Error]]] = get_ai_prompt.sync_detailed(
client=client,
id="<uuid>",
)
# OR run async
@pytest.mark.asyncio
@pytest.mark.skip
async def test_get_ai_prompt_async():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[AiPrompt, Error]] = await get_ai_prompt.asyncio(
client=client,
id="<uuid>",
)
# OR run async with more info
response: Response[Optional[Union[AiPrompt, Error]]] = (
await get_ai_prompt.asyncio_detailed(
client=client,
id="<uuid>",
)
)
@pytest.mark.skip
def test_create_kcl_code_completions():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[KclCodeCompletionResponse, Error]] = (
create_kcl_code_completions.sync(
client=client,
body=KclCodeCompletionRequest(
extra=KclCodeCompletionParams(
language="<string>",
trim_by_indentation=False,
),
prompt="<string>",
stop=["<string>"],
stream=False,
suffix="<string>",
),
)
)
if isinstance(result, Error) or result is None:
print(result)
raise Exception("Error in response")
body: KclCodeCompletionResponse = result
print(body)
# OR if you need more info (e.g. status_code)
response: Response[Optional[Union[KclCodeCompletionResponse, Error]]] = (
create_kcl_code_completions.sync_detailed(
client=client,
body=KclCodeCompletionRequest(
extra=KclCodeCompletionParams(
language="<string>",
trim_by_indentation=False,
),
prompt="<string>",
stop=["<string>"],
stream=False,
suffix="<string>",
),
)
)
# OR run async
@pytest.mark.asyncio
@pytest.mark.skip
async def test_create_kcl_code_completions_async():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[KclCodeCompletionResponse, Error]] = (
await create_kcl_code_completions.asyncio(
client=client,
body=KclCodeCompletionRequest(
extra=KclCodeCompletionParams(
language="<string>",
trim_by_indentation=False,
),
prompt="<string>",
stop=["<string>"],
stream=False,
suffix="<string>",
),
)
)
# OR run async with more info
response: Response[Optional[Union[KclCodeCompletionResponse, Error]]] = (
await create_kcl_code_completions.asyncio_detailed(
client=client,
body=KclCodeCompletionRequest(
extra=KclCodeCompletionParams(
language="<string>",
trim_by_indentation=False,
),
prompt="<string>",
stop=["<string>"],
stream=False,
suffix="<string>",
),
)
)
@pytest.mark.skip
def test_create_text_to_cad():
# Create our client.
@ -993,6 +812,7 @@ def test_get_async_operation():
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
Error,
]
] = get_async_operation.sync(
@ -1012,6 +832,7 @@ def test_get_async_operation():
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
] = result
print(body)
@ -1026,6 +847,7 @@ def test_get_async_operation():
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
Error,
]
]
@ -1051,6 +873,7 @@ async def test_get_async_operation_async():
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
Error,
]
] = await get_async_operation.asyncio(
@ -1069,6 +892,7 @@ async def test_get_async_operation_async():
FileDensity,
FileSurfaceArea,
TextToCad,
TextToCadIteration,
Error,
]
]
@ -1887,6 +1711,312 @@ async def test_logout_async():
)
@pytest.mark.skip
def test_list_ml_prompts():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[MlPromptResultsPage, Error]] = list_ml_prompts.sync(
client=client,
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
)
if isinstance(result, Error) or result is None:
print(result)
raise Exception("Error in response")
body: MlPromptResultsPage = result
print(body)
# OR if you need more info (e.g. status_code)
response: Response[Optional[Union[MlPromptResultsPage, Error]]] = (
list_ml_prompts.sync_detailed(
client=client,
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
)
)
# OR run async
@pytest.mark.asyncio
@pytest.mark.skip
async def test_list_ml_prompts_async():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[MlPromptResultsPage, Error]] = await list_ml_prompts.asyncio(
client=client,
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
)
# OR run async with more info
response: Response[Optional[Union[MlPromptResultsPage, Error]]] = (
await list_ml_prompts.asyncio_detailed(
client=client,
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
)
)
@pytest.mark.skip
def test_get_ml_prompt():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[MlPrompt, Error]] = get_ml_prompt.sync(
client=client,
id="<uuid>",
)
if isinstance(result, Error) or result is None:
print(result)
raise Exception("Error in response")
body: MlPrompt = result
print(body)
# OR if you need more info (e.g. status_code)
response: Response[Optional[Union[MlPrompt, Error]]] = get_ml_prompt.sync_detailed(
client=client,
id="<uuid>",
)
# OR run async
@pytest.mark.asyncio
@pytest.mark.skip
async def test_get_ml_prompt_async():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[MlPrompt, Error]] = await get_ml_prompt.asyncio(
client=client,
id="<uuid>",
)
# OR run async with more info
response: Response[Optional[Union[MlPrompt, Error]]] = (
await get_ml_prompt.asyncio_detailed(
client=client,
id="<uuid>",
)
)
@pytest.mark.skip
def test_create_kcl_code_completions():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[KclCodeCompletionResponse, Error]] = (
create_kcl_code_completions.sync(
client=client,
body=KclCodeCompletionRequest(
extra=KclCodeCompletionParams(
language="<string>",
trim_by_indentation=False,
),
prompt="<string>",
stop=["<string>"],
stream=False,
suffix="<string>",
),
)
)
if isinstance(result, Error) or result is None:
print(result)
raise Exception("Error in response")
body: KclCodeCompletionResponse = result
print(body)
# OR if you need more info (e.g. status_code)
response: Response[Optional[Union[KclCodeCompletionResponse, Error]]] = (
create_kcl_code_completions.sync_detailed(
client=client,
body=KclCodeCompletionRequest(
extra=KclCodeCompletionParams(
language="<string>",
trim_by_indentation=False,
),
prompt="<string>",
stop=["<string>"],
stream=False,
suffix="<string>",
),
)
)
# OR run async
@pytest.mark.asyncio
@pytest.mark.skip
async def test_create_kcl_code_completions_async():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[KclCodeCompletionResponse, Error]] = (
await create_kcl_code_completions.asyncio(
client=client,
body=KclCodeCompletionRequest(
extra=KclCodeCompletionParams(
language="<string>",
trim_by_indentation=False,
),
prompt="<string>",
stop=["<string>"],
stream=False,
suffix="<string>",
),
)
)
# OR run async with more info
response: Response[Optional[Union[KclCodeCompletionResponse, Error]]] = (
await create_kcl_code_completions.asyncio_detailed(
client=client,
body=KclCodeCompletionRequest(
extra=KclCodeCompletionParams(
language="<string>",
trim_by_indentation=False,
),
prompt="<string>",
stop=["<string>"],
stream=False,
suffix="<string>",
),
)
)
@pytest.mark.skip
def test_create_text_to_cad_iteration():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[TextToCadIteration, Error]] = (
create_text_to_cad_iteration.sync(
client=client,
body=TextToCadIterationBody(
original_source_code="<string>",
source_ranges=[
SourceRangePrompt(
prompt="<string>",
range=SourceRange(
end=SourcePosition(
column=10,
line=10,
),
start=SourcePosition(
column=10,
line=10,
),
),
)
],
),
)
)
if isinstance(result, Error) or result is None:
print(result)
raise Exception("Error in response")
body: TextToCadIteration = result
print(body)
# OR if you need more info (e.g. status_code)
response: Response[Optional[Union[TextToCadIteration, Error]]] = (
create_text_to_cad_iteration.sync_detailed(
client=client,
body=TextToCadIterationBody(
original_source_code="<string>",
source_ranges=[
SourceRangePrompt(
prompt="<string>",
range=SourceRange(
end=SourcePosition(
column=10,
line=10,
),
start=SourcePosition(
column=10,
line=10,
),
),
)
],
),
)
)
# OR run async
@pytest.mark.asyncio
@pytest.mark.skip
async def test_create_text_to_cad_iteration_async():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[TextToCadIteration, Error]] = (
await create_text_to_cad_iteration.asyncio(
client=client,
body=TextToCadIterationBody(
original_source_code="<string>",
source_ranges=[
SourceRangePrompt(
prompt="<string>",
range=SourceRange(
end=SourcePosition(
column=10,
line=10,
),
start=SourcePosition(
column=10,
line=10,
),
),
)
],
),
)
)
# OR run async with more info
response: Response[Optional[Union[TextToCadIteration, Error]]] = (
await create_text_to_cad_iteration.asyncio_detailed(
client=client,
body=TextToCadIterationBody(
original_source_code="<string>",
source_ranges=[
SourceRangePrompt(
prompt="<string>",
range=SourceRange(
end=SourcePosition(
column=10,
line=10,
),
start=SourcePosition(
column=10,
line=10,
),
),
)
],
),
)
)
@pytest.mark.skip
def test_get_org():
# Create our client.
@ -6350,7 +6480,7 @@ def test_create_text_to_cad_model_feedback():
result: Optional[Error] = create_text_to_cad_model_feedback.sync(
client=client,
id="<uuid>",
feedback=AiFeedback.THUMBS_UP,
feedback=MlFeedback.THUMBS_UP,
)
if isinstance(result, Error) or result is None:
@ -6365,7 +6495,7 @@ def test_create_text_to_cad_model_feedback():
create_text_to_cad_model_feedback.sync_detailed(
client=client,
id="<uuid>",
feedback=AiFeedback.THUMBS_UP,
feedback=MlFeedback.THUMBS_UP,
)
)
@ -6380,7 +6510,7 @@ async def test_create_text_to_cad_model_feedback_async():
result: Optional[Error] = await create_text_to_cad_model_feedback.asyncio(
client=client,
id="<uuid>",
feedback=AiFeedback.THUMBS_UP,
feedback=MlFeedback.THUMBS_UP,
)
# OR run async with more info
@ -6388,7 +6518,7 @@ async def test_create_text_to_cad_model_feedback_async():
await create_text_to_cad_model_feedback.asyncio_detailed(
client=client,
id="<uuid>",
feedback=AiFeedback.THUMBS_UP,
feedback=MlFeedback.THUMBS_UP,
)
)

View File

@ -3,11 +3,6 @@
from .account_provider import AccountProvider
from .add_org_member import AddOrgMember
from .address_details import AddressDetails
from .ai_feedback import AiFeedback
from .ai_prompt import AiPrompt
from .ai_prompt_metadata import AiPromptMetadata
from .ai_prompt_results_page import AiPromptResultsPage
from .ai_prompt_type import AiPromptType
from .angle import Angle
from .annotation_line_end import AnnotationLineEnd
from .annotation_line_end_options import AnnotationLineEndOptions
@ -142,6 +137,11 @@ from .mass import Mass
from .meta_cluster_info import MetaClusterInfo
from .metadata import Metadata
from .method import Method
from .ml_feedback import MlFeedback
from .ml_prompt import MlPrompt
from .ml_prompt_metadata import MlPromptMetadata
from .ml_prompt_results_page import MlPromptResultsPage
from .ml_prompt_type import MlPromptType
from .modeling_app_event_type import ModelingAppEventType
from .modeling_app_individual_subscription_tier import (
ModelingAppIndividualSubscriptionTier,
@ -213,6 +213,9 @@ from .solid3d_get_extrusion_face_info import Solid3dGetExtrusionFaceInfo
from .solid3d_get_next_adjacent_edge import Solid3dGetNextAdjacentEdge
from .solid3d_get_opposite_edge import Solid3dGetOppositeEdge
from .solid3d_get_prev_adjacent_edge import Solid3dGetPrevAdjacentEdge
from .source_position import SourcePosition
from .source_range import SourceRange
from .source_range_prompt import SourceRangePrompt
from .stl_storage import StlStorage
from .store_coupon_params import StoreCouponParams
from .string_uuid import StringUuid
@ -227,6 +230,8 @@ from .system import System
from .take_snapshot import TakeSnapshot
from .text_to_cad import TextToCad
from .text_to_cad_create_body import TextToCadCreateBody
from .text_to_cad_iteration import TextToCadIteration
from .text_to_cad_iteration_body import TextToCadIterationBody
from .text_to_cad_model import TextToCadModel
from .text_to_cad_results_page import TextToCadResultsPage
from .unit_angle import UnitAngle

View File

@ -1,12 +0,0 @@
from typing import Optional
from pydantic import BaseModel, ConfigDict
class AiPromptMetadata(BaseModel):
"""Metadata for an AI prompt."""
code: Optional[str] = None
model_config = ConfigDict(protected_namespaces=())

View File

@ -1,16 +1,17 @@
import datetime
from typing import Dict, Literal, Optional, Union
from typing import Dict, List, Literal, Optional, Union
from pydantic import BaseModel, ConfigDict, Field, RootModel
from typing_extensions import Annotated
from ..models.ai_feedback import AiFeedback
from ..models.api_call_status import ApiCallStatus
from ..models.file_export_format import FileExportFormat
from ..models.file_import_format import FileImportFormat
from ..models.input_format import InputFormat
from ..models.ml_feedback import MlFeedback
from ..models.output_format import OutputFormat
from ..models.point3d import Point3d
from ..models.source_range_prompt import SourceRangePrompt
from ..models.text_to_cad_model import TextToCadModel
from ..models.unit_area import UnitArea
from ..models.unit_density import UnitDensity
@ -224,7 +225,7 @@ class text_to_cad(BaseModel):
error: Optional[str] = None
feedback: Optional[AiFeedback] = None
feedback: Optional[MlFeedback] = None
id: Uuid
@ -251,6 +252,44 @@ class text_to_cad(BaseModel):
model_config = ConfigDict(protected_namespaces=())
class text_to_cad_iteration(BaseModel):
"""Text to CAD iteration."""
code: str
completed_at: Optional[datetime.datetime] = None
created_at: datetime.datetime
error: Optional[str] = None
feedback: Optional[MlFeedback] = None
id: Uuid
model: TextToCadModel
model_version: str
original_source_code: str
prompt: Optional[str] = None
source_ranges: List[SourceRangePrompt]
started_at: Optional[datetime.datetime] = None
status: ApiCallStatus
type: Literal["text_to_cad_iteration"] = "text_to_cad_iteration"
updated_at: datetime.datetime
user_id: Uuid
model_config = ConfigDict(protected_namespaces=())
AsyncApiCallOutput = RootModel[
Annotated[
Union[
@ -261,6 +300,7 @@ AsyncApiCallOutput = RootModel[
file_density,
file_surface_area,
text_to_cad,
text_to_cad_iteration,
],
Field(discriminator="type"),
]
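AsyncApiCallOutput dispatches on the type field via pydantic's discriminated union, so a raw async-operation payload deserializes straight into the right variant. A minimal sketch, assuming the union lives at kittycad.models.async_api_call_output; every value below is a placeholder trimmed to the new variant's required fields:

from kittycad.models.async_api_call_output import AsyncApiCallOutput  # assumed path

payload = {
    "type": "text_to_cad_iteration",  # the discriminator selects the variant
    "id": "00000000-0000-0000-0000-000000000000",
    "user_id": "00000000-0000-0000-0000-000000000000",
    "created_at": "2024-08-22T21:00:00Z",
    "updated_at": "2024-08-22T21:00:00Z",
    "status": "completed",
    "model": "kcl",
    "model_version": "0.1.0",
    "code": "// revised KCL",
    "original_source_code": "// original KCL",
    "source_ranges": [],
}
parsed = AsyncApiCallOutput.model_validate(payload)
print(type(parsed.root).__name__)  # text_to_cad_iteration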

View File

@ -18,6 +18,8 @@ class AsyncApiCallType(str, Enum):
FILE_SURFACE_AREA = "file_surface_area"
"""# Text to CAD. """ # noqa: E501
TEXT_TO_CAD = "text_to_cad"
"""# Text to CAD iteration. """ # noqa: E501
TEXT_TO_CAD_ITERATION = "text_to_cad_iteration"
def __str__(self) -> str:
return str(self.value)

View File

@ -1,8 +1,8 @@
from enum import Enum
class AiFeedback(str, Enum):
"""Human feedback on an AI response.""" # noqa: E501
class MlFeedback(str, Enum):
"""Human feedback on an ML response.""" # noqa: E501
"""# Thumbs up. """ # noqa: E501
THUMBS_UP = "thumbs_up"

View File

@ -3,15 +3,15 @@ from typing import Optional
from pydantic import BaseModel, ConfigDict
from ..models.ai_feedback import AiFeedback
from ..models.ai_prompt_metadata import AiPromptMetadata
from ..models.ai_prompt_type import AiPromptType
from ..models.api_call_status import ApiCallStatus
from ..models.ml_feedback import MlFeedback
from ..models.ml_prompt_metadata import MlPromptMetadata
from ..models.ml_prompt_type import MlPromptType
from ..models.uuid import Uuid
class AiPrompt(BaseModel):
"""An AI prompt."""
class MlPrompt(BaseModel):
"""A ML prompt."""
completed_at: Optional[datetime.datetime] = None
@ -19,11 +19,11 @@ class AiPrompt(BaseModel):
error: Optional[str] = None
feedback: Optional[AiFeedback] = None
feedback: Optional[MlFeedback] = None
id: Uuid
metadata: Optional[AiPromptMetadata] = None
metadata: Optional[MlPromptMetadata] = None
model_version: str
@ -35,7 +35,7 @@ class AiPrompt(BaseModel):
status: ApiCallStatus
type: AiPromptType
type: MlPromptType
updated_at: datetime.datetime

View File

@ -0,0 +1,17 @@
from typing import List, Optional
from pydantic import BaseModel, ConfigDict
from ..models.source_range_prompt import SourceRangePrompt
class MlPromptMetadata(BaseModel):
"""Metadata for a ML prompt."""
code: Optional[str] = None
original_source_code: Optional[str] = None
source_ranges: Optional[List[SourceRangePrompt]] = None
model_config = ConfigDict(protected_namespaces=())

View File

@ -2,13 +2,13 @@ from typing import List, Optional
from pydantic import BaseModel, ConfigDict
from ..models.ai_prompt import AiPrompt
from ..models.ml_prompt import MlPrompt
class AiPromptResultsPage(BaseModel):
class MlPromptResultsPage(BaseModel):
"""A single page of results"""
items: List[AiPrompt]
items: List[MlPrompt]
next_page: Optional[str] = None

View File

@ -1,13 +1,15 @@
from enum import Enum
class AiPromptType(str, Enum):
"""A type of AI prompt.""" # noqa: E501
class MlPromptType(str, Enum):
"""A type of ML prompt.""" # noqa: E501
"""# Text to CAD. """ # noqa: E501
TEXT_TO_CAD = "text_to_cad"
"""# Text to KCL. """ # noqa: E501
TEXT_TO_KCL = "text_to_kcl"
"""# Text to Kcl iteration, """ # noqa: E501
TEXT_TO_KCL_ITERATION = "text_to_kcl_iteration"
def __str__(self) -> str:
return str(self.value)

View File

@ -0,0 +1,13 @@
from pydantic import BaseModel, ConfigDict
class SourcePosition(BaseModel):
"""A position in the source code."""
column: int
line: int
model_config = ConfigDict(protected_namespaces=())

View File

@ -0,0 +1,14 @@
from pydantic import BaseModel, ConfigDict
from ..models.source_position import SourcePosition
class SourceRange(BaseModel):
"""A source range of code."""
end: SourcePosition
start: SourcePosition
model_config = ConfigDict(protected_namespaces=())

View File

@ -0,0 +1,14 @@
from pydantic import BaseModel, ConfigDict
from ..models.source_range import SourceRange
class SourceRangePrompt(BaseModel):
"""A source range and prompt for a text to CAD iteration."""
prompt: str
range: SourceRange
model_config = ConfigDict(protected_namespaces=())
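The three new models nest in order: a SourceRangePrompt pairs an instruction with a SourceRange, which spans two SourcePositions. A short sketch with illustrative values:

from kittycad.models.source_position import SourcePosition
from kittycad.models.source_range import SourceRange
from kittycad.models.source_range_prompt import SourceRangePrompt

# Ask for a change to lines 3-5 of some KCL source.
prompt = SourceRangePrompt(
    prompt="round off this corner",
    range=SourceRange(
        start=SourcePosition(line=3, column=1),
        end=SourcePosition(line=5, column=12),
    ),
)
print(prompt.model_dump_json())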

View File

@ -3,9 +3,9 @@ from typing import Dict, Optional
from pydantic import BaseModel, ConfigDict
from ..models.ai_feedback import AiFeedback
from ..models.api_call_status import ApiCallStatus
from ..models.file_export_format import FileExportFormat
from ..models.ml_feedback import MlFeedback
from ..models.text_to_cad_model import TextToCadModel
from ..models.uuid import Uuid
from .base64data import Base64Data
@ -22,7 +22,7 @@ class TextToCad(BaseModel):
error: Optional[str] = None
feedback: Optional[AiFeedback] = None
feedback: Optional[MlFeedback] = None
id: Uuid

View File

@ -0,0 +1,46 @@
import datetime
from typing import List, Optional
from pydantic import BaseModel, ConfigDict
from ..models.api_call_status import ApiCallStatus
from ..models.ml_feedback import MlFeedback
from ..models.source_range_prompt import SourceRangePrompt
from ..models.text_to_cad_model import TextToCadModel
from ..models.uuid import Uuid
class TextToCadIteration(BaseModel):
"""A response from a text to CAD iteration."""
code: str
completed_at: Optional[datetime.datetime] = None
created_at: datetime.datetime
error: Optional[str] = None
feedback: Optional[MlFeedback] = None
id: Uuid
model: TextToCadModel
model_version: str
original_source_code: str
prompt: Optional[str] = None
source_ranges: List[SourceRangePrompt]
started_at: Optional[datetime.datetime] = None
status: ApiCallStatus
updated_at: datetime.datetime
user_id: Uuid
model_config = ConfigDict(protected_namespaces=())

View File

@ -0,0 +1,17 @@
from typing import List, Optional
from pydantic import BaseModel, ConfigDict
from ..models.source_range_prompt import SourceRangePrompt
class TextToCadIterationBody(BaseModel):
"""Body for generating models from text."""
original_source_code: str
prompt: Optional[str] = None
source_ranges: List[SourceRangePrompt]
model_config = ConfigDict(protected_namespaces=())

View File

@ -8,6 +8,8 @@ class TextToCadModel(str, Enum):
CAD = "cad"
"""# KCL. """ # noqa: E501
KCL = "kcl"
"""# KCL iteration. """ # noqa: E501
KCL_ITERATION = "kcl_iteration"
def __str__(self) -> str:
return str(self.value)

spec.json (1473 changes): file diff suppressed because it is too large