Update api spec (#182)

* YOYO NEW API SPEC!

* I have generated the latest API!

---------

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
This commit is contained in:
Jess Frazelle
2024-01-06 14:56:45 -08:00
committed by GitHub
parent 8925458665
commit 0ad6216cde
23 changed files with 694 additions and 1160 deletions

View File

@@ -42,9 +42,7 @@ from kittycad.api.file import (
)
from kittycad.api.hidden import auth_email, auth_email_callback, logout
from kittycad.api.meta import (
get_ai_plugin_manifest,
get_metadata,
get_openai_schema,
get_schema,
internal_get_api_token_for_discord_user,
ping,
@@ -91,7 +89,6 @@ from kittycad.api.users import (
)
from kittycad.client import ClientFromEnv
from kittycad.models import (
AiPluginManifest,
AiPrompt,
AiPromptResultsPage,
ApiCallQueryGroup,
@@ -203,51 +200,6 @@ async def test_get_schema_async():
)
@pytest.mark.skip
def test_get_ai_plugin_manifest():
# Create our client.
client = ClientFromEnv()
result: Optional[Union[AiPluginManifest, Error]] = get_ai_plugin_manifest.sync(
client=client,
)
if isinstance(result, Error) or result is None:
print(result)
raise Exception("Error in response")
body: AiPluginManifest = result
print(body)
# OR if you need more info (e.g. status_code)
response: Response[
Optional[Union[AiPluginManifest, Error]]
] = get_ai_plugin_manifest.sync_detailed(
client=client,
)
# OR run async
@pytest.mark.asyncio
@pytest.mark.skip
async def test_get_ai_plugin_manifest_async():
# Create our client.
client = ClientFromEnv()
result: Optional[
Union[AiPluginManifest, Error]
] = await get_ai_plugin_manifest.asyncio(
client=client,
)
# OR run async with more info
response: Response[
Optional[Union[AiPluginManifest, Error]]
] = await get_ai_plugin_manifest.asyncio_detailed(
client=client,
)
@pytest.mark.skip
def test_get_metadata():
# Create our client.
@@ -1491,38 +1443,6 @@ async def test_logout_async():
)
@pytest.mark.skip
def test_get_openai_schema():
# Create our client.
client = ClientFromEnv()
get_openai_schema.sync(
client=client,
)
# OR if you need more info (e.g. status_code)
get_openai_schema.sync_detailed(
client=client,
)
# OR run async
@pytest.mark.asyncio
@pytest.mark.skip
async def test_get_openai_schema_async():
# Create our client.
client = ClientFromEnv()
await get_openai_schema.asyncio(
client=client,
)
# OR run async with more info
await get_openai_schema.asyncio_detailed(
client=client,
)
@pytest.mark.skip
def test_ping():
# Create our client.
@@ -3403,6 +3323,7 @@ def test_list_text_to_cad_models_for_user():
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
no_models=None, # Optional[bool]
)
if isinstance(result, Error) or result is None:
@@ -3420,6 +3341,7 @@ def test_list_text_to_cad_models_for_user():
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
no_models=None, # Optional[bool]
)
@@ -3437,6 +3359,7 @@ async def test_list_text_to_cad_models_for_user_async():
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
no_models=None, # Optional[bool]
)
# OR run async with more info
@@ -3447,6 +3370,7 @@ async def test_list_text_to_cad_models_for_user_async():
sort_by=CreatedAtSortMode.CREATED_AT_ASCENDING,
limit=None, # Optional[int]
page_token=None, # Optional[str]
no_models=None, # Optional[bool]
)