feat(api): OpenAPI spec update via Stainless API (#1187)

stainless-app[bot] authored 2024-07-15 19:12:03 +00:00; committed by stainless-bot
parent 84d7b0dc1a
commit 1022f3066b
5 changed files with 494 additions and 615 deletions


@@ -1,2 +1,2 @@
configured_endpoints: 1256
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/cloudflare%2Fcloudflare-e9e968f2bf05402754affac40563fb49a9f7979ad72579fd407bedb0ba702ff5.yml
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/cloudflare%2Fcloudflare-7c1304c4f3acd7dbe8b476827fe5df13055ab6634accc43a8edbbb82e1208a98.yml


@@ -50,42 +50,6 @@ class AIResource(SyncAPIResource):
def with_streaming_response(self) -> AIResourceWithStreamingResponse:
return AIResourceWithStreamingResponse(self)
@overload
def run(
self,
model_name: str,
*,
account_id: str,
body: object,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Optional[AIRunResponse]:
"""
This endpoint provides users with the capability to run specific AI models
on-demand.
By submitting the required input data, users can receive real-time predictions
or results generated by the chosen AI model. The endpoint supports various AI
model types, ensuring flexibility and adaptability for diverse use cases.
Model specific inputs available in
[Cloudflare Docs](https://developers.cloudflare.com/workers-ai/models/).
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
def run(
self,
@@ -367,7 +331,7 @@ class AIResource(SyncAPIResource):
model_name: str,
*,
account_id: str,
messages: Iterable[ai_run_params.Variant8Message],
messages: Iterable[ai_run_params.Variant7Message],
frequency_penalty: float | NotGiven = NOT_GIVEN,
max_tokens: int | NotGiven = NOT_GIVEN,
presence_penalty: float | NotGiven = NOT_GIVEN,
@@ -375,7 +339,7 @@ class AIResource(SyncAPIResource):
seed: int | NotGiven = NOT_GIVEN,
stream: bool | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
tools: Iterable[ai_run_params.Variant8Tool] | NotGiven = NOT_GIVEN,
tools: Iterable[ai_run_params.Variant7Tool] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -524,7 +488,6 @@ class AIResource(SyncAPIResource):
...
@required_args(
["account_id", "body"],
["account_id", "text"],
["account_id", "prompt"],
["account_id", "audio"],
@@ -539,7 +502,6 @@ class AIResource(SyncAPIResource):
model_name: str,
*,
account_id: str,
body: object | NotGiven = NOT_GIVEN,
text: str | Union[str, List[str]] | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
guidance: float | NotGiven = NOT_GIVEN,
@@ -565,8 +527,8 @@ class AIResource(SyncAPIResource):
temperature: float | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
messages: Iterable[ai_run_params.Variant8Message] | NotGiven = NOT_GIVEN,
tools: Iterable[ai_run_params.Variant8Tool] | NotGiven = NOT_GIVEN,
messages: Iterable[ai_run_params.Variant7Message] | NotGiven = NOT_GIVEN,
tools: Iterable[ai_run_params.Variant7Tool] | NotGiven = NOT_GIVEN,
target_lang: str | NotGiven = NOT_GIVEN,
source_lang: str | NotGiven = NOT_GIVEN,
input_text: str | NotGiven = NOT_GIVEN,
@@ -586,7 +548,42 @@ class AIResource(SyncAPIResource):
Optional[AIRunResponse],
self._post(
f"/accounts/{account_id}/ai/run/{model_name}",
body=maybe_transform(body, ai_run_params.AIRunParams),
body=maybe_transform(
{
"text": text,
"prompt": prompt,
"guidance": guidance,
"height": height,
"image": image,
"image_b64": image_b64,
"lora_weights": lora_weights,
"loras": loras,
"mask": mask,
"negative_prompt": negative_prompt,
"num_steps": num_steps,
"seed": seed,
"strength": strength,
"width": width,
"audio": audio,
"frequency_penalty": frequency_penalty,
"lora": lora,
"max_tokens": max_tokens,
"presence_penalty": presence_penalty,
"raw": raw,
"repetition_penalty": repetition_penalty,
"stream": stream,
"temperature": temperature,
"top_k": top_k,
"top_p": top_p,
"messages": messages,
"tools": tools,
"target_lang": target_lang,
"source_lang": source_lang,
"input_text": input_text,
"max_length": max_length,
},
ai_run_params.AIRunParams,
),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
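
For illustration (not part of the diff): with the untyped `body: object` overload removed, callers of the sync `run` method pass the model-specific fields directly and the resource assembles the request body as shown above. A minimal sketch of the chat-style (messages) variant, assuming the resource is exposed as `client.ai`, that `CLOUDFLARE_API_TOKEN` is set in the environment, and that the model slug and account ID are placeholders:

from cloudflare import Cloudflare

client = Cloudflare()  # picks up CLOUDFLARE_API_TOKEN from the environment

# Hypothetical model slug and account ID; substitute real values.
result = client.ai.run(
    "@cf/meta/llama-3-8b-instruct",
    account_id="YOUR_ACCOUNT_ID",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What does Workers AI do?"},
    ],
    max_tokens=256,
)
print(result)
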
@@ -614,42 +611,6 @@ class AsyncAIResource(AsyncAPIResource):
def with_streaming_response(self) -> AsyncAIResourceWithStreamingResponse:
return AsyncAIResourceWithStreamingResponse(self)
@overload
async def run(
self,
model_name: str,
*,
account_id: str,
body: object,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Optional[AIRunResponse]:
"""
This endpoint provides users with the capability to run specific AI models
on-demand.
By submitting the required input data, users can receive real-time predictions
or results generated by the chosen AI model. The endpoint supports various AI
model types, ensuring flexibility and adaptability for diverse use cases.
Model specific inputs available in
[Cloudflare Docs](https://developers.cloudflare.com/workers-ai/models/).
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
async def run(
self,
@@ -931,7 +892,7 @@ class AsyncAIResource(AsyncAPIResource):
model_name: str,
*,
account_id: str,
messages: Iterable[ai_run_params.Variant8Message],
messages: Iterable[ai_run_params.Variant7Message],
frequency_penalty: float | NotGiven = NOT_GIVEN,
max_tokens: int | NotGiven = NOT_GIVEN,
presence_penalty: float | NotGiven = NOT_GIVEN,
@@ -939,7 +900,7 @@ class AsyncAIResource(AsyncAPIResource):
seed: int | NotGiven = NOT_GIVEN,
stream: bool | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
tools: Iterable[ai_run_params.Variant8Tool] | NotGiven = NOT_GIVEN,
tools: Iterable[ai_run_params.Variant7Tool] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1088,7 +1049,6 @@ class AsyncAIResource(AsyncAPIResource):
...
@required_args(
["account_id", "body"],
["account_id", "text"],
["account_id", "prompt"],
["account_id", "audio"],
@@ -1103,7 +1063,6 @@ class AsyncAIResource(AsyncAPIResource):
model_name: str,
*,
account_id: str,
body: object | NotGiven = NOT_GIVEN,
text: str | Union[str, List[str]] | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
guidance: float | NotGiven = NOT_GIVEN,
@@ -1129,8 +1088,8 @@ class AsyncAIResource(AsyncAPIResource):
temperature: float | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
messages: Iterable[ai_run_params.Variant8Message] | NotGiven = NOT_GIVEN,
tools: Iterable[ai_run_params.Variant8Tool] | NotGiven = NOT_GIVEN,
messages: Iterable[ai_run_params.Variant7Message] | NotGiven = NOT_GIVEN,
tools: Iterable[ai_run_params.Variant7Tool] | NotGiven = NOT_GIVEN,
target_lang: str | NotGiven = NOT_GIVEN,
source_lang: str | NotGiven = NOT_GIVEN,
input_text: str | NotGiven = NOT_GIVEN,
@@ -1150,7 +1109,42 @@ class AsyncAIResource(AsyncAPIResource):
Optional[AIRunResponse],
await self._post(
f"/accounts/{account_id}/ai/run/{model_name}",
body=await async_maybe_transform(body, ai_run_params.AIRunParams),
body=await async_maybe_transform(
{
"text": text,
"prompt": prompt,
"guidance": guidance,
"height": height,
"image": image,
"image_b64": image_b64,
"lora_weights": lora_weights,
"loras": loras,
"mask": mask,
"negative_prompt": negative_prompt,
"num_steps": num_steps,
"seed": seed,
"strength": strength,
"width": width,
"audio": audio,
"frequency_penalty": frequency_penalty,
"lora": lora,
"max_tokens": max_tokens,
"presence_penalty": presence_penalty,
"raw": raw,
"repetition_penalty": repetition_penalty,
"stream": stream,
"temperature": temperature,
"top_k": top_k,
"top_p": top_p,
"messages": messages,
"tools": tools,
"target_lang": target_lang,
"source_lang": source_lang,
"input_text": input_text,
"max_length": max_length,
},
ai_run_params.AIRunParams,
),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
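
For illustration (not part of the diff): the async resource mirrors the sync one, so the same call shape applies with `await`. A minimal sketch of the prompt-based variant, under the same assumptions as above (an `AsyncCloudflare` client, the `client.ai` accessor, and placeholder model slug and account ID):

import asyncio

from cloudflare import AsyncCloudflare


async def main() -> None:
    client = AsyncCloudflare()  # picks up CLOUDFLARE_API_TOKEN from the environment
    # Hypothetical model slug and account ID; substitute real values.
    result = await client.ai.run(
        "@cf/meta/llama-3-8b-instruct",
        account_id="YOUR_ACCOUNT_ID",
        prompt="Summarize what Workers AI does in one sentence.",
    )
    print(result)


asyncio.run(main())
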


@@ -7,20 +7,19 @@ from typing_extensions import Required, TypedDict
__all__ = [
"AIRunParams",
"DumbPipe",
"TextClassification",
"TextToImage",
"TextEmbeddings",
"AutomaticSpeechRecognition",
"ImageClassification",
"ObjectDetection",
"Variant6",
"Variant7",
"Variant8",
"Variant8Message",
"Variant8Tool",
"Variant8ToolFunction",
"Variant8ToolFunctionParameters",
"Variant8ToolFunctionParametersProperties",
"Variant7Message",
"Variant7Tool",
"Variant7ToolFunction",
"Variant7ToolFunctionParameters",
"Variant7ToolFunctionParametersProperties",
"Translation",
"Summarization",
"ImageToText",
@@ -28,12 +27,6 @@ __all__ = [
]
class DumbPipe(TypedDict, total=False):
account_id: Required[str]
body: Required[object]
class TextClassification(TypedDict, total=False):
account_id: Required[str]
@@ -94,7 +87,7 @@ class ObjectDetection(TypedDict, total=False):
image: Iterable[float]
class Variant7(TypedDict, total=False):
class Variant6(TypedDict, total=False):
account_id: Required[str]
prompt: Required[str]
@@ -122,10 +115,10 @@ class Variant7(TypedDict, total=False):
top_p: float
class Variant8(TypedDict, total=False):
class Variant7(TypedDict, total=False):
account_id: Required[str]
messages: Required[Iterable[Variant8Message]]
messages: Required[Iterable[Variant7Message]]
frequency_penalty: float
@@ -141,43 +134,43 @@ class Variant8(TypedDict, total=False):
temperature: float
tools: Iterable[Variant8Tool]
tools: Iterable[Variant7Tool]
top_k: int
top_p: float
class Variant8Message(TypedDict, total=False):
class Variant7Message(TypedDict, total=False):
content: Required[str]
role: Required[str]
class Variant8ToolFunctionParametersProperties(TypedDict, total=False):
class Variant7ToolFunctionParametersProperties(TypedDict, total=False):
description: str
type: str
class Variant8ToolFunctionParameters(TypedDict, total=False):
properties: Dict[str, Variant8ToolFunctionParametersProperties]
class Variant7ToolFunctionParameters(TypedDict, total=False):
properties: Dict[str, Variant7ToolFunctionParametersProperties]
required: List[str]
type: str
class Variant8ToolFunction(TypedDict, total=False):
class Variant7ToolFunction(TypedDict, total=False):
description: str
name: str
parameters: Variant8ToolFunctionParameters
parameters: Variant7ToolFunctionParameters
class Variant8Tool(TypedDict, total=False):
function: Variant8ToolFunction
class Variant7Tool(TypedDict, total=False):
function: Variant7ToolFunction
type: str
@@ -223,15 +216,14 @@ class ImageToTextMessage(TypedDict, total=False):
AIRunParams = Union[
DumbPipe,
TextClassification,
TextToImage,
TextEmbeddings,
AutomaticSpeechRecognition,
ImageClassification,
ObjectDetection,
Variant6,
Variant7,
Variant8,
Translation,
Summarization,
ImageToText,
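
For illustration (not part of the diff): the renamed `Variant7Message` and `Variant7Tool` TypedDicts describe the shapes accepted by the chat variant, so plain dictionaries matching them type-check. A sketch of a tool definition and a message list built against these shapes; the import path, function name, and properties are hypothetical:

from cloudflare.types import ai_run_params  # assumed import path; adjust to the actual package layout

# Hypothetical tool matching Variant7Tool -> Variant7ToolFunction ->
# Variant7ToolFunctionParameters -> Variant7ToolFunctionParametersProperties.
weather_tool: ai_run_params.Variant7Tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {
                "city": {"type": "string", "description": "City name"},
            },
            "required": ["city"],
        },
    },
}

# Messages matching Variant7Message (content and role are required keys).
messages: list[ai_run_params.Variant7Message] = [
    {"role": "user", "content": "What is the weather in Lisbon?"},
]
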


@@ -111,5 +111,4 @@ AIRunResponse = Union[
Translation,
Summarization,
ImageToText,
object,
]

File diff suppressed because it is too large.