
Commit c33bd76

fix flake8 validations
1 parent f848161 commit c33bd76

2 files changed (+38 -27 lines)

azure/functions/decorators/function_app.py

Lines changed: 35 additions & 25 deletions
@@ -3217,8 +3217,9 @@ def decorator():
     def text_completion_input(self,
                               arg_name: str,
                               prompt: str,
-                              chat_model: Optional[
-                                  OpenAIModels] = OpenAIModels.DefaultChatModel,
+                              chat_model: Optional
+                              [OpenAIModels]
+                              = OpenAIModels.DefaultChatModel,
                               temperature: Optional[str] = "0.5",
                               top_p: Optional[str] = None,
                               max_tokens: Optional[str] = "100",
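
Every hunk in this commit applies the same re-wrap, so it is worth noting why it is safe: inside the parameter list's parentheses, Python joins the physical lines by implicit continuation, so `Optional` followed by `[OpenAIModels]` on the next line is still the subscription `Optional[OpenAIModels]`. A minimal sketch, using a stand-in enum rather than the real OpenAIModels, showing that the two spellings yield the same signature:

# Minimal sketch with a stand-in enum (not the real azure-functions
# OpenAIModels): both ways of wrapping the annotation produce the same
# signature; only the physical line lengths differ, which is what the
# flake8 fix is after.
import inspect
from enum import Enum
from typing import Optional


class OpenAIModels(Enum):  # stand-in for illustration only
    DefaultChatModel = "default-chat-model"


def before(chat_model: Optional[
           OpenAIModels] = OpenAIModels.DefaultChatModel):
    return chat_model


def after(chat_model: Optional
          [OpenAIModels]
          = OpenAIModels.DefaultChatModel):
    return chat_model


# Same parameter name, annotation, and default in both spellings.
assert inspect.signature(before) == inspect.signature(after)
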
@@ -3374,8 +3375,9 @@ def decorator():
     def assistant_post_input(self, arg_name: str,
                              id: str,
                              user_message: str,
-                             chat_model: Optional[
-                                 OpenAIModels] = OpenAIModels.DefaultChatModel,
+                             chat_model: Optional
+                             [OpenAIModels]
+                             = OpenAIModels.DefaultChatModel,
                              chat_storage_connection_setting: Optional[str] = "AzureWebJobsStorage",  # noqa: E501
                              collection_name: Optional[str] = "ChatState",  # noqa: E501
                              temperature: Optional[str] = "0.5",
@@ -3394,7 +3396,8 @@ def assistant_post_input(self, arg_name: str,
         :param id: The ID of the assistant to update.
         :param user_message: The user message that user has entered for
         assistant to respond to.
-        :param chat_model: The deployment name or model name of OpenAI Chat Completion API.
+        :param chat_model: The deployment name or model name of OpenAI Chat
+        Completion API.
         :param chat_storage_connection_setting: The configuration section name
         for the table settings for assistant chat storage. The default value is
         "AzureWebJobsStorage".
@@ -3413,8 +3416,8 @@ def assistant_post_input(self, arg_name: str,
         exceed the model's context length. Most models have a context length of
         2048 tokens (except for the newest models, which support 4096)
         :param is_reasoning_model: Whether the configured chat completion model
-        is a reasoning model or not. Properties max_tokens and temperature are not
-        supported for reasoning models.
+        is a reasoning model or not. Properties max_tokens and temperature are
+        not supported for reasoning models.
         :param data_type: Defines how Functions runtime should treat the
         parameter value
         :param kwargs: Keyword arguments for specifying additional binding
@@ -3451,8 +3454,9 @@ def embeddings_input(self,
                          arg_name: str,
                          input: str,
                          input_type: InputType,
-                         embeddings_model: Optional[
-                             OpenAIModels] = OpenAIModels.DefaultEmbeddingsModel,
+                         embeddings_model: Optional
+                         [OpenAIModels]
+                         = OpenAIModels.DefaultEmbeddingsModel,
                          max_chunk_length: Optional[int] = 8 * 1024,
                          max_overlap: Optional[int] = 128,
                          data_type: Optional[
@@ -3469,7 +3473,8 @@ def embeddings_input(self,
         :param input: The input source containing the data to generate
         embeddings for.
         :param input_type: The type of the input.
-        :param embeddings_model: The deployment name or model name for OpenAI Embeddings.
+        :param embeddings_model: The deployment name or model name for OpenAI
+        Embeddings.
         :param max_chunk_length: The maximum number of characters to chunk the
         input into. Default value: 8 * 1024
         :param max_overlap: The maximum number of characters to overlap
@@ -3507,10 +3512,12 @@ def semantic_search_input(self,
                               search_connection_name: str,
                               collection: str,
                               query: Optional[str] = None,
-                              embeddings_model: Optional[
-                                  OpenAIModels] = OpenAIModels.DefaultEmbeddingsModel,
-                              chat_model: Optional[
-                                  OpenAIModels] = OpenAIModels.DefaultChatModel,
+                              embeddings_model: Optional
+                              [OpenAIModels]
+                              = OpenAIModels.DefaultEmbeddingsModel,
+                              chat_model: Optional
+                              [OpenAIModels]
+                              = OpenAIModels.DefaultChatModel,
                               system_prompt: Optional[str] = semantic_search_system_prompt,  # NoQA
                               max_knowledge_count: Optional[int] = 1,
                               temperature: Optional[str] = "0.5",
@@ -3533,14 +3540,15 @@ def semantic_search_input(self,
         Ref: https://platform.openai.com/docs/guides/embeddings
 
         :param arg_name: The name of binding parameter in the function code.
-        :param search_connection_name: app setting or environment variable which
-        contains a vector search connection setting value.
+        :param search_connection_name: app setting or environment variable
+        which contains a vector search connection setting value.
         :param collection: The name of the collection or table to search or
         store.
         :param query: The semantic query text to use for searching.
-        :param embeddings_model: The deployment name or model name for OpenAI Embeddings.
-        The default value is "text-embedding-ada-002".
-        :param chat_model: The deployment name or model name of OpenAI Chat Completion API.
+        :param embeddings_model: The deployment name or model name for OpenAI
+        Embeddings. The default value is "text-embedding-ada-002".
+        :param chat_model: The deployment name or model name of OpenAI Chat
+        Completion API.
         :param system_prompt: Optional. The system prompt to use for prompting
         the large language model.
         :param max_knowledge_count: Optional. The number of knowledge items to
@@ -3556,10 +3564,10 @@ def semantic_search_input(self,
         :param max_tokens: The maximum number of tokens to generate in the
         completion. The token count of your prompt plus max_tokens cannot
         exceed the model's context length. Most models have a context length of
-        2048 tokens (except for the newest models, which support 4096)
+        2048 tokens (except for the newest models, which support 4096)
         :param is_reasoning_model: Whether the configured chat completion model
-        is a reasoning model or not. Properties max_tokens and temperature are not
-        supported for reasoning models.
+        is a reasoning model or not. Properties max_tokens and temperature are
+        not supported for reasoning models.
         :param data_type: Optional. Defines how Functions runtime should treat
         the parameter value. Default value: None
         :param kwargs: Keyword arguments for specifying additional binding
@@ -3600,8 +3608,9 @@ def embeddings_store_output(self,
                                 input_type: InputType,
                                 store_connection_name: str,
                                 collection: str,
-                                embeddings_model: Optional[
-                                    OpenAIModels] = OpenAIModels.DefaultEmbeddingsModel,
+                                embeddings_model: Optional
+                                [OpenAIModels]
+                                = OpenAIModels.DefaultEmbeddingsModel,
                                 max_chunk_length: Optional[int] = 8 * 1024,
                                 max_overlap: Optional[int] = 128,
                                 data_type: Optional[
@@ -3624,7 +3633,8 @@ def embeddings_store_output(self,
         :param store_connection_name: The name of an app setting or environment
         variable which contains a vectore store connection setting value
         :param collection: The collection or table to search.
-        :param embeddings_model: The deployment name or model name for OpenAI Embeddings.
+        :param embeddings_model: The deployment name or model name for OpenAI
+        Embeddings.
         :param max_chunk_length: The maximum number of characters to chunk the
         input into.
         :param max_overlap: The maximum number of characters to overlap between
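
For context, here is a hedged usage sketch of the `text_completion_input` decorator whose signature is re-wrapped above, written against the v2 Python programming model. The route, the prompt wording, and the assumption that the bound `response` value arrives as a JSON string with a "content" field are illustrative rather than taken from this diff, and the app would additionally need the OpenAI extension and its connection settings configured.

# Hedged usage sketch (not part of this commit). Route, prompt text, and
# the JSON shape assumed for `response` are illustrative; the OpenAI
# extension and its app settings must be configured separately.
import json

import azure.functions as func

app = func.FunctionApp()


@app.route(route="whois/{name}")
@app.text_completion_input(arg_name="response",
                           prompt="Who is {name}?",
                           max_tokens="100")
def whois(req: func.HttpRequest, response: str) -> func.HttpResponse:
    # The binding is assumed here to hand back a JSON payload whose
    # "content" field holds the completion text.
    completion = json.loads(response)
    return func.HttpResponse(completion.get("content", ""))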

azure/functions/decorators/openai.py

Lines changed: 3 additions & 2 deletions
@@ -51,8 +51,9 @@ def get_binding_name() -> str:
     def __init__(self,
                  name: str,
                  prompt: str,
-                 chat_model: Optional[
-                     OpenAIModels] = OpenAIModels.DefaultChatModel,
+                 chat_model: Optional
+                 [OpenAIModels]
+                 = OpenAIModels.DefaultChatModel,
                  temperature: Optional[str] = "0.5",
                  top_p: Optional[str] = None,
                  max_tokens: Optional[str] = "100",
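
The commit message only says "fix flake8 validations"; the re-wrapped annotations above are presumably aimed at the line-length rule E501 (default limit of 79 characters), since the deeply indented `OpenAIModels] = ...` continuation lines easily exceed it. A rough sketch of that length check alone, far simpler than what flake8 actually runs, just to make the rule concrete:

# Rough sketch of flake8's E501 line-length rule (default limit of 79
# characters); flake8/pycodestyle do much more than this, so treat it as
# an illustration, not a replacement for running the real tool.
import sys

MAX_LINE_LENGTH = 79  # pycodestyle's default for E501


def long_lines(path):
    """Yield (line_number, length) for lines longer than the limit."""
    with open(path, encoding="utf-8") as handle:
        for number, line in enumerate(handle, start=1):
            length = len(line.rstrip("\n"))
            if length > MAX_LINE_LENGTH:
                yield number, length


if __name__ == "__main__":
    for path in sys.argv[1:]:
        for number, length in long_lines(path):
            print(f"{path}:{number}: line too long "
                  f"({length} > {MAX_LINE_LENGTH} characters)")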
