Python: Upgrade Minimum Onnx Version to enable MacOS Unit Tests #9981

Merged 5 commits on Dec 19, 2024
5 changes: 3 additions & 2 deletions python/pyproject.toml
@@ -86,7 +86,7 @@ ollama = [
     "ollama ~= 0.4"
 ]
 onnx = [
-    "onnxruntime-genai ~= 0.4; platform_system != 'Darwin'"
+    "onnxruntime-genai ~= 0.5"
 ]
 anthropic = [
     "anthropic ~= 0.32"
@@ -156,7 +156,8 @@ filterwarnings = [
 ]
 timeout = 120
 markers = [
-    "ollama: mark a test as requiring the Ollama service (use \"not ollama\" to skip those tests)"
+    "ollama: mark a test as requiring the Ollama service (use \"not ollama\" to skip those tests)",
+    "onnx: mark a test as requiring the Onnx service (use \"not onnx\" to skip those tests)"
 ]

 [tool.ruff]
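
Registering the new onnx marker here keeps pytest from warning about an unknown mark and lets contributors without a local ONNX model deselect those tests, e.g. pytest -m "not onnx". A small equivalent invocation from Python (the test path is the one shown in this PR):

import pytest

# Deselect every test marked @pytest.mark.onnx; pass "-m onnx" instead
# to run only the marked tests.
pytest.main(["-m", "not onnx", "python/tests/integration/completions"])
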
@@ -2,7 +2,6 @@


 import os
-import platform
 import sys
 from typing import Annotated

@@ -22,6 +21,7 @@
 from semantic_kernel.connectors.ai.google.vertex_ai import VertexAIChatCompletion, VertexAIChatPromptExecutionSettings
 from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatCompletion, MistralAIChatPromptExecutionSettings
 from semantic_kernel.connectors.ai.ollama import OllamaChatCompletion, OllamaChatPromptExecutionSettings
+from semantic_kernel.connectors.ai.onnx import OnnxGenAIChatCompletion, OnnxGenAIPromptExecutionSettings, ONNXTemplate
 from semantic_kernel.connectors.ai.open_ai import (
     AzureChatCompletion,
     AzureChatPromptExecutionSettings,
@@ -71,12 +71,6 @@
 bedrock_setup: bool = is_service_setup_for_testing(["AWS_DEFAULT_REGION"], raise_if_not_set=False)


-skip_on_mac_available = platform.system() == "Darwin"
-if not skip_on_mac_available:
-    from semantic_kernel.connectors.ai.onnx import OnnxGenAIChatCompletion, OnnxGenAIPromptExecutionSettings
-    from semantic_kernel.connectors.ai.onnx.utils import ONNXTemplate
-
-
 # A mock plugin that contains a function that returns a complex object.
 class PersonDetails(KernelBaseModel):
     id: str
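
Since the dependency is now installable on every platform, the module-level import added at the top of this file replaces the Darwin guard removed above. A quick sanity check one might run locally, assuming the onnx extra (onnxruntime-genai >= 0.5) is installed:

# Should import cleanly on macOS as well as Linux/Windows.
from semantic_kernel.connectors.ai.onnx import (
    ONNXTemplate,
    OnnxGenAIChatCompletion,
    OnnxGenAIPromptExecutionSettings,
)

print(OnnxGenAIChatCompletion, OnnxGenAIPromptExecutionSettings, ONNXTemplate)
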
@@ -155,7 +149,7 @@ def services(self) -> dict[str, tuple[ServiceType | None, type[PromptExecutionSe
             "vertex_ai": (VertexAIChatCompletion() if vertex_ai_setup else None, VertexAIChatPromptExecutionSettings),
             "onnx_gen_ai": (
                 OnnxGenAIChatCompletion(template=ONNXTemplate.PHI3V) if onnx_setup else None,
-                OnnxGenAIPromptExecutionSettings if not skip_on_mac_available else None,
+                OnnxGenAIPromptExecutionSettings,
             ),
             "bedrock_amazon_titan": (
                 BedrockChatCompletion(model_id="amazon.titan-text-premier-v1:0") if bedrock_setup else None,
@@ -118,7 +118,10 @@
             ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]),
         ],
         {},
-        marks=pytest.mark.skipif(not onnx_setup, reason="Need a Onnx Model setup"),
+        marks=(
+            pytest.mark.skipif(not onnx_setup, reason="Need a Onnx Model setup"),
+            pytest.mark.onnx,
+        ),
         id="onnx_gen_ai_image_input_file",
     ),
     pytest.param(
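
pytest.param accepts either a single mark or a collection of marks, which is what the tuple form above relies on to combine the existing skipif with the new onnx mark. A self-contained sketch of the same pattern (the flag and test below are hypothetical stand-ins):

import pytest

model_available = False  # hypothetical stand-in for onnx_setup

@pytest.mark.parametrize(
    "text",
    [
        pytest.param(
            "Hello",
            marks=(
                pytest.mark.skipif(not model_available, reason="Need a model"),
                pytest.mark.onnx,  # custom mark, registered in pyproject.toml
            ),
            id="needs_model",
        ),
    ],
)
def test_echo(text):
    # Runs only when model_available is True and the onnx mark is selected.
    assert text == "Hello"
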
@@ -150,7 +150,10 @@ class Reasoning(KernelBaseModel):
             ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]),
         ],
         {},
-        marks=pytest.mark.skipif(not onnx_setup, reason="Need a Onnx Model setup"),
+        marks=(
+            pytest.mark.skipif(not onnx_setup, reason="Need a Onnx Model setup"),
+            pytest.mark.onnx,
+        ),
         id="onnx_gen_ai",
     ),
     # endregion
14 changes: 6 additions & 8 deletions python/tests/integration/completions/test_text_completion.py
@@ -1,6 +1,5 @@
 # Copyright (c) Microsoft. All rights reserved.

-import platform
 import sys
 from functools import partial
 from typing import Any
@@ -19,6 +18,7 @@
 from semantic_kernel.connectors.ai.google.vertex_ai import VertexAITextCompletion, VertexAITextPromptExecutionSettings
 from semantic_kernel.connectors.ai.hugging_face import HuggingFacePromptExecutionSettings, HuggingFaceTextCompletion
 from semantic_kernel.connectors.ai.ollama import OllamaTextCompletion, OllamaTextPromptExecutionSettings
+from semantic_kernel.connectors.ai.onnx import OnnxGenAIPromptExecutionSettings, OnnxGenAITextCompletion
 from semantic_kernel.connectors.ai.open_ai import (
     AzureOpenAISettings,
     AzureTextCompletion,
@@ -43,11 +43,6 @@
 )  # Tests are optional for ONNX
 bedrock_setup = is_service_setup_for_testing(["AWS_DEFAULT_REGION"], raise_if_not_set=False)

-skip_on_mac_available = platform.system() == "Darwin"
-if not skip_on_mac_available:
-    from semantic_kernel.connectors.ai.onnx import OnnxGenAIPromptExecutionSettings, OnnxGenAITextCompletion
-
-
 pytestmark = pytest.mark.parametrize(
     "service_id, execution_settings_kwargs, inputs, kwargs",
     [
@@ -128,7 +123,10 @@
         {},
         ["<|user|>Repeat the word Hello<|end|><|assistant|>"],
         {},
-        marks=pytest.mark.skipif(not onnx_setup, reason="Need local Onnx setup"),
+        marks=(
+            pytest.mark.skipif(not onnx_setup, reason="Need a Onnx Model setup"),
+            pytest.mark.onnx,
+        ),
         id="onnx_gen_ai_text_completion",
     ),
     pytest.param(
@@ -242,7 +240,7 @@ def services(self) -> dict[str, tuple[ServiceType | None, type[PromptExecutionSe
         ),
         "onnx_gen_ai": (
             OnnxGenAITextCompletion() if onnx_setup else None,
-            OnnxGenAIPromptExecutionSettings if not skip_on_mac_available else None,
+            OnnxGenAIPromptExecutionSettings,
         ),
         # Amazon Bedrock supports models from multiple providers but requests to and responses from the models are
         # inconsistent. So we need to test each model separately.