2 changes: 1 addition & 1 deletion scripts/devenv-builder/configure-composer.sh
@@ -13,7 +13,7 @@ install_and_configure_composer() {
git composer-cli ostree rpm-ostree \
cockpit-composer bash-completion podman runc genisoimage \
createrepo yum-utils selinux-policy-devel jq wget lorax rpm-build \
containernetworking-plugins expect httpd-tools"
containernetworking-plugins expect httpd-tools vim-common"

# The mock utility comes from the EPEL repository
"${DNF_RETRY}" "install" "https://dl.fedoraproject.org/pub/epel/epel-release-latest-${version_id_major}.noarch.rpm"
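Note: vim-common is presumably added here because it provides the xxd utility, which the new ovms-query-preparation.sh script below relies on.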
17 changes: 17 additions & 0 deletions test/assets/ai-model-serving/ovms-query-preparation.sh
@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -xeuo pipefail

OUTPUT=$1
PAYLOAD=/tmp/bee.jpeg

# Download payload
curl -o "${PAYLOAD}" https://raw.githubusercontent.com/openvinotoolkit/model_server/main/demos/common/static/images/bee.jpeg

# Add an inference header (len=63)
echo -n '{"inputs" : [{"name": "0", "shape": [1], "datatype": "BYTES"}]}' > "${OUTPUT}"

# Add size of the data (image) in binary format (4 bytes, little endian)
printf "%08X" "$(stat --format=%s "${PAYLOAD}")" | sed 's/\(..\)/\1\n/g' | tac | tr -d '\n' | xxd -r -p >> "${OUTPUT}"

# Add the data, i.e. the image
cat "${PAYLOAD}" >> "${OUTPUT}"
30 changes: 30 additions & 0 deletions test/assets/ai-model-serving/ovms-resources.yaml
@@ -0,0 +1,30 @@
# Definition of an InferenceService with the model package in the form of an OCI image.
# It features an extra argument for the model server (--layout), so the data layout
# format expected by the model server matches what we send during testing.
apiVersion: "serving.kserve.io/v1beta1"
kind: "InferenceService"
metadata:
name: openvino-resnet
spec:
predictor:
model:
protocolVersion: v2
modelFormat:
name: openvino_ir
storageUri: "oci://quay.io/microshift/ai-testing-model:ovms-resnet50"
args:
- --layout=NHWC:NCHW
---
apiVersion: route.openshift.io/v1
kind: Route
metadata:
name: openvino-resnet-predictor
spec:
host: openvino-resnet-predictor-test-ai.apps.example.com
port:
targetPort: 8888
to:
kind: Service
name: openvino-resnet-predictor
weight: 100
wildcardPolicy: None
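The Route host above is not registered in DNS, so clients pin it to the node address with curl's --connect-to, as the test suites below do. A hedged sketch (NODE_IP is an assumed placeholder for the MicroShift host address):

    # Query model readiness through the Route without DNS resolution.
    NODE_IP=192.168.122.10   # hypothetical node address
    curl --fail -i \
        --connect-to "openvino-resnet-predictor-test-ai.apps.example.com::${NODE_IP}:" \
        http://openvino-resnet-predictor-test-ai.apps.example.com/v2/models/openvino-resnet/ready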
@@ -16,6 +16,11 @@ COPY ./bootc-images/$USHIFT_RPM_REPO_NAME.repo ./bootc-images/microshift-fast-da
RUN dnf repoinfo --enabled && \
dnf install -y "microshift-olm-{{ .Env.SOURCE_VERSION }}" \
"microshift-multus-{{ .Env.SOURCE_VERSION }}" \
# {{- if and (env.Getenv "UNAME_M" "") (eq "x86_64" .Env.UNAME_M) }}
# Currently, RHOAI is only available for x86_64
"microshift-ai-model-serving-{{ .Env.SOURCE_VERSION }}" \
"microshift-ai-model-serving-release-info-{{ .Env.SOURCE_VERSION }}" \
# {{- end }}
"microshift-gateway-api-{{ .Env.SOURCE_VERSION }}" && \
rm -vf /etc/yum.repos.d/microshift-*.repo && \
rm -rvf $USHIFT_RPM_REPO_PATH && \
@@ -1,3 +1,3 @@
{{- if and (env.Getenv "UNAME_M" "") (eq "x86_64" .Env.UNAME_M) }}
localhost/rhel96-bootc-source-ai-model-serving:latest
{{- end }}
{{- end }}
10 changes: 10 additions & 0 deletions test/resources/common.resource
@@ -128,3 +128,13 @@ Create Remote Dir For Path
... mkdir -pZ $(dirname ${file_path})
... sudo=True return_rc=True return_stdout=True return_stderr=True
Should Be Equal As Integers 0 ${rc}

Local Command Should Work
[Documentation] Run a command locally, log stdout, fail if RC is not 0, return stdout.
... stderr is redirected to stdout.
[Arguments] ${command}
${rc} ${stdout}= Run And Return RC And Output
... ${command} 2>&1
Log ${stdout}
Should Be Equal As Integers 0 ${rc}
RETURN ${stdout}
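The new online test suite below uses this keyword to run curl and oc commands on the test host itself, rather than inside the guest as the offline suite does.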
3 changes: 2 additions & 1 deletion test/resources/kubeconfig.resource
@@ -2,6 +2,7 @@
Documentation Keywords for accessing a kubeconfig file for the MicroShift host

Library OperatingSystem
Library Process
Library String
Library DataFormats.py
Library libipv6.py
@@ -56,7 +57,7 @@ Run With Kubeconfig
[Documentation] Run a command using KUBECONFIG from the test suite.
[Arguments] ${cmd} ${allow_fail}=False ${return_rc}=False
${stdout_file}= Create Random Temp File
${result}= Run Process ${cmd} env:KUBECONFIG=${KUBECONFIG}
${result}= Process.Run Process ${cmd} env:KUBECONFIG=${KUBECONFIG}
... stderr=STDOUT shell=True
... stdout=${stdout_file}

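The Process library is now imported explicitly and the call is qualified as Process.Run Process, presumably to avoid keyword-name ambiguity with the other imported libraries.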
1 change: 1 addition & 0 deletions test/resources/microshift-host.resource
@@ -1,6 +1,7 @@
*** Settings ***
Documentation Keywords for working with the MicroShift host

Library OperatingSystem
Library SSHLibrary
Library libostree.py

@@ -34,5 +34,5 @@ scenario_run_tests() {
local -r full_guest_name=$(full_vm_name host1)
run_tests host1 \
--variable "GUEST_NAME:${full_guest_name}" \
suites/ai-model-serving/ai-model-serving.robot
suites/ai-model-serving/ai-model-serving-offline.robot
}
36 changes: 36 additions & 0 deletions test/scenarios-bootc/presubmits/[email protected]
@@ -0,0 +1,36 @@
#!/bin/bash

# Sourced from scenario.sh and uses functions defined there.

# Currently, RHOAI is only available for x86_64
check_platform() {
local -r record_junit=${1:-false}

if [[ "${UNAME_M}" =~ aarch64 ]]; then
if "${record_junit}"; then
record_junit "setup" "scenario_create_vms" "SKIPPED"
fi
exit 0
fi
}

scenario_create_vms() {
check_platform true

# Increased disk size because of the additional embedded images (especially OVMS, which is ~3.5 GiB)
LVM_SYSROOT_SIZE=20480 prepare_kickstart host1 kickstart-bootc.ks.template rhel96-bootc-source-optionals
launch_vm --boot_blueprint rhel96-bootc --vm_disksize 30
}

scenario_remove_vms() {
check_platform

remove_vm host1
}

scenario_run_tests() {
check_platform

run_tests host1 \
suites/ai-model-serving/ai-model-serving-online.robot
}
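Unlike the offline scenario above, which embeds the model-serving images in the bootc image, this presubmit runs the online suite, which fetches the model from quay.io and the request payload from the network at test time.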
@@ -25,6 +25,7 @@ Sanity Test
Wait For A Deployment test-ai openvino-resnet-predictor
Wait Until Keyword Succeeds 10x 10s
... Check If Model Is Ready
Query Model Metrics
Prepare Request Data
Query Model Server

@@ -44,6 +45,17 @@ Check If Model Is Ready
... --connect-to "${DOMAIN}::${IP}:"
Guest Process Should Succeed ${cmd}

Query Model Metrics
[Documentation] Makes a query against the model server metrics endpoint.
${cmd}= Catenate
... curl
... --fail
... --request GET
... ${DOMAIN}/metrics
... --connect-to "${DOMAIN}::${IP}:"
${output}= Guest Process Should Succeed ${cmd}
Should Contain ${output} ovms_requests_success Number of successful requests to a model or a DAG.

Query Model Server
[Documentation] Makes a query against the model server.

117 changes: 117 additions & 0 deletions test/suites/ai-model-serving/ai-model-serving-online.robot
@@ -0,0 +1,117 @@
*** Settings ***
Documentation Sanity test for AI Model Serving

Library ../../resources/DataFormats.py
Resource ../../resources/common.resource
Resource ../../resources/oc.resource

Suite Setup Setup Suite
Suite Teardown Teardown Suite


*** Variables ***
${USHIFT_HOST}= ${EMPTY}
${OVMS_KSERVE_MANIFEST}= /tmp/ovms-kserve.yaml
${OVMS_REQUEST}= /tmp/ovms-request.json


*** Test Cases ***
Test OpenVINO model
[Documentation] Sanity test for AI OpenVINO Model Serving

Set Test Variable ${MODEL_NAME} openvino-resnet
Set Test Variable ${DOMAIN} ${MODEL_NAME}-predictor-test-ai.apps.example.com
${ns}= Create Unique Namespace
Set Test Variable ${NAMESPACE} ${ns}

Deploy OpenVINO Serving Runtime
Deploy OpenVINO Resnet Model

Check If Model Is Ready
Query Model Metrics Endpoint
Prepare Request Data
Query Model Infer Endpoint

[Teardown] Run Keywords
... Remove Namespace ${NAMESPACE}
... AND
... Remove Tmp Data


*** Keywords ***
Deploy OpenVINO Serving Runtime
[Documentation] Deploys the OpenVINO model server.

${ovms_image}= Command Should Work
... jq -r '.images | with_entries(select(.key == "ovms-image")) | .[]' /usr/share/microshift/release/release-ai-model-serving-"$(uname -i)".json
SSHLibrary.Get File
... /usr/lib/microshift/manifests.d/001-microshift-ai-model-serving/runtimes/ovms-kserve.yaml
... ${OVMS_KSERVE_MANIFEST}
Local Command Should Work sed -i "s,image: ovms-image,image: ${ovms_image}," "${OVMS_KSERVE_MANIFEST}"
Oc Apply -n ${NAMESPACE} -f ${OVMS_KSERVE_MANIFEST}

Deploy OpenVINO Resnet Model
[Documentation] Deploys an InferenceService object to create the Deployment and Service that serve the model.
... Also creates a Route to expose the model endpoint outside the MicroShift cluster.

Oc Apply -n ${NAMESPACE} -f ./assets/ai-model-serving/ovms-resources.yaml
Wait Until Keyword Succeeds 30x 1s
... Run With Kubeconfig oc rollout status -n\=${NAMESPACE} --timeout=60s deployment openvino-resnet-predictor

Check If Model Is Ready
[Documentation] Asks the model server if the model is ready for inference.
${cmd}= Catenate
... curl
... --fail
... -i ${DOMAIN}/v2/models/${MODEL_NAME}/ready
... --connect-to "${DOMAIN}::${USHIFT_HOST}:"
Wait Until Keyword Succeeds 10x 10s
... Local Command Should Work ${cmd}

Query Model Metrics Endpoint
[Documentation] Makes a query against the model server metrics endpoint.

${cmd}= Catenate
... curl
... --fail
... --request GET
... ${DOMAIN}/metrics
... --connect-to "${DOMAIN}::${USHIFT_HOST}:"
${output}= Local Command Should Work ${cmd}
Should Contain ${output} ovms_requests_success Number of successful requests to a model or a DAG.

Prepare Request Data
[Documentation] Executes a script that prepares the request data.

Local Command Should Work bash -x assets/ai-model-serving/ovms-query-preparation.sh ${OVMS_REQUEST}

Remove Tmp Data
[Documentation] Removes temporary data created by this test.

Local Command Should Work rm ${OVMS_REQUEST} ${OVMS_KSERVE_MANIFEST}

Query Model Infer Endpoint
[Documentation] Makes a query against the model server.

# Inference-Header-Content-Length is the length of the JSON at the beginning of the request file.
${cmd}= Catenate
... curl
... --silent
... --fail
... --request POST
... --data-binary "@${OVMS_REQUEST}"
... --header "Inference-Header-Content-Length: 63"
... ${DOMAIN}/v2/models/${MODEL_NAME}/infer
... --connect-to "${DOMAIN}::${USHIFT_HOST}:"
${output}= Local Command Should Work ${cmd}
${result}= Json Parse ${output}
${data}= Set Variable ${result["outputs"][0]["data"]}
# The following expression computes the 'argmax': the index of the highest element.
${argmax}= Evaluate ${data}.index(max(${data}))

# The request data includes the bee.jpeg file, so according to the OpenVINO examples,
# we should expect argmax to be 309.
# See the following for reference:
# - https://github.com/openvinotoolkit/model_server/tree/releases/2025/0/client/python/kserve-api/samples#run-the-client-to-perform-inference-1
# - https://github.com/openvinotoolkit/model_server/blob/releases/2025/0/demos/image_classification/input_images.txt
Should Be Equal As Integers ${argmax} 309
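For comparison outside Robot Framework, the same query can be issued from a shell, computing the inference header length instead of hard-coding 63. A minimal sketch, assuming the request file produced by ovms-query-preparation.sh, the jq utility, and the same DOMAIN and USHIFT_HOST values used above:

    # POST the prepared request and extract the argmax of the response data.
    header='{"inputs" : [{"name": "0", "shape": [1], "datatype": "BYTES"}]}'
    hlen=$(printf '%s' "${header}" | wc -c)    # 63 for the header above
    curl --silent --fail --request POST \
        --data-binary "@/tmp/ovms-request.json" \
        --header "Inference-Header-Content-Length: ${hlen}" \
        --connect-to "${DOMAIN}::${USHIFT_HOST}:" \
        "${DOMAIN}/v2/models/openvino-resnet/infer" \
        | jq '.outputs[0].data | index(max)'   # expect 309 (bee) per the OpenVINO samples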