Remove deprecated UserMessage constructors #2968

Closed
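
All of the changes below follow the same pattern: calls to the deprecated UserMessage constructors (text plus media, and optionally metadata) are replaced with the fluent UserMessage.builder() API. The stand-alone sketch below illustrates that migration with placeholder values; the resource path, metadata map, and the package location of Media are assumptions for illustration rather than code from any one file in this PR, and import paths may differ across Spring AI versions.

import java.util.List;
import java.util.Map;

import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.ai.content.Media; // Media's package has moved between Spring AI versions; adjust to yours
import org.springframework.core.io.ClassPathResource;
import org.springframework.util.MimeTypeUtils;

class UserMessageBuilderMigration {

	UserMessage migrated() {
		// Placeholder inputs, mirroring the shapes used in the tests touched by this PR.
		var imageData = new ClassPathResource("/test.png");
		List<Media> media = List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData));
		Map<String, Object> metadata = Map.of("source", "example");

		// Before (deprecated constructors):
		//   new UserMessage("Explain what you see in this picture.", media);
		//   new UserMessage("Explain what you see in this picture.", media, metadata);
		// After, with the builder used throughout this PR:
		return UserMessage.builder()
			.text("Explain what you see in this picture.")
			.media(media)
			.metadata(metadata) // optional; covers the three-argument constructor
			.build();
	}

}
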
@@ -140,7 +140,7 @@ private AdvisedRequest before(AdvisedRequest request) {
.systemParams(advisedSystemParams)
.build();

UserMessage userMessage = new UserMessage(request.userText(), request.media());
UserMessage userMessage = UserMessage.builder().text(request.userText()).media(request.media()).build();
this.getChatMemoryStore()
.write(toDocuments(List.of(userMessage), this.doGetConversationId(request.adviseContext())));

@@ -101,7 +101,7 @@ void addAndGet() {
.data("hello".getBytes(StandardCharsets.UTF_8))
.build(),
Media.builder().data(URI.create("http://www.google.com").toURL()).mimeType(textPlain).build());
UserMessage userMessageWithMedia = new UserMessage("Message with media", media);
UserMessage userMessageWithMedia = UserMessage.builder().text("Message with media").media(media).build();
memory.add(sessionId, userMessageWithMedia);

messages = memory.get(sessionId, Integer.MAX_VALUE);
@@ -265,8 +265,11 @@ private Message buildUserMessage(org.neo4j.driver.Record record, Map<String, Obj
List<Media> mediaList) {
Message message;
Map<String, Object> metadata = record.get("metadata").asMap();
message = new UserMessage(messageMap.get(MessageAttributes.TEXT_CONTENT.getValue()).toString(), mediaList,
metadata);
message = UserMessage.builder()
.text(messageMap.get(MessageAttributes.TEXT_CONTENT.getValue()).toString())
.media(mediaList)
.metadata(metadata)
.build();
return message;
}

@@ -236,8 +236,10 @@ void multiModalityTest() throws IOException {

var imageData = new ClassPathResource("/test.png");

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)))
.build();

var response = this.chatModel.call(new Prompt(List.of(userMessage)));

@@ -251,9 +253,10 @@ void multiModalityPdfTest() throws IOException {

var pdfData = new ClassPathResource("/spring-ai-reference-overview.pdf");

var userMessage = new UserMessage(
"You are a very professional document summarization specialist. Please summarize the given document.",
List.of(new Media(new MimeType("application", "pdf"), pdfData)));
var userMessage = UserMessage.builder()
.text("You are a very professional document summarization specialist. Please summarize the given document.")
.media(List.of(new Media(new MimeType("application", "pdf"), pdfData)))
.build();

var response = this.chatModel.call(new Prompt(List.of(userMessage),
ToolCallingChatOptions.builder().model(AnthropicApi.ChatModel.CLAUDE_3_5_SONNET.getName()).build()));
@@ -234,8 +234,10 @@ void multiModalityTest() throws IOException {

var imageData = new ClassPathResource("/test.png");

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)))
.build();

var response = this.chatModel.call(new Prompt(List.of(userMessage)));

@@ -265,8 +265,10 @@ void streamFunctionCallTest() {
void multiModalityEmbeddedImage(String modelName) {
var imageData = new ClassPathResource("/test.png");

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)))
.build();

var response = this.chatModel
.call(new Prompt(List.of(userMessage), ChatOptions.builder().model(modelName).build()));
@@ -279,11 +281,13 @@ void multiModalityEmbeddedImage(String modelName) {
@ParameterizedTest(name = "{0} : {displayName} ")
@ValueSource(strings = { "pixtral-large-latest" })
void multiModalityImageUrl(String modelName) throws IOException {
var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()))
.build();

ChatResponse response = this.chatModel
.call(new Prompt(List.of(userMessage), ChatOptions.builder().model(modelName).build()));
@@ -295,11 +299,13 @@ void multiModalityImageUrl(String modelName) throws IOException {

@Test
void streamingMultiModalityImageUrl() throws IOException {
var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()))
.build();

Flux<ChatResponse> response = this.streamingChatModel.stream(new Prompt(List.of(userMessage),
ChatOptions.builder().model(MistralAiApi.ChatModel.PIXTRAL_LARGE.getValue()).build()));
@@ -51,8 +51,10 @@ class OllamaChatModelMultimodalIT extends BaseOllamaIT {
void unsupportedMediaType() {
var imageData = new ClassPathResource("/norway.webp");

var userMessage = new UserMessage("Explain what do you see in this picture?",
List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)));
var userMessage = UserMessage.builder()
.text("Explain what do you see in this picture?")
.media(List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)))
.build();

assertThatThrownBy(() -> this.chatModel.call(new Prompt(List.of(userMessage))))
.isInstanceOf(RuntimeException.class);
@@ -62,8 +64,10 @@ void unsupportedMediaType() {
void multiModalityTest() {
var imageData = new ClassPathResource("/test.png");

var userMessage = new UserMessage("Explain what do you see in this picture?",
List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)));
var userMessage = UserMessage.builder()
.text("Explain what do you see in this picture?")
.media(List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)))
.build();

var response = this.chatModel.call(new Prompt(List.of(userMessage)));

@@ -17,6 +17,7 @@
package org.springframework.ai.openai.chat;

import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.util.List;
import java.util.Map;
@@ -127,8 +128,10 @@ public void userMessageWithMediaType() throws MalformedURLException {
.willReturn(Mockito.mock(ResponseEntity.class));

URL mediaUrl = new URL("http://test");
this.chatModel.call(new Prompt(List.of(new UserMessage("test message",
List.of(Media.builder().mimeType(MimeTypeUtils.IMAGE_JPEG).data(mediaUrl).build())))));
this.chatModel.call(new Prompt(List.of(UserMessage.builder()
.text("test message")
.media(List.of(Media.builder().mimeType(MimeTypeUtils.IMAGE_JPEG).data(mediaUrl).build()))
.build())));

validateComplexContent(this.pomptCaptor.getValue());
}
@@ -139,10 +142,11 @@ public void streamUserMessageWithMediaType() throws MalformedURLException {
given(this.openAiApi.chatCompletionStream(this.pomptCaptor.capture(), this.headersCaptor.capture()))
.willReturn(this.fluxResponse);

URL mediaUrl = new URL("http://test");
this.chatModel.stream(new Prompt(List.of(new UserMessage("test message",
List.of(Media.builder().mimeType(MimeTypeUtils.IMAGE_JPEG).data(mediaUrl).build())))))
.subscribe();
URI mediaUrl = URI.create("http://test");
this.chatModel.stream(new Prompt(List.of(UserMessage.builder()
.text("test message")
.media(List.of(Media.builder().mimeType(MimeTypeUtils.IMAGE_JPEG).data(mediaUrl).build()))
.build()))).subscribe();

validateComplexContent(this.pomptCaptor.getValue());
}
@@ -457,8 +457,10 @@ void multiModalityEmbeddedImage(String modelName) throws IOException {

var imageData = new ClassPathResource("/test.png");

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)))
.build();

var response = this.chatModel
.call(new Prompt(List.of(userMessage), OpenAiChatOptions.builder().model(modelName).build()));
@@ -472,11 +474,13 @@ void multiModalityEmbeddedImage(String modelName) throws IOException {
@ValueSource(strings = { "gpt-4o" })
void multiModalityImageUrl(String modelName) throws IOException {

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()))
.build();

ChatResponse response = this.chatModel
.call(new Prompt(List.of(userMessage), OpenAiChatOptions.builder().model(modelName).build()));
@@ -489,11 +493,13 @@ void multiModalityImageUrl(String modelName) throws IOException {
@Test
void streamingMultiModalityImageUrl() throws IOException {

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()))
.build();

Flux<ChatResponse> response = this.streamingChatModel.stream(new Prompt(List.of(userMessage),
OpenAiChatOptions.builder().model(OpenAiApi.ChatModel.GPT_4_O.getValue()).build()));
@@ -552,8 +558,10 @@ void streamingMultiModalityOutputAudio(String modelName) throws IOException {
@ValueSource(strings = { "gpt-4o-audio-preview" })
void multiModalityInputAudio(String modelName) {
var audioResource = new ClassPathResource("speech1.mp3");
var userMessage = new UserMessage("What is this recording about?",
List.of(new Media(MimeTypeUtils.parseMimeType("audio/mp3"), audioResource)));
var userMessage = UserMessage.builder()
.text("What is this recording about?")
.media(List.of(new Media(MimeTypeUtils.parseMimeType("audio/mp3"), audioResource)))
.build();

ChatResponse response = this.chatModel
.call(new Prompt(List.of(userMessage), ChatOptions.builder().model(modelName).build()));
@@ -567,8 +575,10 @@ void multiModalityInputAudio(String modelName) {
@ValueSource(strings = { "gpt-4o-audio-preview" })
void streamingMultiModalityInputAudio(String modelName) {
var audioResource = new ClassPathResource("speech1.mp3");
var userMessage = new UserMessage("What is this recording about?",
List.of(new Media(MimeTypeUtils.parseMimeType("audio/mp3"), audioResource)));
var userMessage = UserMessage.builder()
.text("What is this recording about?")
.media(List.of(new Media(MimeTypeUtils.parseMimeType("audio/mp3"), audioResource)))
.build();

Flux<ChatResponse> response = this.chatModel
.stream(new Prompt(List.of(userMessage), OpenAiChatOptions.builder().model(modelName).build()));
@@ -299,8 +299,10 @@ void multiModalityEmbeddedImage(String modelName) throws IOException {

var imageData = new ClassPathResource("/test.png");

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)))
.build();

var response = this.chatModel
.call(new Prompt(List.of(userMessage), OpenAiChatOptions.builder().model(modelName).build()));
@@ -315,11 +317,13 @@ void multiModalityEmbeddedImage(String modelName) throws IOException {
@ValueSource(strings = { "llama3-70b-8192" })
void multiModalityImageUrl(String modelName) throws IOException {

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()))
.build();

ChatResponse response = this.chatModel
.call(new Prompt(List.of(userMessage), OpenAiChatOptions.builder().model(modelName).build()));
@@ -333,11 +337,13 @@ void multiModalityImageUrl(String modelName) throws IOException {
@Test
void streamingMultiModalityImageUrl() throws IOException {

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()))
.build();

Flux<ChatResponse> response = this.chatModel.stream(new Prompt(List.of(userMessage)));

@@ -17,6 +17,7 @@
package org.springframework.ai.openai.chat.proxy;

import java.io.IOException;
import java.net.URI;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
@@ -305,8 +306,10 @@ void multiModalityEmbeddedImage(String modelName) throws IOException {

var imageData = new ClassPathResource("/test.png");

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(new Media(MimeTypeUtils.IMAGE_PNG, imageData)))
.build();

var response = this.chatModel
.call(new Prompt(List.of(userMessage), OpenAiChatOptions.builder().model(modelName).build()));
@@ -321,11 +324,13 @@ void multiModalityImageUrl(String modelName) throws IOException {
@ValueSource(strings = { "mistral-small-latest" })
void multiModalityImageUrl(String modelName) throws IOException {

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()))
.build();

ChatResponse response = this.chatModel
.call(new Prompt(List.of(userMessage), OpenAiChatOptions.builder().model(modelName).build()));
@@ -339,11 +344,13 @@ void multiModalityImageUrl(String modelName) throws IOException {
@Test
void streamingMultiModalityImageUrl() throws IOException {

var userMessage = new UserMessage("Explain what do you see on this picture?",
List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(new URL("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()));
var userMessage = UserMessage.builder()
.text("Explain what do you see on this picture?")
.media(List.of(Media.builder()
.mimeType(MimeTypeUtils.IMAGE_PNG)
.data(URI.create("https://docs.spring.io/spring-ai/reference/_images/multimodal.test.png"))
.build()))
.build();

Flux<ChatResponse> response = this.chatModel.stream(new Prompt(List.of(userMessage)));
