Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -728,19 +728,23 @@ protected List<Generation> responseCandidateToGeneration(Candidate candidate) {
.orElse(List.of())
.stream()
.filter(part -> part.toolCall().isEmpty() && part.toolResponse().isEmpty())
.filter(part -> StringUtils.hasText(part.text().orElse("")))
.map(part -> {
var partMessageMetadata = new HashMap<>(messageMetadata);
partMessageMetadata.put("isThought", part.thought().orElse(false));
return AssistantMessage.builder()
.content(part.text().orElse(""))
.content(part.text().get())
.properties(partMessageMetadata)
.build();
})
.map(assistantMessage -> new Generation(assistantMessage, chatGenerationMetadata))
.toList();

// If all parts were server-side tool invocations, return a single generation
// with empty text but with the server-side tool invocation metadata
// If all parts were server-side tool invocations or had no text content,
// return a single generation with the metadata (no empty text parts).
// Empty text AssistantMessages must not be added to chat history because
// the Google API rejects subsequent requests containing content with no parts.
// See: https://github.com/spring-projects/spring-ai/issues/4556
if (generations.isEmpty()) {
AssistantMessage assistantMessage = AssistantMessage.builder()
.content("")
Expand Down Expand Up @@ -1005,6 +1009,7 @@ private List<Content> toGeminiContent(List<Message> instructions) {
.role(toGeminiMessageType(message.getMessageType()).getValue())
.parts(messageToGeminiParts(message))
.build())
.filter(content -> content.parts().isPresent() && !content.parts().get().isEmpty())
.toList();

return contents;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -441,4 +441,77 @@ void testUsageWithNullMetadata() {
assertThat(genAiUsage.getCachedContentTokenCount()).isNull();
}

@Test
void testResponseCandidateWithEmptyTextPartsProducesNoEmptyGenerations() {
	// Regression test for https://github.com/spring-projects/spring-ai/issues/4556
	// Parts without text (e.g. thought-signature-only parts) must not yield an
	// AssistantMessage with empty content: the Google API rejects follow-up
	// requests whose Content carries an empty parts list.

	// A part that carries no text at all, mimicking a thought-signature-only
	// part in a Gemini response.
	Part partWithoutText = Part.builder().build();
	Content candidateContent = Content.builder().parts(partWithoutText).build();

	GenerateContentResponseUsageMetadata usage = GenerateContentResponseUsageMetadata.builder()
		.promptTokenCount(10)
		.candidatesTokenCount(0)
		.totalTokenCount(10)
		.build();

	Candidate onlyCandidate = Candidate.builder().content(candidateContent).index(0).build();

	GenerateContentResponse stubbedResponse = GenerateContentResponse.builder()
		.candidates(List.of(onlyCandidate))
		.usageMetadata(usage)
		.modelVersion("gemini-2.0-flash")
		.build();

	this.chatModel.setMockGenerateContentResponse(stubbedResponse);

	ChatResponse response = this.chatModel.call(new Prompt(List.of(new UserMessage("Hello"))));

	// Exactly one generation is expected (the empty-content fallback),
	// and its output text must never be null.
	assertThat(response.getResults()).isNotNull();
	assertThat(response.getResults()).hasSize(1);
	assertThat(response.getResults().get(0).getOutput().getText()).isNotNull();
}

@Test
void testToGeminiContentFiltersOutEmptyPartContent() {
	// Regression test for https://github.com/spring-projects/spring-ai/issues/4556
	// toGeminiContent must drop Content entries whose parts list is empty,
	// since the Google API rejects requests containing empty-parts Content.

	// A candidate carrying genuine text content.
	Content candidateContent = Content.builder()
		.parts(Part.builder().text("Hello from Gemini").build())
		.build();

	GenerateContentResponseUsageMetadata usage = GenerateContentResponseUsageMetadata.builder()
		.promptTokenCount(5)
		.candidatesTokenCount(4)
		.totalTokenCount(9)
		.build();

	GenerateContentResponse stubbedResponse = GenerateContentResponse.builder()
		.candidates(List.of(Candidate.builder().content(candidateContent).index(0).build()))
		.usageMetadata(usage)
		.modelVersion("gemini-2.0-flash")
		.build();

	this.chatModel.setMockGenerateContentResponse(stubbedResponse);

	ChatResponse response = this.chatModel.call(new Prompt(List.of(new UserMessage("Hi"))));

	// The single text part must come through unchanged.
	assertThat(response.getResults()).hasSize(1);
	assertThat(response.getResults().get(0).getOutput().getText()).isEqualTo("Hello from Gemini");
}

}