LiteLlm + include_contents="none" results in empty content sent to the provider -> Error #3242

@DenisStefanAndrei

Description

Describe the bug
When using LlmAgent together with LiteLlm and include_contents='none', the provider throws an error saying that content cannot be empty.

To Reproduce
Here is the code:

import asyncio
import datetime
import logging
import os
import uuid
from zoneinfo import ZoneInfo

from google.adk import Runner
from google.adk.agents import SequentialAgent, LlmAgent
from google.adk.models.lite_llm import LiteLlm
from google.adk.sessions import DatabaseSessionService

from google.genai import types

os.environ["GEMINI_API_KEY"] = "API KEY HERE"
MODEL = "gemini/gemini-2.0-flash"


def get_weather(city: str) -> dict:
    """Retrieves the current weather report for a specified city.
    Args:
        city (str): The name of the city for which to retrieve the weather report.
    Returns:
        dict: status and result or error msg.
    """
    if city.lower() == "new york":
        return {
            "status": "success",
            "report": (
                "The weather in New York is sunny with a temperature of 25 degrees"
                " Celsius (77 degrees Fahrenheit)."
            ),
        }
    else:
        return {
            "status": "error",
            "error_message": f"Weather information for '{city}' is not available.",
        }


def get_current_time(city: str) -> dict:
    """Returns the current time in a specified city.
    Args:
        city (str): The name of the city for which to retrieve the current time.
    Returns:
        dict: status and result or error msg.
    """
    if city.lower() == "new york":
        tz_identifier = "America/New_York"
    else:
        return {
            "status": "error",
            "error_message": (
                f"Sorry, I don't have timezone information for {city}."
            ),
        }
    tz = ZoneInfo(tz_identifier)
    now = datetime.datetime.now(tz)
    report = (
        f'The current time in {city} is {now.strftime("%Y-%m-%d %H:%M:%S %Z%z")}'
    )
    return {"status": "success", "report": report}


weather_agent = LlmAgent(
    name="weather_time_agent",
    model=LiteLlm(model=MODEL),
    description=(
        "Agent that tells both time and weather"
    ),
    instruction=(
        "If the user provides a city, tell him both the weather and time"
    ),
    tools=[get_weather, get_current_time],
)

story_teller = LlmAgent(
    name="story_teller_agent",
    model=LiteLlm(model=MODEL),
    description=(
        "Create a short phrase about the city in the context"
    ),
    instruction=(
        "Create a short phrase about the city in the context"
    ),
)

story_teller2 = LlmAgent(
    name="story_teller_agent_continuation",
    model=LiteLlm(model=MODEL),
    include_contents="none",
    description=(
        "Continue the phrase of the last agent"
    ),
    instruction=(
        "Continue the phrase of the last agent with a short sentence"
    ),
)

root_agent = SequentialAgent(name="root_agent", sub_agents=[weather_agent, story_teller, story_teller2])


#################

WORKFLOW_APP_NAME = "TEST"
WORKFLOW_USER_ID = "denis"
SESSION_ID = str(uuid.uuid4())
print("session id is: ", SESSION_ID)
USER_INPUT = "new york"


async def runner_func():
    connection_url = 'postgresql://postgres:postgres@192.168.0.166:5432/modular'
    db_session = DatabaseSessionService(connection_url)

    initial_state = {
        "session_id": SESSION_ID
    }

    session = db_session.get_session(app_name=WORKFLOW_APP_NAME, user_id=WORKFLOW_USER_ID, session_id=SESSION_ID)
    if session is None:
        db_session.create_session(
            app_name=WORKFLOW_APP_NAME,
            user_id=WORKFLOW_USER_ID,
            session_id=SESSION_ID,
            state=initial_state
        )

    runner = Runner(
        agent=root_agent,
        app_name=WORKFLOW_APP_NAME,
        session_service=db_session
    )

    content = types.Content(role='user', parts=[types.Part(text=USER_INPUT)])

    events = []
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s'
    )

    async for event in runner.run_async(user_id=WORKFLOW_USER_ID, session_id=SESSION_ID,  # pyright: ignore
                                        new_message=content):  # pyright: ignore

        # print(f"  [Event] Author: {event.author}, Type: {type(event).__name__}, Final:"
        #                  f" {event.is_final_response()}, Content: {event.content}")
        events.append(event)
    return events


if __name__ == "__main__":
    asyncio.run(runner_func())

Running this leads to this error:

    raise BadRequestError(
litellm.exceptions.BadRequestError: litellm.BadRequestError: VertexAIException BadRequestError - {
  "error": {
    "code": 400,
    "message": "* GenerateContentRequest.contents: contents is not specified\n",
    "status": "INVALID_ARGUMENT"
  }
}
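
For reference, the 400 appears to come from the Gemini API itself, which refuses a GenerateContentRequest that has no contents. A minimal sketch of the same failure going through LiteLLM directly (hypothetical reproduction; it assumes GEMINI_API_KEY is set and that LiteLLM forwards the empty message list to the provider unchanged):

import litellm

# An empty messages list should surface the same
# "contents is not specified" BadRequestError from the provider.
litellm.completion(model="gemini/gemini-2.0-flash", messages=[])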

Running this in debug mode, I can see the system instruction, contents, and functions that are sent to the LLM provider:

LLM Request:
-----------------------------------------------------------
System Instruction:
Continue the phrase of the last agent with a short sentence

You are an agent. Your internal name is "story_teller_agent_continuation".

 The description about you is "Continue the phrase of the last agent"
-----------------------------------------------------------
Contents:

-----------------------------------------------------------
Functions:

-----------------------------------------------------------

Expected behavior
I would expect this to run the way it does if I replace LiteLlm with plain model="gemini-2.0-flash":

LLM Request:
-----------------------------------------------------------
System Instruction:
Continue the phrase of the last agent with a short sentence

You are an agent. Your internal name is "story_teller_agent_continuation".

 The description about you is "Continue the phrase of the last agent"
-----------------------------------------------------------
Contents:
{"parts":[{"text":"Handle the requests as specified in the System Instruction."}],"role":"user"}
-----------------------------------------------------------
Functions:

-----------------------------------------------------------

As you can see, in this case ADK itself adds the content {"parts":[{"text":"Handle the requests as specified in the System Instruction."}],"role":"user"}.

What can I do to make include_contents="none" work with LiteLlm?
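
In the meantime, a possible workaround seems to be re-adding the placeholder myself with a before_model_callback, which runs after ADK builds the request and before LiteLlm sends it. A minimal sketch (the callback name is mine; the placeholder text mirrors what ADK injects on the native Gemini path above):

from google.adk.agents.callback_context import CallbackContext
from google.adk.models.llm_request import LlmRequest
from google.genai import types

def ensure_nonempty_contents(callback_context: CallbackContext, llm_request: LlmRequest):
    # If include_contents="none" left the request with no contents, inject the
    # same placeholder user message the native Gemini path adds, so the
    # provider does not reject the request.
    if not llm_request.contents:
        llm_request.contents.append(
            types.Content(
                role="user",
                parts=[types.Part(text="Handle the requests as specified in the System Instruction.")],
            )
        )
    return None  # proceed with the (patched) request

story_teller2 = LlmAgent(
    name="story_teller_agent_continuation",
    model=LiteLlm(model=MODEL),
    include_contents="none",
    description="Continue the phrase of the last agent",
    instruction="Continue the phrase of the last agent with a short sentence",
    before_model_callback=ensure_nonempty_contents,
)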
