5 changes: 5 additions & 0 deletions libs/3rdparty/python/BUILD
@@ -39,6 +39,11 @@ python_requirements(
    source="rich-requirements.txt",
)

python_requirements(
    name="a2a",
    source="a2a-requirements.txt",
)

python_requirements(
    name="mcp",
    source="mcp-requirements.txt",
1 change: 1 addition & 0 deletions libs/3rdparty/python/a2a-requirements.txt
@@ -0,0 +1 @@
a2a-sdk>=0.3.0
44 changes: 44 additions & 0 deletions libs/next_gen_ui_a2a/BUILD
@@ -0,0 +1,44 @@
# This target sets the metadata for all the Python non-test files in this directory.
python_sources(
    name="lib",
    dependencies=[
        "libs/3rdparty/python:a2a",
    ],
)

# This target sets the metadata for all the Python test files in this directory.
python_tests(
    name="tests",
    dependencies=[
        "libs/3rdparty/python:a2a",
    ],
)

# This target allows us to build a `.whl` bdist and a `.tar.gz` sdist by auto-generating
# `setup.py`. See https://www.pantsbuild.org/docs/python-distributions.
#
# Because this target has no source code, Pants cannot infer dependencies. We depend on `:lib`,
# which means we'll include all the non-test Python files in this directory, and any of
# their dependencies.
python_distribution(
    name="dist",
    dependencies=[
        ":lib",
    ],
    provides=python_artifact(
        name="next_gen_ui_a2a",
        version=env("VERSION"),
        license="Apache-2.0",
        description="A2A integration for Next Gen UI Agent",
        long_description_content_type="text/markdown",
        classifiers=[
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.12",
            "Programming Language :: Python :: 3.13",
            "License :: OSI Approved :: Apache Software License",
        ],
        url="https://github.com/RedHat-UX/next-gen-ui-agent",
    ),
    long_description_path="libs/next_gen_ui_a2a/README.md",
    generate_setup=True,
)
30 changes: 30 additions & 0 deletions libs/next_gen_ui_a2a/Containerfile
@@ -0,0 +1,30 @@
FROM registry.access.redhat.com/ubi9/python-312

# Set work directory
WORKDIR /opt/app-root/

# Install dependencies
RUN pip install "a2a-sdk[http-server]" uvicorn langchain_openai


# Copy Python project files (build context must be the `libs/next_gen_ui_a2a` directory)
COPY . /opt/app-root

USER root

# Install the next_gen_ui wheels (mounted from the project `dist` directory)
RUN pip install \
    /opt/ngui-dist/next_gen_ui_agent*.whl \
    /opt/ngui-dist/next_gen_ui_rhds*.whl


# Allow the non-root user to access everything in app-root
RUN chgrp -R root /opt/app-root/ && chmod -R g+rwx /opt/app-root/

# Expose default port (change if needed)
EXPOSE 9999

USER 1001

# Run the agent
CMD python .
222 changes: 222 additions & 0 deletions libs/next_gen_ui_a2a/README.md
@@ -0,0 +1,222 @@
# Next Gen UI Agent A2A Protocol Integration

The [A2A Protocol](https://a2a-protocol.org/) defines a standard way to communicate with agents
and provides interoperability through client SDKs in different languages.

This package provides, or helps you build:
1. A standard A2A API for the Next Gen UI agent
2. An HTTP server to run the A2A API and execute the agent
3. A container image

To interact with the agent over the A2A protocol, use any A2A client implementation (see the client example below).

## Installation

```sh
pip install -U next_gen_ui_a2a
```
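
The package re-exports `NextGenUIAgentExecutor` at the top level (see `__init__.py` in this diff), so the executor can be imported directly:

```py
from next_gen_ui_a2a import NextGenUIAgentExecutor
```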

## Examples

### Run A2A server with Next Gen UI agent

```py
import os

import uvicorn
from a2a.server.apps import A2AStarletteApplication
from a2a.server.request_handlers import DefaultRequestHandler
from a2a.server.tasks import InMemoryTaskStore
from langchain_openai import ChatOpenAI

from next_gen_ui_a2a.agent_card import card
from next_gen_ui_a2a.agent_executor import NextGenUIAgentExecutor
from next_gen_ui_agent.model import LangChainModelInference
from next_gen_ui_agent.types import AgentConfig

if not os.environ.get("OPENAI_API_KEY"):
    os.environ["OPENAI_API_KEY"] = "ollama"
model = os.getenv("INFERENCE_MODEL", "llama3.2")
base_url = os.getenv("OPEN_API_URL", "http://localhost:11434/v1")

# Create the chat model used by the next_gen_ui agent
llm = ChatOpenAI(model=model, base_url=base_url)
inference = LangChainModelInference(llm)
config = AgentConfig(inference=inference)

request_handler = DefaultRequestHandler(
    agent_executor=NextGenUIAgentExecutor(config),
    task_store=InMemoryTaskStore(),
)

server = A2AStarletteApplication(
    agent_card=card,
    http_handler=request_handler,
)

uvicorn.run(server.build(), host="0.0.0.0", port=9999)
```
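
This is the same wiring that ships in the package's `__main__.py` (shown later in this diff), which the container image starts with `python .`. A minimal sketch of running it from a checkout, assuming the agent's dependencies are already installed:

```sh
cd libs/next_gen_ui_a2a
python .   # runs __main__.py and serves on port 9999
```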

### Run A2A client

```py
import logging
from uuid import uuid4

import httpx
from a2a.client import A2ACardResolver, A2AClient
from a2a.types import (  # SendStreamingMessageRequest,
    AgentCard,
    Message,
    MessageSendParams,
    Part,
    Role,
    SendMessageRequest,
    TextPart,
)
from a2a.utils.constants import AGENT_CARD_WELL_KNOWN_PATH


async def main() -> None:
    # Configure logging to show INFO level messages
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)  # Get a logger instance

    base_url = "http://localhost:9999"

    async with httpx.AsyncClient(timeout=120) as httpx_client:
        # Initialize A2ACardResolver
        resolver = A2ACardResolver(
            httpx_client=httpx_client,
            base_url=base_url,
            # agent_card_path uses default, extended_agent_card_path also uses default
        )

        # Fetch the public agent card and initialize the client
        final_agent_card_to_use: AgentCard | None = None

        try:
            logger.info(
                f"Attempting to fetch public agent card from: {base_url}{AGENT_CARD_WELL_KNOWN_PATH}"
            )
            _public_card = (
                await resolver.get_agent_card()
            )  # Fetches from default public path
            logger.info("Successfully fetched public agent card:")
            logger.info(_public_card.model_dump_json(indent=2, exclude_none=True))
            final_agent_card_to_use = _public_card
            logger.info(
                "\nUsing PUBLIC agent card for client initialization (default)."
            )

        except Exception as e:
            logger.exception("Critical error fetching public agent card")
            raise RuntimeError(
                "Failed to fetch the public agent card. Cannot continue."
            ) from e

        client = A2AClient(
            httpx_client=httpx_client,
            agent_card=final_agent_card_to_use,
        )
        logger.info("A2AClient initialized.")

        movies_data = {
            "movie": {
                "languages": ["English"],
                "year": 1995,
                "imdbId": "0114709",
                "runtime": 81,
                "imdbRating": 8.3,
                "movieId": "1",
                "countries": ["USA"],
                "imdbVotes": 591836,
                "title": "Toy Story",
                "url": "https://themoviedb.org/movie/862",
                "revenue": 373554033,
                "tmdbId": "862",
                "plot": "A cowboy doll is profoundly threatened and jealous when a new spaceman figure supplants him as top toy in a boy's room.",
                "posterUrl": "https://image.tmdb.org/t/p/w440_and_h660_face/uXDfjJbdP4ijW5hWSBrPrlKpxab.jpg",
                "released": "2022-11-02",
                "trailerUrl": "https://www.youtube.com/watch?v=v-PjgYDrg70",
                "budget": 30000000,
            },
            "actors": ["Jim Varney", "Tim Allen", "Tom Hanks", "Don Rickles"],
        }

        message = Message(
            role=Role.user,
            parts=[
                Part(
                    root=TextPart(
                        text="Tell me details about Toy Story",
                        metadata={
                            "data": movies_data,
                            "type": "search_movie",
                        },
                    )
                ),
                # Part(root=DataPart(data=movies_data)),
            ],
            message_id=str(uuid4()),
        )
        request = SendMessageRequest(
            id=str(uuid4()), params=MessageSendParams(message=message)
        )

        response = await client.send_message(request)
        logger.info("Execution finished.")
        print(response.model_dump(mode="json", exclude_none=True))

        # streaming_request = SendStreamingMessageRequest(
        #     id=str(uuid4()), params=MessageSendParams(message=message)
        # )
        # stream_response = client.send_message_streaming(streaming_request)
        # async for chunk in stream_response:
        #     print(chunk.model_dump(mode="json", exclude_none=True))


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())
```
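
The commented-out `SendStreamingMessageRequest` block is the streaming variant: the agent card advertises `streaming=True`, so `client.send_message_streaming(...)` yields the result as incremental chunks instead of a single response.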


## Build Container Image

The agent can also be packaged as a container image using the provided Containerfile.

1. Build the project:

```sh
pants package ::
```

2. Navigate to the `libs/next_gen_ui_a2a` directory:

```sh
cd libs/next_gen_ui_a2a
```

3. Build the container image:

```sh
export PROJ_DIST_DIR=$(realpath ../../dist)
podman build . -v $PROJ_DIST_DIR:/opt/ngui-dist:ro,z -t ngui-a2a-server
```

> [!Tip]
> Podman is a drop-in replacement for `docker`, so `docker` can also be used in these commands.

4. Run your container:

```sh
podman run --rm -p 9999:9999 \
  -e INFERENCE_MODEL=llama3.2 \
  -e OPEN_API_URL=http://host.containers.internal:11434/v1 \
  ngui-a2a-server
```
5. Validate the A2A server:

```sh
curl http://localhost:9999/.well-known/agent-card.json
```
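
The response should resemble the card defined in `agent_card.py` (an abbreviated sketch; the field names assume the A2A agent-card JSON serialization, which uses camelCase):

```json
{
  "name": "Next Gen UI Agent",
  "description": "Generates UI component based on structured input data and user prompt",
  "url": "http://localhost:9999/",
  "version": "1.0.0",
  "capabilities": { "streaming": true },
  "defaultInputModes": ["text"],
  "defaultOutputModes": ["text"],
  "skills": [{ "id": "generate_ui_components", "name": "Generates UI component", "tags": ["ui"] }]
}
```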
5 changes: 5 additions & 0 deletions libs/next_gen_ui_a2a/__init__.py
@@ -0,0 +1,5 @@
from next_gen_ui_a2a.agent_executor import NextGenUIAgentExecutor

__all__ = [
    "NextGenUIAgentExecutor",
]
41 changes: 41 additions & 0 deletions libs/next_gen_ui_a2a/__main__.py
@@ -0,0 +1,41 @@
import logging
import os

import uvicorn # pants: no-infer-dep
from a2a.server.apps import A2AStarletteApplication # pants: no-infer-dep
from a2a.server.request_handlers import DefaultRequestHandler # pants: no-infer-dep
from a2a.server.tasks import InMemoryTaskStore # pants: no-infer-dep
from agent_card import card # type: ignore[import-not-found]
from agent_executor import NextGenUIAgentExecutor # type: ignore[import-not-found]
from langchain_openai import ChatOpenAI # pants: no-infer-dep
from next_gen_ui_agent.model import LangChainModelInference
from next_gen_ui_agent.types import AgentConfig

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    if not os.environ.get("OPENAI_API_KEY"):
        os.environ["OPENAI_API_KEY"] = "ollama"
    model = os.getenv("INFERENCE_MODEL", "llama3.2")
    base_url = os.getenv("OPEN_API_URL", "http://localhost:11434/v1")

    logger.info(
        "Starting Next Gen UI A2A Server. base_url=%s, model=%s", base_url, model
    )

    llm = ChatOpenAI(model=model, base_url=base_url)
    inference = LangChainModelInference(llm)
    config = AgentConfig(inference=inference)

    request_handler = DefaultRequestHandler(
        agent_executor=NextGenUIAgentExecutor(config),
        task_store=InMemoryTaskStore(),
    )

    server = A2AStarletteApplication(
        agent_card=card,
        http_handler=request_handler,
    )

    uvicorn.run(server.build(), host="0.0.0.0", port=9999)
24 changes: 24 additions & 0 deletions libs/next_gen_ui_a2a/agent_card.py
@@ -0,0 +1,24 @@
from a2a.types import AgentCapabilities, AgentCard, AgentSkill

skill = AgentSkill(
    id="generate_ui_components",
    name="Generates UI component",
    description="Returns generated UI component",
    tags=["ui"],
    examples=[
        "The first message TextPart should be the user prompt. Backend data can be passed as a 'data' field in the metadata or in following DataParts."
    ],
)

# This will be the public-facing agent card
card = AgentCard(
    name="Next Gen UI Agent",
    description="Generates UI component based on structured input data and user prompt",
    url="http://localhost:9999/",
    version="1.0.0",
    default_input_modes=["text"],
    default_output_modes=["text"],
    capabilities=AgentCapabilities(streaming=True),
    skills=[skill],
    supports_authenticated_extended_card=False,
)