|
| 1 | +# Next Gen UI Agent A2A Protocol Integration |
| 2 | + |
| 3 | +The [A2A Protocol](https://a2a-protocol.org/) provides a standard way to communicate with agents |
| 4 | +and provides interoperability via client SDKs in different languages. |
| 5 | + |
| 6 | +This package provides, or helps you build: |
| 7 | +1. Standard A2A API to the Next Gen UI agent |
| 8 | +2. HTTP Server to run the A2A API and execute the agent |
| 9 | +3. Docker image |
| 10 | + |
| 11 | +To interact with the agent via the A2A protocol, use any A2A client implementation. |
| 12 | + |
| 13 | +## Installation |
| 14 | + |
| 15 | +```sh |
| 16 | +pip install -U next_gen_ui_a2a |
| 17 | +``` |
| 18 | + |
| 19 | +## Example |
| 20 | + |
| 21 | +### Run A2A server with Next Gen UI agent |
| 22 | + |
| 23 | +```py |
| 24 | +import os |
| 24 | + |
| 24 | +import uvicorn |
| 25 | +from a2a.server.apps import A2AStarletteApplication |
| 26 | +from a2a.server.request_handlers import DefaultRequestHandler |
| 27 | +from a2a.server.tasks import InMemoryTaskStore |
| 28 | +from langchain_openai import ChatOpenAI |
| 29 | + |
| 30 | +from next_gen_ui_a2a.agent_card import card |
| 31 | +from next_gen_ui_a2a.agent_executor import NextGenUIAgentExecutor |
| 32 | +from next_gen_ui_agent.model import LangChainModelInference |
| 33 | +from next_gen_ui_agent.types import AgentConfig |
| 34 | + |
| 35 | +if not os.environ.get("OPENAI_API_KEY"): |
| 36 | + os.environ["OPENAI_API_KEY"] = "ollama" |
| 37 | +model = os.getenv("INFERENCE_MODEL", "llama3.2") |
| 38 | +base_url = os.getenv("OPEN_API_URL", "http://localhost:11434/v1") |
| 39 | + |
| 40 | +# Create Chat API used by next_gen_ui agent |
| 41 | +llm = ChatOpenAI(model=model, base_url=base_url) |
| 42 | +inference = LangChainModelInference(llm) |
| 43 | +config = AgentConfig(inference=inference) |
| 44 | + |
| 45 | +request_handler = DefaultRequestHandler( |
| 46 | + agent_executor=NextGenUIAgentExecutor(config), |
| 47 | + task_store=InMemoryTaskStore(), |
| 48 | +) |
| 49 | + |
| 50 | +server = A2AStarletteApplication( |
| 51 | + agent_card=card, |
| 52 | + http_handler=request_handler, |
| 53 | +) |
| 54 | + |
| 55 | +uvicorn.run(server.build(), host="0.0.0.0", port=9999) |
| 56 | +``` |
| 57 | + |
| 58 | +### Run A2A client |
| 59 | + |
| 60 | +```py |
| 61 | +import logging |
| 62 | +from uuid import uuid4 |
| 63 | + |
| 64 | +import httpx |
| 65 | +from a2a.client import A2ACardResolver, A2AClient |
| 66 | +from a2a.types import ( # SendStreamingMessageRequest, |
| 67 | + AgentCard, |
| 68 | + Message, |
| 69 | + MessageSendParams, |
| 70 | + Part, |
| 71 | + Role, |
| 72 | + SendMessageRequest, |
| 73 | + TextPart, |
| 74 | +) |
| 75 | +from a2a.utils.constants import AGENT_CARD_WELL_KNOWN_PATH |
| 76 | + |
| 77 | + |
| 78 | +async def main() -> None: |
| 79 | + # Configure logging to show INFO level messages |
| 80 | + logging.basicConfig(level=logging.INFO) |
| 81 | + logger = logging.getLogger(__name__) # Get a logger instance |
| 82 | + |
| 83 | + base_url = "http://localhost:9999" |
| 84 | + |
| 85 | + async with httpx.AsyncClient(timeout=120) as httpx_client: |
| 86 | + # Initialize A2ACardResolver |
| 87 | + resolver = A2ACardResolver( |
| 88 | + httpx_client=httpx_client, |
| 89 | + base_url=base_url, |
| 90 | + # agent_card_path uses default, extended_agent_card_path also uses default |
| 91 | + ) |
| 92 | + |
| 93 | + # Fetch Public Agent Card and Initialize Client |
| 94 | + final_agent_card_to_use: AgentCard | None = None |
| 95 | + |
| 96 | + try: |
| 97 | + logger.info( |
| 98 | + f"Attempting to fetch public agent card from: {base_url}{AGENT_CARD_WELL_KNOWN_PATH}" |
| 99 | + ) |
| 100 | + _public_card = ( |
| 101 | + await resolver.get_agent_card() |
| 102 | + ) # Fetches from default public path |
| 103 | + logger.info("Successfully fetched public agent card:") |
| 104 | + logger.info(_public_card.model_dump_json(indent=2, exclude_none=True)) |
| 105 | + final_agent_card_to_use = _public_card |
| 106 | + logger.info( |
| 107 | + "\nUsing PUBLIC agent card for client initialization (default)." |
| 108 | + ) |
| 109 | + |
| 110 | + except Exception as e: |
| 111 | + logger.exception("Critical error fetching public agent card") |
| 112 | + raise RuntimeError( |
| 113 | + "Failed to fetch the public agent card. Cannot continue." |
| 114 | + ) from e |
| 115 | + |
| 116 | + client = A2AClient( |
| 117 | + httpx_client=httpx_client, |
| 118 | + agent_card=final_agent_card_to_use, |
| 119 | + ) |
| 120 | + logger.info("A2AClient initialized.") |
| 121 | + |
| 122 | + movies_data = { |
| 123 | + "movie": { |
| 124 | + "languages": ["English"], |
| 125 | + "year": 1995, |
| 126 | + "imdbId": "0114709", |
| 127 | + "runtime": 81, |
| 128 | + "imdbRating": 8.3, |
| 129 | + "movieId": "1", |
| 130 | + "countries": ["USA"], |
| 131 | + "imdbVotes": 591836, |
| 132 | + "title": "Toy Story", |
| 133 | + "url": "https://themoviedb.org/movie/862", |
| 134 | + "revenue": 373554033, |
| 135 | + "tmdbId": "862", |
| 136 | + "plot": "A cowboy doll is profoundly threatened and jealous when a new spaceman figure supplants him as top toy in a boy's room.", |
| 137 | + "posterUrl": "https://image.tmdb.org/t/p/w440_and_h660_face/uXDfjJbdP4ijW5hWSBrPrlKpxab.jpg", |
| 138 | + "released": "2022-11-02", |
| 139 | + "trailerUrl": "https://www.youtube.com/watch?v=v-PjgYDrg70", |
| 140 | + "budget": 30000000, |
| 141 | + }, |
| 142 | + "actors": ["Jim Varney", "Tim Allen", "Tom Hanks", "Don Rickles"], |
| 143 | + } |
| 144 | + |
| 145 | + message = Message( |
| 146 | + role=Role.user, |
| 147 | + parts=[ |
| 148 | + Part( |
| 149 | + root=TextPart( |
| 150 | + text="Tell me details about Toy Story", |
| 151 | + metadata={ |
| 152 | + "data": movies_data, |
| 153 | + "type": "search_movie", |
| 154 | + }, |
| 155 | + ) |
| 156 | + ), |
| 157 | + # Part(root=DataPart(data=movies_data)), |
| 158 | + ], |
| 159 | + message_id=str(uuid4()), |
| 160 | + ) |
| 161 | + request = SendMessageRequest( |
| 162 | + id=str(uuid4()), params=MessageSendParams(message=message) |
| 163 | + ) |
| 164 | + |
| 165 | + response = await client.send_message(request) |
| 166 | + logger.info("Execution finished.") |
| 167 | + print(response.model_dump(mode="json", exclude_none=True)) |
| 168 | + |
| 169 | + # streaming_request = SendStreamingMessageRequest( |
| 170 | + # id=str(uuid4()), params=MessageSendParams(message=message) |
| 171 | + # ) |
| 172 | + # stream_response = client.send_message_streaming(streaming_request) |
| 173 | + # async for chunk in stream_response: |
| 174 | + # print(chunk.model_dump(mode="json", exclude_none=True)) |
| 175 | + |
| 176 | + |
| 177 | +if __name__ == "__main__": |
| 178 | + import asyncio |
| 179 | + |
| 180 | + asyncio.run(main()) |
| 181 | +``` |
| 182 | + |
| 183 | + |
| 184 | +## Build Container Image |
| 185 | + |
| 186 | +The agent can also be built using a container file. |
| 187 | + |
| 188 | +1. Build project |
| 189 | + |
| 190 | +```sh |
| 191 | +pants package :: |
| 192 | +``` |
| 193 | + |
| 194 | +2. Navigate to the `libs/next_gen_ui_a2a` directory: |
| 195 | + |
| 196 | +```sh |
| 197 | +cd libs/next_gen_ui_a2a |
| 198 | +``` |
| 199 | + |
| 200 | +3. Build the container image |
| 201 | + |
| 202 | +```sh |
| 203 | +export PROJ_DIST_DIR=$(realpath ../../dist) |
| 204 | +podman build . -v $PROJ_DIST_DIR:/opt/ngui-dist:ro,z -t ngui-a2a-server |
| 205 | +``` |
| 206 | + |
| 207 | +> [!Tip] |
| 208 | +> Podman is a drop-in replacement for Docker, so the `docker` command can also be used in these commands. |
| 209 | +
|
| 210 | +4. Run your container |
| 211 | + |
| 212 | +```bash |
| 213 | +podman run --rm -p 9999:9999 \ |
| 214 | + -e INFERENCE_MODEL=llama3.2 \ |
| 215 | + -e OPEN_API_URL=http://host.containers.internal:11434/v1 \ |
| 216 | + ngui-a2a-server |
| 217 | +``` |
| 218 | +5. Validate the A2A server |
| 219 | + |
| 220 | +```sh |
| 221 | +curl http://localhost:9999/.well-known/agent-card.json |
| 222 | +``` |
0 commit comments