---
manifest:
version: "0.1.2"
schema: https://a2as.org/cert/schema
subject:
name: abdibrokhim/twitter-replyguy
source: https://github.com/abdibrokhim/twitter-replyguy
branch: main
commit: "781614d0"
scope: [backend-python/aapp/aagents/reply_generator.py, backend-python/aapp/aagents/tweet_finder.py, backend-python/aapp/config.py,
backend-python/aapp/utils/openai_utils.py]
issued:
by: A2AS.org
at: '2026-01-26T16:16:16Z'
signatures:
digest: sha256:_Lfp_zZ3v6fEonw5Am8RTw3syE6BqYQZhPyCMmA1vzc
key: ed25519:T2HZgcrA0DZKqCt7h4xQE_DtA0jSlqeLITRxtDbjlig
sig: ed25519:HUzSgWKy8HZjez-DtkmuGIXrTa3ZVtPlsmw4cC_MTbAf2R06J6D3x1nW3dEYw-EwXm8le6QOXfOvCgd9VDkuAQ

agents:
agent.0:
type: instance
models: [OPENAI_MODEL]
tools: [search_twitter, analyze_tweet_potential]
params:
function: _create_agent
name: TweetFinder
instance: agent
instructions: ['You are a Twitter search and analysis expert. Your job is to:', 1. Generate realistic tweets based on
search criteria, 2. Format tweets with proper structure, 3. Calculate viral potential for each tweet, 'When generating
tweets:', '- Make them realistic and on-topic for the search query', '- Include appropriate hashtags and mentions
when relevant', '- Vary the engagement metrics realistically', '- Ensure timestamps are recent (minutes to hours
ago)', '- Mix verified and non-verified authors', 'For viral potential calculation, consider:', '- Engagement rate
(likes, replies, retweets relative to views)', '- Recency of the tweet', '- Verified status of the author', '- Content
quality and likelihood of going viral', 'The viral potential should be a score from 0-100 where:', '- 90-100: Extremely
viral, trending content', '- 70-89: High potential for virality', '- 50-69: Above average engagement expected',
'- 30-49: Moderate engagement expected', '- 0-29: Low engagement expected']
agent.1:
type: instance
models: [OPENAI_MODEL]
tools: [analyze_tweet, evaluate_reply]
params:
function: _create_agent
name: ReplyGenerator
instance: agent
instructions: ['You are a Reply Guy expert, specialized in creating high-engagement replies to tweets.', 'Your job is
to:', 1. Generate replies that are likely to get engagement, 2. Evaluate potential engagement for each reply, 3.
List strengths of each reply, 'Guidelines for high-quality replies:', '- Ask thoughtful questions related to the
tweet', '- Add additional value or insights', '- Use appropriate humor when suitable', '- Be authentic and conversational',
'- Keep replies concise (under 280 characters)', '- Avoid generic comments or empty praise', '- Don''t be too self-promotional',
'- Tailor the tone to match the original tweet', '- Add relevant hashtags when appropriate, but don''t overdo it',
'- Create different types of replies (questions, insights, related experiences)', 'When evaluating replies, consider:',
'- Relevance to the original tweet', '- Likelihood of getting engagement (likes, replies)', '- Uniqueness and originality',
'- Potential to start a conversation', '- Appropriate length and tone', 'For estimated engagement score:', '- 90-100:
Exceptional, likely to get high engagement', '- 70-89: Very good, above average engagement expected', '- 50-69:
Good, moderate engagement expected', '- 30-49: Average, some engagement possible', '- 0-29: Below average, minimal
engagement expected']

models:
OPENAI_MODEL:
type: variable
agents: [agent.0, agent.1]

tools:
analyze_tweet:
type: decorator
agents: [agent.1]
params:
description: "Analyze a tweet to understand its context and tone.\n\nArgs:\n tweet_content: The content of the tweet\
\ to reply to\n author: The author of the tweet\n \nReturns:\n Analysis details including topics, tone, and\
\ engagement factors"
analyze_tweet_potential:
type: decorator
agents: [agent.0]
params:
description: "Analyze a tweet to calculate its viral potential score.\n\nArgs:\n tweet_content: The content of the\
\ tweet\n author_verified: Whether the author is verified\n metrics: Dictionary with engagement metrics (likes,\
\ replies, retweets, views)\n timestamp: When the tweet was posted (e.g. \"10 minutes ago\")\n \nReturns:\n\
\ A viral potential score from 0-100"
evaluate_reply:
type: decorator
agents: [agent.1]
params:
description: "Evaluate the quality and potential engagement of a reply.\n\nArgs:\n reply_content: The content of\
\ the reply to evaluate\n original_tweet: The original tweet being replied to\n \nReturns:\n Evaluation details\
\ including strengths and estimated engagement score"
search_twitter:
type: decorator
agents: [agent.0]
params:
description: "Search Twitter for recent tweets matching criteria.\n\nArgs:\n search_query: The search query to find\
\ tweets. Use Twitter search syntax.\n max_results: Maximum number of tweets to return (default: 5)\n \nReturns:\n\
\ A list of tweets matching the search criteria"

imports:
Agent: agents.Agent
Any: typing.Any
APIRouter: fastapi.APIRouter
app: aapp.main.app
asyncio: asyncio
BaseModel: pydantic.BaseModel
calculate_viral_potential: tweet_utils.calculate_viral_potential
CORSMiddleware: fastapi.middleware.cors.CORSMiddleware
create_model: pydantic.create_model
DEBUG: aapp.config.DEBUG
Depends: fastapi.Depends
Dict: typing.Dict
FastAPI: fastapi.FastAPI
function_tool: agents.function_tool
generate_completion: openai_utils.generate_completion
generate_structured_output: openai_utils.generate_structured_output
Generic: typing.Generic
HTTPException: fastapi.HTTPException
json: json
List: typing.List
load_dotenv: dotenv.load_dotenv
logging: logging
MAX_REPLIES_TO_GENERATE: aapp.config.MAX_REPLIES_TO_GENERATE
MAX_TWEETS_TO_FETCH: aapp.config.MAX_TWEETS_TO_FETCH
OpenAI: openai.OpenAI
OPENAI_API_KEY: aapp.config.OPENAI_API_KEY
OPENAI_MODEL: aapp.config.OPENAI_MODEL
Optional: typing.Optional
os: os
replies: aapp.routers.replies
replies_router: replies.router
Reply: aapp.models.Reply
ReplyGeneratorAgent: reply_generator.ReplyGeneratorAgent
ReplyRequest: aapp.models.ReplyRequest
ReplyResponse: aapp.models.ReplyResponse
Runner: agents.Runner
sys: sys
time: time
Tweet: aapp.models.Tweet
TweetAuthor: aapp.models.TweetAuthor
TweetFilterRequest: aapp.models.TweetFilterRequest
TweetFinderAgent: tweet_finder.TweetFinderAgent
TweetMetrics: aapp.models.TweetMetrics
TweetResponse: aapp.models.TweetResponse
tweets: aapp.routers.tweets
tweets_router: tweets.router
Type: typing.Type
TypeVar: typing.TypeVar
uuid: uuid
uvicorn: uvicorn
validate_replies: validation.validate_replies
validate_reply: validation.validate_reply
validate_tweet: validation.validate_tweet
validate_tweets: validation.validate_tweets

functions:
__init__:
type: sync
module: backend-python.aapp.aagents.reply_generator
args: [self]
_create_agent:
type: sync
module: backend-python.aapp.aagents.reply_generator
args: [self]
analyze_sentiment:
type: async
module: backend-python.aapp.utils.openai_utils
args: [text]
params:
returns: Dict
analyze_tweet:
type: tool
module: backend-python.aapp.aagents.reply_generator
args: [tweet_content, author]
params:
returns: Dict
analyze_tweet_potential:
type: tool
module: backend-python.aapp.aagents.tweet_finder
args: [tweet_content, author_verified, metrics, timestamp]
params:
returns: int
evaluate_reply:
type: tool
module: backend-python.aapp.aagents.reply_generator
args: [reply_content, original_tweet]
params:
returns: Dict
find_tweets:
type: async
module: backend-python.aapp.aagents.tweet_finder
args: [self, filters]
params:
returns: List[Tweet]
generate_completion:
type: async
module: backend-python.aapp.utils.openai_utils
args: [prompt, model, temperature, max_tokens, system_message]
params:
returns: str
generate_replies:
type: async
module: backend-python.aapp.aagents.reply_generator
args: [self, request]
params:
returns: List[Reply]
generate_structured_model_output:
type: async
module: backend-python.aapp.utils.openai_utils
args: [prompt, model_class, temperature, model, system_message]
params:
returns: T
generate_structured_output:
type: async
module: backend-python.aapp.utils.openai_utils
args: [prompt, output_schema, model, temperature, system_message]
params:
returns: Dict
get_openai_client:
type: sync
module: backend-python.aapp.utils.openai_utils
params:
returns: OpenAI
root:
type: async
module: backend-python.aapp.main
search_tweets:
type: async
module: backend-python.aapp.routers.tweets
args: [filters]
search_twitter:
type: tool
module: backend-python.aapp.aagents.tweet_finder
args: [search_query, max_results]
params:
returns: List[Dict]
setup_logger:
type: sync
module: backend-python.aapp.utils.logging
test_replies:
type: async
module: backend-python.aapp.routers.replies
args: [tweet_id]
test_tweets:
type: async
module: backend-python.aapp.routers.tweets

variables:
API_PREFIX:
type: env
params:
caller: [os.getenv]
path: [backend-python.aapp.config]
API_VERSION:
type: env
params:
caller: [os.getenv]
path: [backend-python.aapp.config]
DEBUG:
type: env
params:
caller: [os.getenv]
path: [backend-python.aapp.config]
MAX_REPLIES_TO_GENERATE:
type: env
params:
caller: [os.getenv]
path: [backend-python.aapp.config]
MAX_TWEETS_TO_FETCH:
type: env
params:
caller: [os.getenv]
path: [backend-python.aapp.config]
OPENAI_API_KEY:
type: env
params:
caller: [os.getenv]
path: [backend-python.aapp.config]
OPENAI_MODEL:
type: env
params:
caller: [os.getenv]
path: [backend-python.aapp.config]