diff --git a/README.md b/README.md
index 2fd3d292d..d87eac7d9 100644
--- a/README.md
+++ b/README.md
@@ -2022,7 +2022,7 @@ Accelerate Bugs, Features, and Demos to implement by supporting us here:
 Join our growing community around the world, for real-time support, ideas, and discussions on Swarms 😊 
 
 - View our official [Blog](https://docs.swarms.world)
-- Chat live with us on [Discord](https://discord.gg/kS3rwKs3ZC)
+- Chat live with us on [Discord](https://discord.gg/jM3Z6M9uMq)
 - Follow us on [Twitter](https://twitter.com/kyegomez)
 - Connect with us on [LinkedIn](https://www.linkedin.com/company/the-swarm-corporation)
 - Visit us on [YouTube](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ)
diff --git a/deepseek_example.py b/deepseek_example.py
new file mode 100644
index 000000000..f1f34e7a2
--- /dev/null
+++ b/deepseek_example.py
@@ -0,0 +1,70 @@
+import os
+
+from dotenv import load_dotenv
+from openai import OpenAI
+
+from swarms import Agent
+from swarms.prompts.finance_agent_sys_prompt import (
+    FINANCIAL_AGENT_SYS_PROMPT,
+)
+
+load_dotenv()
+
+
+class DeepSeekChat:
+    def __init__(
+        self,
+        api_key: str = os.getenv("DEEPSEEK_API_KEY"),
+        system_prompt: str = None,
+    ):
+        self.api_key = api_key
+        self.system_prompt = system_prompt
+        self.client = OpenAI(
+            api_key=api_key, base_url="https://api.deepseek.com"
+        )
+
+    def run(self, task: str):
+        response = self.client.chat.completions.create(
+            model="deepseek-chat",
+            messages=[
+                {
+                    "role": "system",
+                    "content": self.system_prompt or "You are a helpful assistant",
+                },
+                {"role": "user", "content": task},
+            ],
+            stream=False,
+        )
+
+        print(response)
+
+        out = response.choices[0].message.content
+        print(out)
+
+        return out
+
+
+model = DeepSeekChat()
+
+# Initialize the agent
+agent = Agent(
+    agent_name="Financial-Analysis-Agent",
+    agent_description="Personal finance advisor agent",
+    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+    max_loops=1,
+    llm=model,
+    dynamic_temperature_enabled=True,
+    user_name="swarms_corp",
+    retry_attempts=3,
+    context_length=8192,
+    return_step_meta=False,
+    output_type="str",  # "json", "dict", "csv", "string", or "yaml"
+    auto_generate_prompt=False,  # Auto-generate the prompt from the agent's name, description, system prompt, and task
+    max_tokens=4000,  # max output tokens
+)
+
+print(
+    agent.run(
+        "Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.",
+    )
+)
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 372f4db91..22d72895b 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -229,6 +229,15 @@ nav:
     - Full API Reference: "swarms/framework/reference.md"
   - Examples:
     - Unique Swarms: "swarms/examples/unique_swarms.md"
+    - Various Model Providers:
+      - OpenAI: "swarms/examples/openai_example.md"
+      - Anthropic: "swarms/examples/claude.md"
+      - Groq: "swarms/examples/groq.md"
+      - Cohere: "swarms/examples/cohere.md"
+      - DeepSeek: "swarms/examples/deepseek.md"
+      - Ollama: "swarms/examples/ollama.md"
+      - OpenRouter: "swarms/examples/openrouter.md"
+      - XAI: "swarms/examples/xai.md"
   - Swarm Models:
     - Overview: "swarms/models/index.md"
     # - Models Available: "swarms/models/index.md"
diff --git a/docs/zh/swarms/examples/claude.md b/docs/zh/swarms/examples/claude.md
new file mode 100644
index 000000000..75cb85c81
--- /dev/null
+++ b/docs/zh/swarms/examples/claude.md
@@ -0,0 +1,25 @@
+# Agent with Anthropic/Claude
+
+- Get your Anthropic API key and add it to the `.env` file
+
+- Select a model_name such as `claude-3-sonnet-20240229`; model names follow LiteLLM conventions
+
+
+```python
+from swarms import Agent
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize the agent
+agent = Agent(
+    agent_name="Financial-Analysis-Agent",
+    model_name="claude-3-sonnet-20240229",
+    system_prompt="Agent system prompt here",
+    agent_description="Agent performs financial analysis.",
+)
+
+# Run a query
+agent.run("What are the components of a startup's stock incentive equity plan?")
+```
\ No newline at end of file
diff --git a/docs/zh/swarms/examples/cohere.md b/docs/zh/swarms/examples/cohere.md
new file mode 100644
index 000000000..9f2a0eb99
--- /dev/null
+++ b/docs/zh/swarms/examples/cohere.md
@@ -0,0 +1,25 @@
+# Agent with Cohere
+
+- Add your `COHERE_API_KEY` to the `.env` file
+
+- Select a model_name such as `command-r`; model names follow LiteLLM conventions
+
+
+```python
+from swarms import Agent
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize the agent
+agent = Agent(
+    agent_name="Financial-Analysis-Agent",
+    model_name="command-r",
+    system_prompt="Agent system prompt here",
+    agent_description="Agent performs financial analysis.",
+)
+
+# Run a query
+agent.run("What are the components of a startup's stock incentive equity plan?")
+```
\ No newline at end of file
diff --git a/docs/zh/swarms/examples/deepseek.md b/docs/zh/swarms/examples/deepseek.md
new file mode 100644
index 000000000..7b4769b21
--- /dev/null
+++ b/docs/zh/swarms/examples/deepseek.md
@@ -0,0 +1,27 @@
+# Agent with DeepSeek
+
+- Add your `DEEPSEEK_API_KEY` to the `.env` file
+
+- Select a model_name such as `deepseek/deepseek-chat`; model names follow [LiteLLM conventions](https://docs.litellm.ai/docs/providers/deepseek)
+
+- Execute your agent!
+
+
+```python
+from swarms import Agent
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize the agent
+agent = Agent(
+    agent_name="Financial-Analysis-Agent",
+    model_name="deepseek/deepseek-chat",
+    system_prompt="Agent system prompt here",
+    agent_description="Agent performs financial analysis.",
+)
+
+# Run a query
+agent.run("What are the components of a startup's stock incentive equity plan?")
+```
\ No newline at end of file
diff --git a/docs/zh/swarms/examples/groq.md b/docs/zh/swarms/examples/groq.md
new file mode 100644
index 000000000..d9dd23a29
--- /dev/null
+++ b/docs/zh/swarms/examples/groq.md
@@ -0,0 +1,48 @@
+# Agent with Groq
+
+- Set your `GROQ_API_KEY` environment variable
+
+```python
+import os
+
+from swarm_models import OpenAIChat
+
+from swarms import Agent
+
+company = "NVDA"
+
+# Get the Groq API key from the environment variable
+api_key = os.getenv("GROQ_API_KEY")
+
+# Model served through Groq's OpenAI-compatible endpoint
+model = OpenAIChat(
+    openai_api_base="https://api.groq.com/openai/v1",
+    openai_api_key=api_key,
+    model_name="llama-3.1-70b-versatile",
+    temperature=0.1,
+)
+
+
+# Initialize the Managing Director agent
+managing_director = Agent(
+    agent_name="Managing-Director",
+    system_prompt=f"""
+    As the Managing Director at Blackstone, your role is to oversee the entire investment analysis process for potential acquisitions.
+    Your responsibilities include:
+    1. Setting the overall strategy and direction for the analysis
+    2. Coordinating the efforts of the various team members and ensuring a comprehensive evaluation
+    3. Reviewing the findings and recommendations from each team member
+    4. Making the final decision on whether to proceed with the acquisition
+
+    For the current potential acquisition of {company}, direct the team to thoroughly analyze all aspects of the company, including its financials, industry position, technology, market potential, and regulatory compliance. Provide guidance and feedback as needed to ensure a rigorous and unbiased assessment.
+    """,
+    llm=model,
+    max_loops=1,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="",
+    state_save_file_type="json",
+    saved_state_path="managing-director.json",
+)
+```
\ No newline at end of file
diff --git a/docs/zh/swarms/examples/ollama.md b/docs/zh/swarms/examples/ollama.md
new file mode 100644
index 000000000..9e019167c
--- /dev/null
+++ b/docs/zh/swarms/examples/ollama.md
@@ -0,0 +1,24 @@
+# Agent with Ollama
+
+- No API key is needed
+- Select a model_name such as `ollama/llama2`; model names follow [LiteLLM conventions](https://docs.litellm.ai/docs/providers/ollama)
+
+
+```python
+from swarms import Agent
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize the agent
+agent = Agent(
+    agent_name="Financial-Analysis-Agent",
+    model_name="ollama/llama2",
+    system_prompt="Agent system prompt here",
+    agent_description="Agent performs financial analysis.",
+)
+
+# Run a query
+agent.run("What are the components of a startup's stock incentive equity plan?")
+```
\ No newline at end of file
diff --git a/docs/zh/swarms/examples/openai_example.md b/docs/zh/swarms/examples/openai_example.md
new file mode 100644
index 000000000..181c00391
--- /dev/null
+++ b/docs/zh/swarms/examples/openai_example.md
@@ -0,0 +1,16 @@
+# Agent with GPT-4o-Mini
+
+- Add `OPENAI_API_KEY="your_key"` to your `.env` file
+- Select a model such as `gpt-4o-mini` or `gpt-4o`
+
+```python
+from swarms import Agent
+
+Agent(
+    agent_name="Stock-Analysis-Agent",
+    model_name="gpt-4o-mini",
+    max_loops="auto",
+    interactive=True,
+    streaming_on=True,
+).run("What are 5 HFT algorithms?")
+```
\ No newline at end of file
diff --git a/docs/zh/swarms/examples/openrouter.md b/docs/zh/swarms/examples/openrouter.md
new file mode 100644
index 000000000..827ea949c
--- /dev/null
+++ b/docs/zh/swarms/examples/openrouter.md
@@ -0,0 +1,27 @@
+# Agent with OpenRouter
+
+- Add your `OPENROUTER_API_KEY` to the `.env` file
+
+- Select a model_name such as `openrouter/google/palm-2-chat-bison`; model names follow [LiteLLM conventions](https://docs.litellm.ai/docs/providers/openrouter)
+
+- Execute your agent!
+
+
+```python
+from swarms import Agent
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize the agent
+agent = Agent(
+    agent_name="Financial-Analysis-Agent",
+    model_name="openrouter/google/palm-2-chat-bison",
+    system_prompt="Agent system prompt here",
+    agent_description="Agent performs financial analysis.",
+)
+
+# Run a query
+agent.run("What are the components of a startup's stock incentive equity plan?")
+```
\ No newline at end of file
diff --git a/docs/zh/swarms/examples/xai.md b/docs/zh/swarms/examples/xai.md
new file mode 100644
index 000000000..47acfec9e
--- /dev/null
+++ b/docs/zh/swarms/examples/xai.md
@@ -0,0 +1,27 @@
+# Agent with XAI
+
+- Add your `XAI_API_KEY` to the `.env` file
+
+- Select a model_name such as `xai/grok-beta`; model names follow [LiteLLM conventions](https://docs.litellm.ai/docs/providers/xai)
+
+- Execute your agent!
+ + +```python +from swarms import Agent +import os +from dotenv import load_dotenv + +load_dotenv() + +# Initialize the agent with ChromaDB memory +agent = Agent( + agent_name="Financial-Analysis-Agent", + model_name="xai/grok-beta", + system_prompt="Agent system prompt here", + agent_description="Agent performs financial analysis.", +) + +# Run a query +agent.run("What are the components of a startup's stock incentive equity plan?") +``` \ No newline at end of file diff --git a/csvagent_example.py b/new_features_examples/csvagent_example.py similarity index 94% rename from csvagent_example.py rename to new_features_examples/csvagent_example.py index e781335a1..f47ae9057 100644 --- a/csvagent_example.py +++ b/new_features_examples/csvagent_example.py @@ -1,6 +1,9 @@ # Example usage from pathlib import Path -from swarms.structs.csv_to_agent import AgentLoader, AgentValidationError +from swarms.structs.csv_to_agent import ( + AgentLoader, + AgentValidationError, +) if __name__ == "__main__": diff --git a/graph_swarm_example.py b/new_features_examples/graph_swarm_example.py similarity index 100% rename from graph_swarm_example.py rename to new_features_examples/graph_swarm_example.py diff --git a/multi_agent_router_example.py b/new_features_examples/multi_agent_router_example.py similarity index 100% rename from multi_agent_router_example.py rename to new_features_examples/multi_agent_router_example.py diff --git a/new_features_examples/ollama_demo.py b/new_features_examples/ollama_demo.py deleted file mode 100644 index 4d1d41ef9..000000000 --- a/new_features_examples/ollama_demo.py +++ /dev/null @@ -1,252 +0,0 @@ -""" -- For each diagnosis, pull lab results, -- egfr -- for each diagnosis, pull lab ranges, -- pull ranges for diagnosis - -- if the diagnosis is x, then the lab ranges should be a to b -- train the agents, increase the load of input -- medical history sent to the agent -- setup rag for the agents -- run the first agent -> kidney disease -> don't know the stage -> stage 2 -> lab results -> indicative of stage 3 -> the case got elavated -> -- how to manage diseases and by looking at correlating lab, docs, diagnoses -- put docs in rag -> -- monitoring, evaluation, and treatment -- can we confirm for every diagnosis -> monitoring, evaluation, and treatment, specialized for these things -- find diagnosis -> or have diagnosis, -> for each diagnosis are there evidence of those 3 things -- swarm of those 4 agents, -> -- fda api for healthcare for commerically available papers -- - -""" - -from datetime import datetime - -from swarms import Agent, AgentRearrange, create_file_in_folder - -from swarm_models import OllamaModel - -model = OllamaModel(model_name="llama3.2") - -chief_medical_officer = Agent( - agent_name="Chief Medical Officer", - system_prompt="""You are the Chief Medical Officer coordinating a team of medical specialists for viral disease diagnosis. 
- Your responsibilities include: - - Gathering initial patient symptoms and medical history - - Coordinating with specialists to form differential diagnoses - - Synthesizing different specialist opinions into a cohesive diagnosis - - Ensuring all relevant symptoms and test results are considered - - Making final diagnostic recommendations - - Suggesting treatment plans based on team input - - Identifying when additional specialists need to be consulted - - For each diferrential diagnosis provide minimum lab ranges to meet that diagnosis or be indicative of that diagnosis minimum and maximum - - Format all responses with clear sections for: - - Initial Assessment (include preliminary ICD-10 codes for symptoms) - - Differential Diagnoses (with corresponding ICD-10 codes) - - Specialist Consultations Needed - - Recommended Next Steps - - - """, - llm=model, - max_loops=1, -) - -virologist = Agent( - agent_name="Virologist", - system_prompt="""You are a specialist in viral diseases. For each case, provide: - - Clinical Analysis: - - Detailed viral symptom analysis - - Disease progression timeline - - Risk factors and complications - - Coding Requirements: - - List relevant ICD-10 codes for: - * Confirmed viral conditions - * Suspected viral conditions - * Associated symptoms - * Complications - - Include both: - * Primary diagnostic codes - * Secondary condition codes - - Document all findings using proper medical coding standards and include rationale for code selection.""", - llm=model, - max_loops=1, -) - -internist = Agent( - agent_name="Internist", - system_prompt="""You are an Internal Medicine specialist responsible for comprehensive evaluation. - - For each case, provide: - - Clinical Assessment: - - System-by-system review - - Vital signs analysis - - Comorbidity evaluation - - Medical Coding: - - ICD-10 codes for: - * Primary conditions - * Secondary diagnoses - * Complications - * Chronic conditions - * Signs and symptoms - - Include hierarchical condition category (HCC) codes where applicable - - Document supporting evidence for each code selected.""", - llm=model, - max_loops=1, -) - -medical_coder = Agent( - agent_name="Medical Coder", - system_prompt="""You are a certified medical coder responsible for: - - Primary Tasks: - 1. Reviewing all clinical documentation - 2. Assigning accurate ICD-10 codes - 3. Ensuring coding compliance - 4. Documenting code justification - - Coding Process: - - Review all specialist inputs - - Identify primary and secondary diagnoses - - Assign appropriate ICD-10 codes - - Document supporting evidence - - Note any coding queries - - Output Format: - 1. Primary Diagnosis Codes - - ICD-10 code - - Description - - Supporting documentation - 2. Secondary Diagnosis Codes - - Listed in order of clinical significance - 3. Symptom Codes - 4. Complication Codes - 5. Coding Notes""", - llm=model, - max_loops=1, -) - -synthesizer = Agent( - agent_name="Diagnostic Synthesizer", - system_prompt="""You are responsible for creating the final diagnostic and coding assessment. - - Synthesis Requirements: - 1. Integrate all specialist findings - 2. Reconcile any conflicting diagnoses - 3. Verify coding accuracy and completeness - - Final Report Sections: - 1. Clinical Summary - - Primary diagnosis with ICD-10 - - Secondary diagnoses with ICD-10 - - Supporting evidence - 2. Coding Summary - - Complete code list with descriptions - - Code hierarchy and relationships - - Supporting documentation - 3. 
Recommendations - - Additional testing needed - - Follow-up care - - Documentation improvements needed - - Include confidence levels and evidence quality for all diagnoses and codes.""", - llm=model, - max_loops=1, -) - -# Create agent list -agents = [ - chief_medical_officer, - virologist, - internist, - medical_coder, - synthesizer, -] - -# Define diagnostic flow -flow = f"""{chief_medical_officer.agent_name} -> {virologist.agent_name} -> {internist.agent_name} -> {medical_coder.agent_name} -> {synthesizer.agent_name}""" - -# Create the swarm system -diagnosis_system = AgentRearrange( - name="Medical-coding-diagnosis-swarm", - description="Comprehensive medical diagnosis and coding system", - agents=agents, - flow=flow, - max_loops=1, - output_type="all", -) - - -def generate_coding_report(diagnosis_output: str) -> str: - """ - Generate a structured medical coding report from the diagnosis output. - """ - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - report = f"""# Medical Diagnosis and Coding Report - Generated: {timestamp} - - ## Clinical Summary - {diagnosis_output} - - ## Coding Summary - ### Primary Diagnosis Codes - [Extracted from synthesis] - - ### Secondary Diagnosis Codes - [Extracted from synthesis] - - ### Symptom Codes - [Extracted from synthesis] - - ### Procedure Codes (if applicable) - [Extracted from synthesis] - - ## Documentation and Compliance Notes - - Code justification - - Supporting documentation references - - Any coding queries or clarifications needed - - ## Recommendations - - Additional documentation needed - - Suggested follow-up - - Coding optimization opportunities - """ - return report - - -if __name__ == "__main__": - # Example patient case - patient_case = """ - Patient: 45-year-old White Male - - Lab Results: - - egfr - - 59 ml / min / 1.73 - - non african-american - - """ - - # Add timestamp to the patient case - case_info = f"Timestamp: {datetime.now()}\nPatient Information: {patient_case}" - - # Run the diagnostic process - diagnosis = diagnosis_system.run(case_info) - - # Generate coding report - coding_report = generate_coding_report(diagnosis) - - # Create reports - create_file_in_folder( - "reports", "medical_diagnosis_report.md", diagnosis - ) - create_file_in_folder( - "reports", "medical_coding_report.md", coding_report - ) diff --git a/new_features_examples/openai_assistant_wrapper.py b/new_features_examples/openai_assistant_wrapper.py deleted file mode 100644 index 2944ec111..000000000 --- a/new_features_examples/openai_assistant_wrapper.py +++ /dev/null @@ -1,14 +0,0 @@ -from swarms.prompts.finance_agent_sys_prompt import ( - FINANCIAL_AGENT_SYS_PROMPT, -) -from swarms.agents.openai_assistant import OpenAIAssistant - -agent = OpenAIAssistant( - name="test", instructions=FINANCIAL_AGENT_SYS_PROMPT -) - -print( - agent.run( - "Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.", - ) -) diff --git a/new_features_examples/persistent_legal_agent.py b/new_features_examples/persistent_legal_agent.py deleted file mode 100644 index 65e8d61a4..000000000 --- a/new_features_examples/persistent_legal_agent.py +++ /dev/null @@ -1,113 +0,0 @@ -import os -from swarms import Agent -from swarm_models import OpenAIChat -from dotenv import load_dotenv - -# Custom system prompt for VC legal document generation -VC_LEGAL_AGENT_PROMPT = """You are a specialized legal document assistant focusing on venture capital documentation. 
-Your role is to help draft preliminary versions of common VC legal documents while adhering to these guidelines: - -1. Always include standard legal disclaimers -2. Follow standard VC document structures -3. Flag areas that need attorney review -4. Request necessary information for document completion -5. Maintain consistency across related documents -6. Output only when document is complete and verified - -Remember: All output should be marked as 'DRAFT' and require professional legal review.""" - - -def create_vc_legal_agent(): - load_dotenv() - api_key = os.getenv("OPENAI_API_KEY") - - # Configure the model with appropriate parameters for legal work - # Get the OpenAI API key from the environment variable - api_key = os.getenv("GROQ_API_KEY") - - # Model - model = OpenAIChat( - openai_api_base="https://api.groq.com/openai/v1", - openai_api_key=api_key, - model_name="llama-3.1-70b-versatile", - temperature=0.1, - ) - - # Initialize the persistent agent - agent = Agent( - agent_name="VC-Legal-Document-Agent", - system_prompt=VC_LEGAL_AGENT_PROMPT, - llm=model, - max_loops="auto", # Allows multiple iterations until completion - stopping_token="", # Agent will continue until this token is output - autosave=True, - dashboard=True, # Enable dashboard for monitoring - verbose=True, - dynamic_temperature_enabled=False, # Disable for consistency in legal documents - saved_state_path="vc_legal_agent_state.json", - user_name="legal_corp", - retry_attempts=3, - context_length=200000, - return_step_meta=True, - output_type="string", - streaming_on=False, - ) - - return agent - - -def generate_legal_document(agent, document_type, parameters): - """ - Generate a legal document with multiple refinement iterations - - Args: - agent: The initialized VC legal agent - document_type: Type of document to generate (e.g., "term_sheet", "investment_agreement") - parameters: Dict containing necessary parameters for the document - - Returns: - str: The generated document content - """ - prompt = f""" - Generate a {document_type} with the following parameters: - {parameters} - - Please follow these steps: - 1. Create initial draft - 2. Review for completeness - 3. Add necessary legal disclaimers - 4. Verify all required sections - 5. Output when complete - - Include [REQUIRES LEGAL REVIEW] tags for sections needing attorney attention. 
- """ - - return agent.run(prompt) - - -# Example usage -if __name__ == "__main__": - # Initialize the agent - legal_agent = create_vc_legal_agent() - - # Example parameters for a term sheet - parameters = { - "company_name": "TechStartup Inc.", - "investment_amount": "$5,000,000", - "valuation": "$20,000,000", - "investor_rights": [ - "Board seat", - "Pro-rata rights", - "Information rights", - ], - "type_of_security": "Series A Preferred Stock", - } - - # Generate a term sheet - document = generate_legal_document( - legal_agent, "term_sheet", parameters - ) - - # Save the generated document - with open("generated_term_sheet_draft.md", "w") as f: - f.write(document) diff --git a/new_features_examples/privacy_building.py b/new_features_examples/privacy_building.py deleted file mode 100644 index 68d85e3e7..000000000 --- a/new_features_examples/privacy_building.py +++ /dev/null @@ -1,263 +0,0 @@ -import os -from swarms import Agent, AgentRearrange -from swarm_models import OpenAIChat - -# Get the OpenAI API key from the environment variable -api_key = os.getenv("OPENAI_API_KEY") - -# Create an instance of the OpenAIChat class -model = OpenAIChat( - api_key=api_key, model_name="gpt-4o-mini", temperature=0.1 -) - -# Initialize the matchmaker agent (Director) -matchmaker_agent = Agent( - agent_name="MatchmakerAgent", - system_prompt=""" - - You are the MatchmakerAgent, the primary coordinator for managing user profiles and facilitating meaningful connections while maintaining strict privacy standards. - - - - - - Full names - - Contact information (phone, email, social media) - - Exact location/address - - Financial information - - Personal identification numbers - - Workplace specifics - - - - - First name only - - Age range (not exact birth date) - - General location (city/region only) - - Interests and hobbies - - Relationship goals - - General profession category - - - - - Profile_Management - - - Review and verify user profiles for authenticity - - Ensure all shared information adheres to privacy guidelines - - Flag any potential security concerns - - - Match_Coordination - - - Analyze compatibility factors between users - - Prioritize matches based on shared interests and goals - - Monitor interaction patterns for safety and satisfaction - - - Communication_Flow - - - Coordinate information exchange between ProfileAnalyzer and ConnectionFacilitator - - Ensure smooth transition of approved information - - Maintain audit trail of information sharing - - - - - Consent_First - Never share information without explicit user consent - - Safety_Priority - Prioritize user safety and privacy over match potential - - Transparency - Be clear about what information is being shared and why - - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="matchmaker_agent.json", -) - -# Initialize worker 1: Profile Analyzer -profile_analyzer = Agent( - agent_name="ProfileAnalyzer", - system_prompt=""" - - You are the ProfileAnalyzer, responsible for deeply understanding user profiles and identifying meaningful compatibility factors while maintaining strict privacy protocols. 
- - - - - - - All sensitive information must be encrypted - - Access logs must be maintained - - Data retention policies must be followed - - - - - Use anonymized IDs for internal processing - - Apply privacy-preserving analysis techniques - - Implement data minimization principles - - - - - - - Shared interests alignment - - Relationship goal compatibility - - Value system overlap - - Lifestyle compatibility - - Communication style matching - - - - - Inconsistent information - - Suspicious behavior patterns - - Policy violations - - Safety concerns - - - - - - - - Generate compatibility scores - - Identify shared interests and potential conversation starters - - Flag potential concerns for review - - Provide reasoning for match recommendations - - - - - Apply progressive information disclosure rules - - Implement multi-stage verification for sensitive data sharing - - Maintain audit trails of information access - - - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="profile_analyzer.json", -) - -# Initialize worker 2: Connection Facilitator -connection_facilitator = Agent( - agent_name="ConnectionFacilitator", - system_prompt=""" - - You are the ConnectionFacilitator, responsible for managing the interaction between matched users and ensuring smooth, safe, and meaningful communication. - - - - - - - Manage introduction messages - - Monitor response patterns - - Flag any concerning behavior - - - - - Track engagement levels - - Identify conversation quality indicators - - Provide conversation suggestions when appropriate - - - - - Monitor relationship progression - - Record user feedback - - Update matching algorithms based on successful connections - - - - - - - Screen for inappropriate content - - Block prohibited information sharing - - Monitor for harassment or abuse - - - - - Implement progressive contact information sharing - - Maintain anonymized communication channels - - Protect user identity until mutual consent - - - - - - - - User engagement rates - - Communication quality scores - - Safety incident reports - - User satisfaction ratings - - - - - Collect interaction data - - Analyze success patterns - - Implement refinements to matching criteria - - Update safety protocols as needed - - - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="connection_facilitator.json", -) - -# Swarm-Level Prompt (Collaboration Prompt) -swarm_prompt = """ - As a dating platform swarm, your collective goal is to facilitate meaningful connections while maintaining - the highest standards of privacy and safety. The MatchmakerAgent oversees the entire matching process, - coordinating between the ProfileAnalyzer who deeply understands user compatibility, and the ConnectionFacilitator - who manages the development of connections. Together, you must ensure that: - - 1. User privacy is maintained at all times - 2. Information is shared progressively and with consent - 3. Safety protocols are strictly followed - 4. Meaningful connections are prioritized over quantity - 5. 
User experience remains positive and engaging -""" - -# Create a list of agents -agents = [matchmaker_agent, profile_analyzer, connection_facilitator] - -# Define the flow pattern for the swarm -flow = "MatchmakerAgent -> ProfileAnalyzer -> ConnectionFacilitator" - -# Using AgentRearrange class to manage the swarm -agent_system = AgentRearrange( - name="dating-swarm", - description="Privacy-focused dating platform agent system", - agents=agents, - flow=flow, - return_json=False, - output_type="final", - max_loops=1, -) - -# Example task for the swarm -task = f""" - {swarm_prompt} - - Process a new batch of user profiles and identify potential matches while ensuring all privacy protocols - are followed. For each potential match, provide compatibility reasoning and suggested conversation - starters without revealing any restricted information. -""" - -# Run the swarm system with the task -output = agent_system.run(task) -print(output) diff --git a/unique_swarms_examples.py b/new_features_examples/unique_swarms_examples.py similarity index 100% rename from unique_swarms_examples.py rename to new_features_examples/unique_swarms_examples.py diff --git a/swarms/structs/csv_to_agent.py b/swarms/structs/csv_to_agent.py index aa6fdf73c..624e35774 100644 --- a/swarms/structs/csv_to_agent.py +++ b/swarms/structs/csv_to_agent.py @@ -136,6 +136,7 @@ def validate_config(config: Dict[str, Any]) -> AgentConfigDict: str(e), str(e.__class__.__name__), str(config) ) + class AgentLoader: """Class to manage agents through CSV with type safety""" @@ -202,7 +203,9 @@ def load_agents(self, file_type: str = "csv") -> List[Agent]: elif file_type == "json": return self._load_agents_from_json() else: - raise ValueError("Unsupported file type. Use 'csv' or 'json'.") + raise ValueError( + "Unsupported file type. Use 'csv' or 'json'." 
+ ) def _load_agents_from_csv(self) -> List[Agent]: """Load agents from a CSV file""" @@ -229,13 +232,13 @@ def _load_agents_from_json(self) -> List[Agent]: """Load agents from a JSON file""" import json - if not self.csv_path.with_suffix('.json').exists(): + if not self.csv_path.with_suffix(".json").exists(): raise FileNotFoundError( f"JSON file not found at {self.csv_path.with_suffix('.json')}" ) agents: List[Agent] = [] - with open(self.csv_path.with_suffix('.json'), "r") as f: + with open(self.csv_path.with_suffix(".json"), "r") as f: agents_data = json.load(f) for agent in agents_data: try: @@ -250,10 +253,14 @@ def _load_agents_from_json(self) -> List[Agent]: ) continue - print(f"Loaded {len(agents)} agents from {self.csv_path.with_suffix('.json')}") + print( + f"Loaded {len(agents)} agents from {self.csv_path.with_suffix('.json')}" + ) return agents - def _create_agent(self, validated_config: AgentConfigDict) -> Agent: + def _create_agent( + self, validated_config: AgentConfigDict + ) -> Agent: """Create an Agent instance from validated configuration""" return Agent( agent_name=validated_config["agent_name"], @@ -263,7 +270,9 @@ def _create_agent(self, validated_config: AgentConfigDict) -> Agent: autosave=validated_config["autosave"], dashboard=validated_config["dashboard"], verbose=validated_config["verbose"], - dynamic_temperature_enabled=validated_config["dynamic_temperature"], + dynamic_temperature_enabled=validated_config[ + "dynamic_temperature" + ], saved_state_path=validated_config["saved_state_path"], user_name=validated_config["user_name"], retry_attempts=validated_config["retry_attempts"], @@ -271,4 +280,4 @@ def _create_agent(self, validated_config: AgentConfigDict) -> Agent: return_step_meta=validated_config["return_step_meta"], output_type=validated_config["output_type"], streaming_on=validated_config["streaming"], - ) \ No newline at end of file + ) diff --git a/swarms/structs/graph_swarm.py b/swarms/structs/graph_swarm.py index 70f2323eb..e67add52f 100644 --- a/swarms/structs/graph_swarm.py +++ b/swarms/structs/graph_swarm.py @@ -190,7 +190,7 @@ class GraphSwarm: def __init__( self, name: str = "graph-swarm-01", - description: str = "Graph swarm : build your own graph of agents", + description: str = "Graph swarm : build your own graph of agents", agents: Union[ List[Agent], List[Tuple[Agent, List[str]]], List[Callable] ] = None, diff --git a/swarms/telemetry/bootup.py b/swarms/telemetry/bootup.py index 1f2acd5aa..edb491335 100644 --- a/swarms/telemetry/bootup.py +++ b/swarms/telemetry/bootup.py @@ -54,7 +54,3 @@ def bootup(): except Exception as e: logger.error(f"Error during bootup: {str(e)}") raise - - -# Run bootup -bootup() diff --git a/run_all_tests.py b/tests/run_all_tests.py similarity index 100% rename from run_all_tests.py rename to tests/run_all_tests.py