15 changes: 13 additions & 2 deletions browser_agent/browser_tool.py
@@ -1,10 +1,21 @@
 import os
+import sys
+from pathlib import Path
 from typing import Optional, Any, Literal
 from langchain_core.tools import BaseTool
 from browser_use import Agent, Browser, BrowserConfig
 from langchain_openai import ChatOpenAI
 from langchain_anthropic import ChatAnthropic
 from pydantic import Field
 
+# Add the root directory to Python path
+root_dir = Path(__file__).resolve().parent.parent
+if str(root_dir) not in sys.path:
+    sys.path.insert(0, str(root_dir))
+
+from llm_provider import get_llm
+from langchain_core.language_models.chat_models import BaseChatModel
+
 class BrowserTool(BaseTool):
     """Tool for autonomous web browsing and research."""
 
@@ -21,14 +32,14 @@ class BrowserTool(BaseTool):
- "Sign up for a gym membership at Planet Fitness"
- "Schedule a grocery delivery from Whole Foods"
"""
llm: ChatAnthropic = Field(default_factory=lambda: ChatAnthropic(model="claude-3-5-sonnet-latest"))
llm: BaseChatModel = Field(default_factory=lambda: get_llm(model="claude-3-5-sonnet-latest")) # Model parameter only used if falling back to Anthropic
browser: Browser = Field(default_factory=lambda: Browser(
config=BrowserConfig(
chrome_instance_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
)
))

def __init__(self, llm: Optional[ChatAnthropic] = None, browser: Optional[Browser] = None):
def __init__(self, llm: Optional[BaseChatModel] = None, browser: Optional[Browser] = None):
"""Initialize the browser tool with an optional LLM and browser instance."""
super().__init__()
if llm is not None:
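For reference, a minimal usage sketch of the updated tool; this assumes MIRA_API_KEY or ANTHROPIC_API_KEY is set, and the model argument only matters on the Anthropic fallback path:

# Sketch: constructing BrowserTool with the provider-agnostic factory from this PR.
from browser_agent.browser_tool import BrowserTool
from llm_provider import get_llm

# Explicit injection; omitting llm uses the Field default_factory shown above.
tool = BrowserTool(llm=get_llm(model="claude-3-5-sonnet-latest"))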
12 changes: 7 additions & 5 deletions chatbot.py
@@ -45,6 +45,7 @@
 from langchain.tools import Tool
 from langchain_core.runnables import RunnableConfig
 from browser_agent import BrowserToolkit
+from llm_provider import get_llm
 
 # Import Coinbase AgentKit related modules
 from coinbase_agentkit import (
@@ -94,18 +95,19 @@
 # Add the import for WritingTool near the other imports at the top of the file
 from writing_agent.writing_tool import WritingTool
 
-async def generate_llm_podcast_query(llm: ChatAnthropic = None) -> str:
+async def generate_llm_podcast_query(llm=None) -> str:
     """
     Generates a dynamic, contextually-aware query for the podcast knowledge base using an LLM.
     Uses various prompting techniques to create unique and insightful queries.
 
     Args:
-        llm: ChatAnthropic instance. If None, creates a new one.
+        llm: LLM instance. If None, creates a new one.
 
     Returns:
         str: A generated query string
     """
-    llm = ChatAnthropic(model="claude-3-5-haiku-20241022")
+    if llm is None:
+        llm = get_llm(model="claude-3-5-haiku-20241022")
 
     # Format the prompt with random selections
     prompt = PODCAST_QUERY_PROMPT.format(
@@ -137,7 +139,7 @@ async def generate_podcast_query() -> str:
     """
     try:
         # Create LLM instance
-        llm = ChatAnthropic(model="claude-3-5-sonnet-20241022")
+        llm = get_llm(model="claude-3-5-sonnet-20241022")
         # Get LLM-generated query
         query = await generate_llm_podcast_query(llm)
         return query
@@ -402,7 +404,7 @@ async def initialize_agent():
     """Initialize the agent with tools and configuration."""
     try:
         print_system("Initializing LLM...")
-        llm = ChatAnthropic(model="claude-3-5-sonnet-20241022")
+        llm = get_llm(model="claude-3-5-sonnet-20241022")  # Model parameter only used if falling back to Anthropic
 
         print_system("Loading character configuration...")
         try:
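A quick sketch of the new lazy-initialization path in generate_llm_podcast_query (a hypothetical driver; importing chatbot assumes its remaining dependencies are installed):

# Sketch: the helper now builds its own LLM via get_llm only when none is passed in.
import asyncio
from chatbot import generate_llm_podcast_query

query = asyncio.run(generate_llm_podcast_query())  # llm=None -> get_llm(model="claude-3-5-haiku-20241022")
print(query)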
130 changes: 130 additions & 0 deletions llm_provider.py
@@ -0,0 +1,130 @@
+from typing import Optional, Any, Dict, List
+import os
+from langchain_anthropic import ChatAnthropic
+from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_core.outputs import ChatGeneration, ChatResult
+from langchain_core.messages import AIMessage
+from mira_network.sync_client import MiraSyncClient
+from mira_network.models import AiRequest
+from pydantic import Field, model_validator
+import json
+
+class MiraLLM(BaseChatModel):
+    """LangChain compatible wrapper for Mira's sync client."""
+
+    base_url: str = Field(default="https://api.mira.network")
+    api_key: Optional[str] = Field(default=None)
+    model: str = Field(default="claude-3.5-sonnet")
+    temperature: float = Field(default=0.7)
+    client: Optional[MiraSyncClient] = Field(default=None)
+
+    @model_validator(mode='after')
+    def initialize_client(self) -> 'MiraLLM':
+        """Initialize the Mira client after all fields are set."""
+        self.client = MiraSyncClient(base_url=self.base_url, api_key=self.api_key)
+        return self
+
+    def invoke(self, messages, **kwargs):
+        """Invoke the chat model with messages."""
+        formatted_messages = []
+        for message in messages:
+            # Map message types to roles
+            if message.type == "human":
+                role = "user"
+            elif message.type == "ai":
+                role = "assistant"
+            elif message.type == "system":
+                role = "system"
+            else:
+                role = "user"  # default to user for unknown types
+
+            formatted_messages.append({
+                "role": role,
+                "content": message.content
+            })
+
+        request = AiRequest(
+            model=self.model,
+            messages=formatted_messages,
+            temperature=self.temperature,
+            stream=False,
+            **kwargs
+        )
+
+        response = self.client.generate(request)
+
+        # Handle response format where data is nested
+        if isinstance(response, dict) and "data" in response:
+            data = response["data"]
+            if "choices" in data and len(data["choices"]) > 0:
+                return data["choices"][0]["message"]["content"]
+
+        raise ValueError(f"Unexpected response format from Mira API: {response}")
+
+    def _generate(self, messages=None, stop=None, run_manager=None, **kwargs) -> ChatResult:
+        """Required abstract method implementation for BaseChatModel."""
+        response = self.invoke(messages, **kwargs)
+        message = AIMessage(content=response)
+        generation = ChatGeneration(message=message)
+        return ChatResult(generations=[generation])
+
+    @property
+    def _llm_type(self) -> str:
+        """Return identifier of the LLM."""
+        return "mira"
+
+def get_llm(
+    model: str = "claude-3.5-sonnet",  # Used only for the Anthropic fallback; Mira also offers "gpt-4o", "deepseek-r1", "llama-3.3-70b-instruct"
+    temperature: float = 0.7,
+    base_url: str = "https://api.mira.network",
+    api_key: Optional[str] = None,
+    anthropic_api_key: Optional[str] = None,
+) -> BaseChatModel:
+    """Get the appropriate LLM based on environment configuration.
+
+    Args:
+        model: Model name to use (only used when falling back to Anthropic; ignored for Mira)
+        temperature: Temperature for generation
+        base_url: Base URL for the Mira API (only used if Mira is selected)
+        api_key: Optional API key for Mira
+        anthropic_api_key: Optional API key for Anthropic
+
+    Returns:
+        A LangChain compatible chat model
+
+    Raises:
+        ValueError: If neither a Mira API key nor an Anthropic API key is available
+
+    Note:
+        The model parameter is only used when falling back to Anthropic.
+        When using Mira, this wrapper pins the model to "claude-3.5-sonnet".
+    """
+    # Check for API keys passed directly or set in the environment
+    mira_api_key = api_key or os.environ.get("MIRA_API_KEY")
+    anthropic_key = anthropic_api_key or os.environ.get("ANTHROPIC_API_KEY")
+
+    if not mira_api_key and not anthropic_key:
+        raise ValueError(
+            "No API keys found. Please provide either a Mira API key (via the MIRA_API_KEY "
+            "environment variable or the api_key parameter) or an Anthropic API key (via the "
+            "ANTHROPIC_API_KEY environment variable or the anthropic_api_key parameter)."
+        )
+
+    if mira_api_key:
+        # Prefer Mira when its API key is available; the model is pinned to claude-3.5-sonnet
+        return MiraLLM(
+            base_url=base_url,
+            api_key=mira_api_key,
+            model="claude-3.5-sonnet",  # Fixed for Mira
+            temperature=temperature
+        )
+    else:
+        # Fall back to ChatAnthropic with the specified model
+        return ChatAnthropic(
+            model=model,
+            temperature=temperature,
+            anthropic_api_key=anthropic_key
+        )
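To illustrate the selection logic above, a minimal sketch (the key values are placeholders):

# Sketch: provider selection in get_llm. Placeholder keys; never commit real secrets.
import os
from llm_provider import get_llm

os.environ["MIRA_API_KEY"] = "mira-..."            # Mira wins whenever its key is present
llm = get_llm(temperature=0.2)                     # -> MiraLLM, model pinned to claude-3.5-sonnet

del os.environ["MIRA_API_KEY"]
os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..."     # with only the Anthropic key set...
llm = get_llm(model="claude-3-5-sonnet-20241022")  # -> ChatAnthropic, model honored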
1 change: 1 addition & 0 deletions pyproject.toml
@@ -41,6 +41,7 @@ yt-dlp = "^2025.2.19"
 anthropic = ">=0.41.0,<1.0.0"
 pypdf = "^4.0.1"
 requests = "^2.31.0"
+mira-network = "^0.1.10"
 
 [tool.poetry.group.dev.dependencies]
 pytest-playwright = "^0.6.2"
3 changes: 2 additions & 1 deletion server/src/server/tools.py
@@ -16,6 +16,7 @@
 from langchain_anthropic import ChatAnthropic
 from dotenv import load_dotenv
 from browser_agent import BrowserToolkit
+from llm_provider import get_llm
 
 # from coinbase_agentkit import (
 #     AgentKit,
@@ -89,7 +90,7 @@
 ALLOW_DANGEROUS_REQUEST = True
 
 # Initialize base components
-llm = ChatAnthropic(model="claude-3-sonnet-20240229")
+llm = get_llm(model="claude-3-sonnet-20240229")  # Model parameter only used if falling back to Anthropic
 
 # Initialize config
 config = {
36 changes: 25 additions & 11 deletions writing_agent/writing_agent.py
@@ -1,13 +1,21 @@
 import os
+import sys
+from pathlib import Path
 from typing import List, Dict, Any, Optional
 import logging
 import traceback
-from langchain_anthropic import ChatAnthropic
 from langchain_core.messages import HumanMessage
 
+# Add the root directory to Python path
+root_dir = Path(__file__).resolve().parent.parent
+if str(root_dir) not in sys.path:
+    sys.path.insert(0, str(root_dir))
+
+from llm_provider import get_llm
 from base_utils.utils import print_system, print_error
-from .web_searcher import WebSearcher
-from .document_sender import DocumentSender
+from writing_agent.web_searcher import WebSearcher
+from writing_agent.document_sender import DocumentSender
 
 class WritingAgent:
     """Main agent for writing content with research and style adaptation."""
@@ -20,33 +28,39 @@ def __init__(self, api_key: Optional[str] = None):
         Initialize the writing agent.
 
         Args:
-            api_key: Optional API key for the language model
+            api_key: Optional API key for the language model (either a Mira or an Anthropic API key)
         """
         # Set up logging
         logging.basicConfig(level=logging.INFO)
         self.logger = logging.getLogger(__name__)
 
         # Initialize components
-        self.api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
-        if not self.api_key:
-            self.logger.warning("No ANTHROPIC_API_KEY provided. API-dependent features will not work.")
+        self.mira_api_key = api_key or os.environ.get("MIRA_API_KEY")
+        self.anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
+
+        if not self.mira_api_key and not self.anthropic_api_key:
+            self.logger.warning("No API keys provided. API-dependent features will not work.")
         else:
-            self.logger.info("API key found and set successfully")
+            provider = "Mira" if self.mira_api_key else "Anthropic"
+            self.logger.info(f"{provider} API key found and set successfully")
 
         self.searcher = WebSearcher()
         self.logger.info("Web searcher initialized")
 
         # Initialize document sender for direct document integration
-        self.document_sender = DocumentSender(api_key=self.api_key)
+        # Pass the appropriate API key to the document sender
+        self.document_sender = DocumentSender(api_key=self.anthropic_api_key)  # Document sender still uses Anthropic
         self.logger.info("Document sender initialized")
 
         # Initialize reference tracking
         self.reference_materials = []
 
         # Initialize language model
-        self.llm = ChatAnthropic(model="claude-3-5-sonnet-20240620",
-                                 temperature=0.7,
-                                 anthropic_api_key=self.api_key)
+        self.llm = get_llm(
+            api_key=self.mira_api_key,  # Will use Mira if available (with fixed model)
+            anthropic_api_key=self.anthropic_api_key,  # Falls back to Anthropic if needed
+            model="claude-3-5-sonnet-20240620"  # Only used if falling back to Anthropic
+        )
         self.logger.info("Language model initialized")
 
         # Initialize default article parameters
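Finally, a short sketch of the resulting key precedence in WritingAgent (placeholder key; an explicit api_key argument is treated as a Mira key):

# Sketch: with no explicit api_key, MIRA_API_KEY is tried first, then ANTHROPIC_API_KEY.
import os
from writing_agent.writing_agent import WritingAgent

os.environ.setdefault("ANTHROPIC_API_KEY", "sk-ant-...")  # placeholder value
agent = WritingAgent()  # logs "Anthropic API key found and set successfully"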