Amr/gradio live deployment #61

Open · wants to merge 2 commits into base: master
README.md: 11 additions & 0 deletions
@@ -1,3 +1,14 @@
---
title: Hyperbolic-AgentKit
emoji: 🤖
colorFrom: blue
colorTo: green
sdk: gradio
sdk_version: 5.23.2
python_version: 3.12
app_file: gradio_ui.py
pinned: false
---
# Hyperbolic's Agent Framework

This repository is inspired by and modified from Coinbase's [CDP Agentkit](https://github.com/coinbase/cdp-agentkit). We extend our gratitude to the Coinbase Developer Platform team for their original work.
Expand Down
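For reference, the YAML block added above is the Hugging Face Spaces configuration: Spaces reads it from the top of README.md, installs the pinned `sdk_version` of Gradio, and executes the file named in `app_file`. A minimal sketch of the launch pattern that file is expected to follow (the `create_ui` and `chat_with_agent` names appear in this PR's gradio_ui.py; the bodies below are illustrative stubs, not code from the diff):

```python
# Sketch only: a Spaces-compatible Gradio entrypoint. The real gradio_ui.py in this
# PR defines create_ui() and an async, streaming chat_with_agent(); stubs are used
# here so the example runs on its own.
import gradio as gr

def chat_with_agent(message, history):
    # Stand-in for the PR's agent-backed handler.
    return f"echo: {message}"

def create_ui():
    # type="messages" delivers history as a list of {"role": ..., "content": ...}
    # dicts, which matches what the PR's handler iterates over.
    return gr.ChatInterface(fn=chat_with_agent, type="messages", title="Hyperbolic-AgentKit")

if __name__ == "__main__":
    create_ui().launch()  # Spaces runs app_file (gradio_ui.py) and serves the launched app
```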
chatbot.py: 12 additions & 25 deletions
@@ -186,7 +186,7 @@ def loadCharacters(charactersArg: str) -> List[Dict[str, Any]]:

if not characterPaths:
# Load default chainyoda character
default_path = os.path.join(os.path.dirname(__file__), "characters/default.json")
default_path = os.path.join(os.path.dirname(__file__), "characters/hyperbolic.json")
characterPaths.append(default_path)

for characterPath in characterPaths:
@@ -377,7 +377,7 @@ def create_agent_tools(llm, knowledge_base, podcast_knowledge_base, agent_kit, c
print_system(f"Added {len(coinbase_tools)} Coinbase tools")

# Add Hyperbolic tools
if os.getenv("USE_HYPERBOLIC_TOOLS", "false").lower() == "true":
if os.getenv("USE_HYPERBOLIC_TOOLS", "true").lower() == "true":
hyperbolic_agentkit = HyperbolicAgentkitWrapper()
hyperbolic_toolkit = HyperbolicToolkit.from_hyperbolic_agentkit_wrapper(hyperbolic_agentkit)
tools.extend(hyperbolic_toolkit.get_tools())
@@ -478,38 +478,28 @@ async def initialize_agent():
f.write(wallet_data)

# Twitter Knowledge Base initialization
while True:
init_twitter_kb = input("\nDo you want to initialize the Twitter knowledge base? (y/n): ").lower().strip()
if init_twitter_kb in ['y', 'n']:
break
print("Invalid choice. Please enter 'y' or 'n'.")
# Interactive prompt removed; skip Twitter knowledge base initialization by default
init_twitter_kb = 'n' # Set to 'y' to build the Twitter knowledge base on startup

if init_twitter_kb == 'y':
try:
knowledge_base = TweetKnowledgeBase()
stats = knowledge_base.get_collection_stats()
print_system(f"Initial Twitter knowledge base stats: {stats}")

# Initialize Twitter client here, before we need it
print_system("\n=== Initializing Twitter Client ===")
twitter_client = TwitterClient()
print_system("Twitter client initialized successfully")

while True:
clear_choice = input("\nDo you want to clear the existing Twitter knowledge base? (y/n): ").lower().strip()
if clear_choice in ['y', 'n']:
break
print("Invalid choice. Please enter 'y' or 'n'.")

# Keep the logic for clearing and updating, controlled by environment vars or future config
clear_choice = os.getenv("CLEAR_TWITTER_KB_ON_START", "n").lower() # Example using env var

if clear_choice == 'y':
knowledge_base.clear_collection()
print_system("Knowledge base cleared")
print_system("Knowledge base cleared based on CLEAR_TWITTER_KB_ON_START setting.")

while True:
update_choice = input("\nDo you want to update the Twitter knowledge base with KOL tweets? (y/n): ").lower().strip()
if update_choice in ['y', 'n']:
break
print("Invalid choice. Please enter 'y' or 'n'.")
update_choice = os.getenv("UPDATE_TWITTER_KB_ON_START", "y").lower() # Example using env var

if update_choice == 'y':
print_system("\n=== Starting Twitter Knowledge Base Update ===")
@@ -560,11 +550,8 @@ async def initialize_agent():
print_error(f"Error initializing Twitter knowledge base: {e}")

# Podcast Knowledge Base initialization
while True:
init_podcast_kb = input("\nDo you want to initialize the Podcast knowledge base? (y/n): ").lower().strip()
if init_podcast_kb in ['y', 'n']:
break
print("Invalid choice. Please enter 'y' or 'n'.")
# Interactive prompt removed; always attempt Podcast knowledge base initialization
init_podcast_kb = 'y' # Always initialize

if init_podcast_kb == 'y':
try:
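A note on configuration: with the interactive prompts removed, chatbot.py is now driven by environment variables (USE_HYPERBOLIC_TOOLS, CLEAR_TWITTER_KB_ON_START, UPDATE_TWITTER_KB_ON_START). Below is a small sketch of how that parsing could be centralized; the `env_flag` helper is hypothetical and not part of this PR, and the defaults simply mirror the values used in the diff:

```python
# Hypothetical helper (not in this PR): one place for the string-to-bool parsing
# that the diff currently repeats with os.getenv(...).lower() comparisons.
import os

def env_flag(name: str, default: str) -> bool:
    """Treat 'true', 'y', 'yes', or '1' (case-insensitive) as enabled."""
    return os.getenv(name, default).lower() in ("true", "y", "yes", "1")

# Defaults mirror the diff: Hyperbolic tools on, KB clearing off, KB updates on.
USE_HYPERBOLIC_TOOLS = env_flag("USE_HYPERBOLIC_TOOLS", "true")
CLEAR_TWITTER_KB_ON_START = env_flag("CLEAR_TWITTER_KB_ON_START", "n")
UPDATE_TWITTER_KB_ON_START = env_flag("UPDATE_TWITTER_KB_ON_START", "y")
```

On a Hugging Face Space these flags would be set as Space variables or secrets rather than in a local .env file.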
gradio_ui.py: 39 additions & 31 deletions
@@ -2,7 +2,7 @@
import gradio as gr
import asyncio
from chatbot import initialize_agent
from langchain_core.messages import HumanMessage
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.runnables import RunnableConfig
from base_utils.utils import format_ai_message_content
from datetime import datetime
@@ -14,57 +14,65 @@
async def chat_with_agent(message, history):
global agent, agent_config

# Convert history into messages format that the agent expects
messages = []
if history:
print("History:", history) # Debug print
for msg in history:
if isinstance(msg, dict):
if msg.get("role") == "user":
messages.append(HumanMessage(content=msg["content"]))
elif msg.get("role") == "assistant":
messages.append({"role": "assistant", "content": msg["content"]})

# Add the current message
print("History:", history)
# Iterate through the flat list of message dictionaries
for msg_dict in history:
if isinstance(msg_dict, dict) and 'role' in msg_dict and 'content' in msg_dict:
role = msg_dict['role']
content = msg_dict['content']
if role == "user":
messages.append(HumanMessage(content=content))
elif role == "assistant":
# Pass the content as-is; LangChain can handle AIMessage content
messages.append(AIMessage(content=content))
elif isinstance(msg_dict, (list, tuple)) and len(msg_dict) == 2:
# Fallback for older Gradio history format (just in case)
user_msg, ai_msg = msg_dict
if user_msg:
messages.append(HumanMessage(content=user_msg))
if ai_msg:
messages.append(AIMessage(content=ai_msg))
else:
print(f"Skipping unexpected history item: {msg_dict}")


# Add the current user message
messages.append(HumanMessage(content=message))

print("Final messages:", messages) # Debug print
print("Final messages being sent to agent:", messages)

runnable_config = RunnableConfig(
recursion_limit=agent_config["configurable"]["recursion_limit"],
configurable={
"thread_id": agent_config["configurable"]["thread_id"],
"checkpoint_ns": "chat_mode",
"checkpoint_id": str(datetime.now().timestamp())
"checkpoint_id": str(datetime.now().timestamp()) # Keep checkpointing per interaction
}
)

response_messages = []
yield response_messages
# Process message with agent
current_turn_messages = []

async for chunk in agent.astream(
{"messages": messages}, # Pass the full message history
{"messages": messages},
runnable_config
):
if "agent" in chunk:
print("agent in chunk")
response = chunk["agent"]["messages"][0].content
response_messages.append(dict(
role="assistant",
content=format_ai_message_content(response, format_mode="markdown")
))
print(response_messages)
yield response_messages
response_content = chunk["agent"]["messages"][0].content
formatted_content = format_ai_message_content(response_content, format_mode="markdown")
current_turn_messages.append(formatted_content)
print("Yielding agent response:", current_turn_messages)
yield "\n\n".join(current_turn_messages) # Join with double newline for better separation

elif "tools" in chunk:
print("tools in chunk")
tool_message = str(chunk["tools"]["messages"][0].content)
response_messages.append(dict(
role="assistant",
content=tool_message,
metadata={"title": "🛠️ Tool Call"}
))
print(response_messages)
yield response_messages
formatted_content = f"**🛠️ Tool Call:**\n```\n{tool_message}\n```"
current_turn_messages.append(formatted_content)
print("Yielding tool response:", current_turn_messages)
yield "\n\n".join(current_turn_messages) # Join with double newline

def create_ui():
# Create the Gradio interface
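For context on the reworked streaming above: gr.ChatInterface treats an async generator that yields strings as a streaming response, and each yield replaces the assistant message rendered so far, which is why the handler re-joins current_turn_messages on every chunk. A stripped-down sketch of that contract follows; the agent stream is faked so the example runs standalone, and only the history conversion and the accumulate-and-rejoin yield pattern are taken from the diff:

```python
# Sketch of the streaming contract used by the new chat_with_agent. The agent is
# stubbed out; only the history conversion and rejoin-per-yield pattern mirror the PR.
import gradio as gr
from langchain_core.messages import AIMessage, HumanMessage

async def fake_agent_stream(messages):
    # Stand-in for agent.astream(...): emits "tools" and "agent" style chunks.
    yield {"tools": {"messages": [AIMessage(content="called example_tool(...)")]}}
    yield {"agent": {"messages": [AIMessage(content=f"Done. Received {len(messages)} messages.")]}}

async def chat_with_agent(message, history):
    # With type="messages", history arrives as [{"role": ..., "content": ...}, ...].
    messages = []
    for msg in history:
        if msg.get("role") == "user":
            messages.append(HumanMessage(content=msg["content"]))
        elif msg.get("role") == "assistant":
            messages.append(AIMessage(content=msg["content"]))
    messages.append(HumanMessage(content=message))

    turn = []
    async for chunk in fake_agent_stream(messages):
        if "agent" in chunk:
            turn.append(chunk["agent"]["messages"][0].content)
        elif "tools" in chunk:
            turn.append(f"🛠️ Tool Call: {chunk['tools']['messages'][0].content}")
        # Each yield replaces the assistant bubble, so the partial turn is re-joined.
        yield "\n\n".join(turn)

if __name__ == "__main__":
    gr.ChatInterface(fn=chat_with_agent, type="messages").launch()
```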
pyproject.toml: 1 addition & 1 deletion
@@ -47,4 +47,4 @@ pytest-playwright = "^0.6.2"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
build-backend = "poetry.core.masonry.api"