agent/agent_executors.py (14 changes: 0 additions & 14 deletions)

@@ -72,30 +72,16 @@
 # Select model based on configuration
 if not config.SUBNET_MODE:
     SUGGESTIONS_MODEL = GOOGLE_GEMINI_20_FLASH_MODEL
-    ROUTING_MODEL = GOOGLE_GEMINI_FLASH_15_8B_MODEL
     REASONING_MODEL = GOOGLE_GEMINI_20_FLASH_MODEL
     BASE_URL = "https://generativelanguage.googleapis.com/v1beta/"
     API_KEY = os.getenv("GEMINI_API_KEY")
 else:
     SUGGESTIONS_MODEL = LOCAL_LLM_MODEL
-    ROUTING_MODEL = LOCAL_LLM_MODEL
     REASONING_MODEL = LOCAL_LLM_MODEL
     BASE_URL = LOCAL_LLM_BASE_URL
     API_KEY = "dummy_key"


-def create_routing_model() -> BaseChatModel:
-    return ChatOpenAI(
-        model=ROUTING_MODEL,
-        temperature=0.0,
-        max_tokens=500,
-        api_key=config.DUMMY_X402_API_KEY,
-        http_async_client=x402_http_client,
-        stream_usage=True,
-        streaming=True,
-        base_url=config.LLM_SERVER_URL,
-    )


 def create_suggestions_model() -> BaseChatModel:
     return ChatOpenAI(
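With the LLM router gone, agent_executors.py keeps only the per-agent ChatOpenAI factories that share the configuration block above. A minimal sketch of that factory pattern, assuming the same ChatOpenAI parameters the deleted create_routing_model used (this is an illustration, not the repo's remaining code, whose bodies the diff truncates):

# Sketch of the factory pattern used in this module; parameter defaults are
# assumptions taken from the removed create_routing_model, not the real
# create_suggestions_model body.
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI


def make_chat_model(
    model: str,
    base_url: str,
    api_key: str,
    temperature: float = 0.0,
    max_tokens: int = 500,
) -> BaseChatModel:
    return ChatOpenAI(
        model=model,
        temperature=temperature,
        max_tokens=max_tokens,
        api_key=api_key,
        base_url=base_url,
        streaming=True,       # stream tokens to the caller
        stream_usage=True,    # include token-usage metadata in the stream
    )

Each remaining create_*_model function presumably instantiates ChatOpenAI this way, pointed at SUGGESTIONS_MODEL or REASONING_MODEL and the BASE_URL/API_KEY selected by config.SUBNET_MODE.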
agent/prompts.py (27 changes: 0 additions & 27 deletions)

@@ -14,7 +14,6 @@
 investor_agent_template = env.get_template("investor_agent.jinja2")
 analytics_agent_template = env.get_template("analytics_agent.jinja2")
 suggestions_template = env.get_template("suggestions.jinja2")
-router_template = env.get_template("router.jinja2")


 # We ignore token holdings with a total value of less than $1

@@ -106,29 +105,3 @@ def get_analytics_prompt(
     )

     return analytics_agent_prompt
-
-
-def get_router_prompt(message_history: List[Message], current_message: str) -> str:
-    """Get the router prompt to determine which agent should handle the request."""
-
-    MAX_AGENT_MESSAGE_LENGTH = 400
-
-    # Truncate assistant response to 400 characters, also include the message type
-    message_history = [
-        {
-            "type": message.type,
-            "message": (
-                message.message[:MAX_AGENT_MESSAGE_LENGTH] + "..."
-                if message.type == "assistant"
-                and len(message.message) > MAX_AGENT_MESSAGE_LENGTH
-                else message.message
-            ),
-        }
-        for message in message_history
-    ]
-
-    router_prompt = router_template.render(
-        message_history=message_history,
-        current_message=current_message,
-    )
-    return router_prompt
agent/tools.py (9 changes: 9 additions & 0 deletions)

@@ -19,6 +19,10 @@
     analyze_price_trend,
     analyze_wallet_portfolio,
     get_coingecko_current_price,
+    get_token_market_info,
+    get_top_coins_by_market_cap,
+    get_global_market_overview,
+    compare_tokens,
 )
 from onchain.tokens.trending import (
     get_trending_tokens,

@@ -96,6 +100,11 @@ async def search_token(
     portfolio_volatility,
     analyze_wallet_portfolio,
     get_coingecko_current_price,
+    # Market data tools
+    get_token_market_info,
+    get_top_coins_by_market_cap,
+    get_global_market_overview,
+    compare_tokens,
     # Token tools
     get_trending_tokens,
     evaluate_token_risk,
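The four new imports extend the CoinGecko-backed market tools exposed to the agent; the second hunk above simply appends them to the tool list alongside the existing portfolio and trending-token helpers, so the model can select them by name. Their implementations are not part of this diff. As a hypothetical illustration only (function name, parameters, and endpoint usage are assumptions, not the repo's code), a tool like get_top_coins_by_market_cap could wrap CoinGecko's public /coins/markets endpoint:

# Hypothetical sketch; the real get_top_coins_by_market_cap lives in the
# onchain package and is only imported in this diff.
import httpx

COINGECKO_API = "https://api.coingecko.com/api/v3"


async def top_coins_by_market_cap(limit: int = 10) -> list[dict]:
    """Return basic market data for the top `limit` coins by market cap."""
    params = {
        "vs_currency": "usd",
        "order": "market_cap_desc",
        "per_page": limit,
        "page": 1,
    }
    async with httpx.AsyncClient(timeout=10) as client:
        resp = await client.get(f"{COINGECKO_API}/coins/markets", params=params)
        resp.raise_for_status()
        return [
            {
                "name": coin["name"],
                "symbol": coin["symbol"],
                "price_usd": coin["current_price"],
                "market_cap": coin["market_cap"],
            }
            for coin in resp.json()
        ]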
api/api_types.py (2 changes: 1 addition & 1 deletion)

@@ -123,7 +123,7 @@ class Context(BaseModel):
 class AgentChatRequest(BaseModel):
     context: Context
     message: UserMessage
-    agent: Optional[AgentType] = None
+    agent: AgentType
     captchaToken: Optional[str] = None

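Making agent a required AgentType (it was Optional[AgentType] = None) moves agent selection to the client: together with the removal of the routing model and router prompt above, a chat request must now name its agent up front, and the server can dispatch directly instead of asking an LLM to route. A minimal sketch of the effect using simplified stand-in models (field names and enum members here are assumptions; the real Context, UserMessage, and AgentType are defined elsewhere in api/api_types.py):

# Simplified stand-ins to show the validation change; not the repo's models.
from enum import Enum
from pydantic import BaseModel, ValidationError


class AgentType(str, Enum):
    INVESTOR = "investor"
    ANALYTICS = "analytics"


class AgentChatRequest(BaseModel):
    message: str
    agent: AgentType  # required; previously Optional[AgentType] = None


try:
    AgentChatRequest(message="How is my portfolio doing?")
except ValidationError as err:
    # Requests that omit the agent are now rejected at validation time
    # instead of being routed by an LLM.
    print(err)

# A valid request names the agent explicitly, so the server can dispatch
# with a plain lookup such as executors[request.agent].
request = AgentChatRequest(message="How is my portfolio doing?", agent=AgentType.ANALYTICS)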